text stringlengths 4 1.02M | meta dict |
|---|---|
__author__ = 'Krishna Mudragada'
import logging,time
class EuclidGCD:
    """Collect integers and compute their greatest common divisor.

    Uses the remainder form of Euclid's algorithm; works on both
    Python 2 and Python 3.
    """
    def __init__(self):
        # Configure the root logger once so progress messages are visible.
        logging.basicConfig(format='%(asctime)s %(message)s')
        logging.getLogger().setLevel(logging.INFO)
        logging.info("Initializing EuclidGCD..")
        self.numberList = []  # integers accumulated for the GCD computation
    def addToList(self, number):
        """Append one integer to the working list."""
        self.numberList.append(number)
    def printList(self):
        """Log the collected numbers, tab separated."""
        logging.info('\t'.join(map(str, self.numberList)))
    def gcd(self, x, y):
        """Return gcd(x, y) for non-negative integers.

        Fix: the previous version computed ``x % y`` without checking y,
        so gcd(n, 0) raised ZeroDivisionError (reachable because "0"
        passes isdigit() in main()).  The classic ``while y`` loop gives
        gcd(n, 0) == n and needs no explicit swap: if x < y the first
        iteration swaps them naturally.
        """
        while y:
            logging.debug("in gcd loop x=%s y=%s", x, y)
            x, y = y, x % y
        return x
    def gcdOfList(self):
        """Return the GCD of all collected numbers as a string.

        Raises TypeError if the list is empty (unchanged behavior).
        """
        # Local import keeps this working on Python 3, where reduce()
        # is no longer a builtin; functools.reduce exists on 2.6+ too.
        from functools import reduce
        return str(reduce(self.gcd, self.numberList))
def main():
    """Read numbers interactively until a blank line, then report their GCD.

    Lines that are not non-negative integers are rejected and re-prompted.
    """
    gcdObject = EuclidGCD()
    # py2/py3 shim: raw_input() was renamed input() in Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    while True:
        number_input = read_line("Enter number for calculating GCD:")
        if number_input == '':
            logging.info("End of input")
            break
        if not number_input.isdigit():
            logging.error("Enter a valid number")
            continue
        gcdObject.addToList(int(number_input))
    gcdObject.printList()
    start_time = time.time()
    logging.info("GCD of numbers is " + gcdObject.gcdOfList())
    logging.info("%s seconds taken - " % (time.time() - start_time))
if __name__ == "__main__":
main() | {
"content_hash": "16ec3ddc653bc4733fe5d7fb06fc775f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 31.74418604651163,
"alnum_prop": 0.5545787545787546,
"repo_name": "mudragada/util-scripts",
"id": "a9a88f0d60e53323a72d744ceb789e8ddc98087e",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyProblems/Algos/EuclidGCD.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79980"
}
],
"symlink_target": ""
} |
import os
path_to_target_dir = "./target/"
# function to run shell command
def run_command(com):
    """Run *com* in a shell via os.popen and echo the command and its output.

    Fix: the bodies used Python-2-only ``print`` statements; the
    single-argument parenthesized form below is valid on both Python 2
    and Python 3, preserving the printed output exactly.
    NOTE(review): os.popen discards the exit status; switch to
    subprocess.run if failure detection is ever needed.
    """
    print(com)
    output = os.popen(com).read()
    for line in output.split('\n'):
        print(line)
# function to run shell command but no output to screen
def run_command_n(com):
    """Run a shell command, appending stdout and stderr to log.out
    instead of showing them on screen."""
    os.popen('%s >> log.out 2>&1' % com)
# Run the phase2 indexer inside every cluster directory listed in
# convert_table.txt (first whitespace-separated field = cluster id).
# The table is opened *before* chdir because its path is relative to the
# original working directory.  Fixes: the file handle was never closed,
# readlines() materialized the whole file needlessly, and the py2-only
# print statement broke Python 3.
with open(path_to_target_dir + 'convert_table.txt', 'r') as fin:
    os.chdir(path_to_target_dir + 'cluster/')
    curr_path_to_target_dir = "../../"
    for line in fin:
        did = line.split(' ')[0]
        if int(did) % 100 == 0:
            # progress marker every 100 clusters
            print("finish lucene_step for " + did + " clusters")
        os.chdir(did + '/')
        run_command_n(curr_path_to_target_dir + 'phase2_index --index index < all_fragments.txt')
        os.chdir('..')
| {
"content_hash": "c491139e5a70993c1b15d72af97c58bb",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 93,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6076433121019108,
"repo_name": "nimisha-srinivasa/RTP",
"id": "0ee00b8a0f56e6362fcc291aeed26c640cd0ecc8",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/phase2/index_step/runjob_index.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6944"
},
{
"name": "C",
"bytes": "45440"
},
{
"name": "C++",
"bytes": "226380"
},
{
"name": "CSS",
"bytes": "1262"
},
{
"name": "HTML",
"bytes": "154860"
},
{
"name": "Java",
"bytes": "2934197"
},
{
"name": "Lex",
"bytes": "10586"
},
{
"name": "Makefile",
"bytes": "2768"
},
{
"name": "Python",
"bytes": "7323"
},
{
"name": "Shell",
"bytes": "4679"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces the three side-specific M2M fields on PubmedSearch
    # (left/right/uni) with a single M2M routed through the explicit
    # SearchToRegion join model, which records a hit count and a
    # one-character laterality flag per search/region pair.
    dependencies = [
        ('pubbrain_app', '0005_auto_20150401_1046'),
    ]
    operations = [
        migrations.CreateModel(
            name='SearchToRegion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # number of hits for this region in this search
                ('count', models.IntegerField(null=True, blank=True)),
                # laterality: b'l' left, b'r' right, b'u' unilateral
                ('side', models.CharField(max_length=1, choices=[(b'l', b'left'), (b'r', b'right'), (b'u', b'uni')])),
                ('brain_region', models.ForeignKey(to='pubbrain_app.BrainRegion')),
                ('pubmed_search', models.ForeignKey(to='pubbrain_app.PubmedSearch')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Drop the three old direct M2M fields ...
        migrations.RemoveField(
            model_name='pubmedsearch',
            name='left_brain_regions',
        ),
        migrations.RemoveField(
            model_name='pubmedsearch',
            name='right_brain_regions',
        ),
        migrations.RemoveField(
            model_name='pubmedsearch',
            name='uni_brain_regions',
        ),
        # ... then add the single through-model relation that replaces them.
        migrations.AddField(
            model_name='pubmedsearch',
            name='brain_regions',
            field=models.ManyToManyField(to='pubbrain_app.BrainRegion', null=True, through='pubbrain_app.SearchToRegion', blank=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "05cc1128d02afda08fbec788da059402",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 134,
"avg_line_length": 34.93181818181818,
"alnum_prop": 0.5543266102797658,
"repo_name": "jbwexler/PubBrain",
"id": "9a816a1b372f2196f7324a3f24330d952903796c",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pubbrain_app/migrations/0006_auto_20150401_1124.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20784"
},
{
"name": "CoffeeScript",
"bytes": "76391"
},
{
"name": "HTML",
"bytes": "27170"
},
{
"name": "JavaScript",
"bytes": "244908"
},
{
"name": "Makefile",
"bytes": "81"
},
{
"name": "Python",
"bytes": "96937"
}
],
"symlink_target": ""
} |
from datetime import date
from openerp.tests.common import TransactionCase
from openerp.osv.orm import except_orm
class TestPeriodState(TransactionCase):
    """
    Forbid creation of Journal Entries for a closed period.
    """
    def setUp(self):
        super(TestPeriodState, self).setUp()
        cr, uid = self.cr, self.uid
        # Period-close wizard; {'sure': 1} is the confirmation checkbox.
        self.wizard_period_close = self.registry('account.period.close')
        self.wizard_period_close_id = self.wizard_period_close.create(cr, uid, {'sure': 1})
        # Resolve demo-data XML ids: the sales journal and the first period.
        _, self.sale_journal_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "sales_journal")
        _, self.period_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "period_0")
    def test_period_state(self):
        cr, uid = self.cr, self.uid
        # Close the period through the wizard (context mirrors the client call).
        self.wizard_period_close.data_save(cr, uid, [self.wizard_period_close_id], {
            'lang': 'en_US',
            'active_model': 'account.period',
            'active_ids': [self.period_id],
            'tz': False,
            'active_id': self.period_id
        })
        # Posting even a balanced move (10 debit / 10 credit) in the now
        # closed period must be refused with an ORM exception.
        with self.assertRaises(except_orm):
            self.registry('account.move').create(cr, uid, {
                'name': '/',
                'period_id': self.period_id,
                'journal_id': self.sale_journal_id,
                'date': date.today(),
                'line_id': [(0, 0, {
                    'name': 'foo',
                    'debit': 10,
                }), (0, 0, {
                    'name': 'bar',
                    'credit': 10,
                })]
            })
| {
"content_hash": "e17cb5a6129e2541078411f13b41e7b2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 122,
"avg_line_length": 39.390243902439025,
"alnum_prop": 0.5226006191950464,
"repo_name": "cristianquaglio/odoo",
"id": "4e88994f2d20847730d046ceb8837b2c7e48a879",
"size": "1615",
"binary": false,
"copies": "130",
"ref": "refs/heads/master",
"path": "addons/account/tests/test_account_move_closed_period.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "671328"
},
{
"name": "HTML",
"bytes": "212829"
},
{
"name": "JavaScript",
"bytes": "5984109"
},
{
"name": "Makefile",
"bytes": "12332"
},
{
"name": "Mako",
"bytes": "561"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "8366254"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "19163"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "92945"
}
],
"symlink_target": ""
} |
import logging
import pickle
from sklearn.datasets import fetch_20newsgroups
import numpy as np
from lda2vec import preprocess, Corpus
logging.basicConfig()
# Fetch data
# Strip headers/footers/quotes so the model sees only the message bodies.
remove = ('headers', 'footers', 'quotes')
texts = fetch_20newsgroups(subset='train', remove=remove).data
# Remove tokens with these substrings
bad = set(["ax>", '`@("', '---', '===', '^^^'])
def clean(line):
    """Return *line* rejoined with single spaces, dropping every
    whitespace-separated token that contains a banned substring."""
    kept = [tok for tok in line.split() if all(marker not in tok for marker in bad)]
    return ' '.join(kept)
# Preprocess data
max_length = 10000 # Limit of 10k words per document
# Convert to unicode (spaCy only works with unicode)
# NOTE(review): `unicode` is Python-2 only; under Python 3 this raises
# NameError.  The script as a whole targets Python 2.
texts = [unicode(clean(d)) for d in texts]
tokens, vocab = preprocess.tokenize(texts, max_length, merge=False,
                                    n_threads=4)
corpus = Corpus()
# Make a ranked list of rare vs frequent words
corpus.update_word_count(tokens)
corpus.finalize()
# The tokenization uses spaCy indices, and so may have gaps
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=30)
# Convert the compactified arrays into bag of words arrays
bow = corpus.compact_to_bow(pruned)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
# NOTE(review): this rebinds `clean`, shadowing the clean() function
# defined above.  Harmless here (clean() is not called again), but a
# different name would be safer.
clean = corpus.subsample_frequent(pruned)
# Now flatten a 2D array of document per row and word position
# per column to a 1D array of words. This will also remove skips
# and OoV words
doc_ids = np.arange(pruned.shape[0])
flattened, (doc_ids,) = corpus.compact_to_flat(pruned, doc_ids)
assert flattened.min() >= 0
# Fill in the pretrained word vectors
n_dim = 300
fn_wordvc = 'GoogleNews-vectors-negative300.bin'
vectors, s, f = corpus.compact_word_vectors(vocab, filename=fn_wordvc)
# Save all of the preprocessed files
# Fix: the pickle targets were opened in text mode ('w') and the handles
# were never closed.  Pickle output is bytes, so text mode corrupts it on
# Python 3 (and on Windows under Python 2); 'wb' is correct everywhere.
# Context managers guarantee the files are flushed and closed.
with open('vocab.pkl', 'wb') as fh:
    pickle.dump(vocab, fh)
with open('corpus.pkl', 'wb') as fh:
    pickle.dump(corpus, fh)
np.save("flattened", flattened)
np.save("doc_ids", doc_ids)
np.save("pruned", pruned)
np.save("bow", bow)
np.save("vectors", vectors)
| {
"content_hash": "501ae4b0ab1455176192665752f23784",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 76,
"avg_line_length": 35.152542372881356,
"alnum_prop": 0.7227579556412729,
"repo_name": "cemoody/lda2vec",
"id": "af3281758cc38ca3f5c9110e121fb56805140cc7",
"size": "2233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/twenty_newsgroups/data/preprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69375"
}
],
"symlink_target": ""
} |
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
# Repository whose Travis public key is fetched for encryption.
GITHUB_REPO = 'biowonks/bitkPy'
# The .travis.yml sitting next to this script is the file that gets updated.
TRAVIS_CONFIG_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
    """Parse an RSA public key given in PEM text form.

    Travis sometimes serves keys with the legacy "BEGIN RSA PUBLIC KEY"
    header/footer, which the cryptography library rejects; on that
    failure the header is rewritten and parsing is retried.
    Read more about RSA encryption with cryptography:
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
    """
    try:
        return load_pem_public_key(pubkey.encode(), default_backend())
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196
        fixed = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(fixed.encode(), default_backend())
def encrypt(pubkey, password):
    """RSA-encrypt *password* with *pubkey*, returning it base64 encoded.

    Only the holder of the matching private key (Travis) can decrypt
    the result.
    """
    rsa_key = load_key(pubkey)
    return base64.b64encode(rsa_key.encrypt(password, PKCS1v15()))
def fetch_public_key(repo):
    """Download the RSA public key Travis uses for *repo*.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys

    Raises ValueError when the API response carries no key (typically
    because the repo was never added to Travis).
    """
    key_url = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    payload = json.loads(urlopen(key_url).read().decode())
    if 'key' not in payload:
        message = "Could not find public key for repo: {}.\n".format(repo)
        message += "Have you already added your GitHub repo to Travis?"
        raise ValueError(message)
    return payload['key']
def prepend_line(filepath, line):
    """Rewrite *filepath* so that *line* becomes its first line."""
    with open(filepath) as fh:
        remainder = fh.readlines()
    with open(filepath, 'w') as fh:
        fh.write(line)
        fh.writelines(remainder)
def load_yaml_config(filepath):
    """Parse *filepath* as YAML and return the resulting object.

    Fix: uses yaml.safe_load instead of yaml.load -- the Travis config
    is plain data, and yaml.load without an explicit Loader is
    deprecated and can construct arbitrary Python objects from the
    file.  Also closes the handle deterministically via ``with``.
    """
    with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
    """Serialize *config* to *filepath* as block-style YAML."""
    with open(filepath, 'w') as fh:
        yaml.dump(config, fh, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
    """Store *encrypted_password* in the deploy section of .travis.yml
    and prepend an autogeneration warning banner."""
    config = load_yaml_config(TRAVIS_CONFIG_FILE)
    config['deploy']['password'] = {'secure': encrypted_password}
    save_yaml_config(TRAVIS_CONFIG_FILE, config)
    banner = ('# This file was autogenerated and will overwrite'
              ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, banner)
def main(args):
    """Fetch the repo's Travis key, encrypt the PyPI password, and
    write it into .travis.yml."""
    pubkey = fetch_public_key(args.repo)
    password = args.password if args.password else getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(pubkey, password.encode()))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if __name__ == "__main__":
    import argparse
    # Command-line front end: both flags are optional.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')
    main(parser.parse_args())
| {
"content_hash": "df86e2635596ba718e7e8df579693241",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 30.891666666666666,
"alnum_prop": 0.6808740221203129,
"repo_name": "biowonks/bitkPy",
"id": "0196821b3ed550f54a1d87ad225369569fdb8258",
"size": "3753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "travis_pypi_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "8537"
}
],
"symlink_target": ""
} |
"""@@@
Main Module: Cluster.py
Objects: Cluster
ClustData
Author: Wayne Dawson
creation date: parts 2016 (in chreval), made into a separate object 170426
last update: 200210 (upgrade to python3), 170719, 190812
version: 0
Purpose:
This is used to prepare data for heat-maps and other activities like
this. Currently, this program reads or helps generate files with the
extensions "clust","heat", "eheat", and "cpif". Classes in this module
are still used by chreval.py and shuffle_heatmap.py.
Comments:
190719:
Examining the code, it seems that I have removed any requirement
these settings make use of the object from class Calculate. Class
Cluster is used by chreval to generate the heatmap and cpif file,
but it doesn't seem to require any prior information about heatmap
files. Therefore, I have now entirely decoupled this from anything
associated with Calculate. Although Calculate/chreval use this
package, they don't require any special settings from HeatMapTools
that were required at an earlier step. In fact, even Clust, which
is used by Calculate, does not require any settings from
clust. Therefore, I have decoupled any association of HeatMapTools
with either of the two packages here.
180709:
There seem to a variety of data formats that have been created in
this Chreval package. There is Motif (the most complete), then
there is LThread/LNode, which is hopelessly less complete and only
marginally different from Pair. Here I have this ChPair.
I think this data format was originally developed because Pair or
LNode were too highly entwined with Calculate to use them for
writing code to generate 3D structure building information. The
remaining utility of this class is for building shuffled data of
the pairing interactions. The shuffling is used to validate the 3D
cryo-EM like structure fitting programs. Since this file contains
only the pairing information, it is fairly easy to to shuffle heat
maps without changing other information.
"""
from FileTools import FileTools
from FileTools import getHeadExt
from BasicTools import initialize_matrix
from HeatMapTools import HeatMapTools
from LThread import DispLThread
import sys
import random
import os
# #####################################
# ##### configuration variables #####
# #####################################
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# File extensions this module can read or help generate.
EXTS = ["clust","heat", "eheat", "cpif", "csv"] # extension for the input file
PROGRAM = "Cluster.py" # name of the program
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class Cluster:
    """Accumulate pairing matrices from a list of Boltzmann-weighted threads.

    Used by chreval to build the heat-map ("clust") and pairing-frequency
    ("cpif") matrices.  `calc` must expose `N` (matrix dimension) and,
    for clusterlist(), `fe.dG` (the free-energy matrix) -- both are read
    below; no other part of `calc` is used here.
    """
    def __init__(self, calc):
        self.calc = calc
        self.N = calc.N
        # initialize the free energy matrix
        self.clusters = []
        self.clusters = initialize_matrix(self.clusters, self.N, 0.0)
        # pairing-frequency matrix, filled by cpiflist()
        self.cpif = []
        self.cpif = initialize_matrix(self.cpif, self.N, 0.0)
        self.debug = False
    #
    def clusterlist(self, ltlist):
        """Fill self.clusters: each pair (i,j) is weighted by its thread's
        Boltzmann probability times the normalized free energy dG[i][j].

        Linkage/bookkeeping nodes ('P', 'J', and the bgn/end caps of
        'K', 'W', 'S') are skipped; the matrix is kept symmetric.
        """
        htools = HeatMapTools()
        wt = htools.normalize_matrix(self.calc.fe.dG, "neg")
        if self.debug:
            print (len(ltlist))
        #
        ij_min = 100000.0  # running minimum, used for the up-shift below
        for ltk in ltlist:
            pBoltz = ltk.p  # Boltzmann probability of this thread
            for tr in ltk.thread:
                # ignore linking information
                ctp = tr.ctp
                btp = tr.btp
                if tr.ctp == 'P' or tr.ctp == 'J':
                    # print ("found a P at ", tr.ij_ndx)
                    continue
                #
                if ctp == 'K' and (btp == 'bgn' or btp == 'end'):
                    continue
                #
                if ctp == 'W' and (btp == 'bgn' or btp == 'end'):
                    continue
                #
                if ctp == 'S' and (btp == 'bgn' or btp == 'end'):
                    continue
                #
                v = tr.ij_ndx
                i = v[0]; j = v[1]
                # print (v)
                self.clusters[i][j] += 1.0*pBoltz*wt[i][j] # ij
                self.clusters[j][i] += 1.0*pBoltz*wt[i][j] # ji
                if self.clusters[i][j] < ij_min:
                    ij_min = self.clusters[i][j]
                #
            #|endfor tr in ltk.thread:
        #|endfor ltk in ltlist:
        # If any accumulated weight went negative, shift all non-zero
        # entries up so the map stays non-negative.
        if ij_min < 0.0:
            shift = - ij_min
            print ("encountered positive entropy values")
            print ("up-shifting free energy map data by ", shift)
            for j in range(0, self.N):
                for i in range(0, self.N):
                    if not self.clusters[i][j] == 0.0:
                        self.clusters[i][j] += shift
                        self.clusters[j][i] += shift
                    #
                #
            #
        #
    #
    def cpiflist(self, ltlist):
        """Fill self.cpif: count, per pair (i,j), how many of the threads
        in *ltlist* contain that pair.  Returns 0 on completion.

        The same bookkeeping node types are skipped as in clusterlist().
        """
        debug_cpiflist = False
        Nlt = len(ltlist)
        if self.debug:
            print (Nlt)
        #
        kk = 0 # structure count
        for ltk in ltlist:
            kk += 1
            if debug_cpiflist:
                # this shows that whole contents of ltk =
                # LThread(). One may need to see all of LThread to
                # understand where (or if) something is wrong.
                disthr = DispLThread(self.N)
                disthr.disp_LThread(ltk)
            #
            for tr in ltk.thread:
                # ignore linking information
                ctp = tr.ctp
                btp = tr.btp
                if ctp == 'P' or ctp == 'J':
                    # print ("found a P for ", tr.ij_ndx)
                    continue
                #
                if ctp == 'K' and (btp == 'bgn' or btp == 'end'):
                    if debug_cpiflist:
                        # verify that data is handled correctly
                        print (kk, tr.disp_lnode())
                    #
                    continue
                elif ctp == 'K':
                    if debug_cpiflist:
                        # verify that data is handled correctly
                        print (kk, tr.disp_lnode())
                    #
                #
                if ctp == 'W' and (btp == 'bgn' or btp == 'end'):
                    if debug_cpiflist:
                        # verify that data is handled correctly
                        print (kk, tr.disp_lnode())
                    #
                    continue
                elif ctp == 'W':
                    if debug_cpiflist:
                        # verify that data is handled correctly
                        print (kk, tr.disp_lnode())
                    #
                #
                if ctp == 'S' and (btp == 'bgn' or btp == 'end'):
                    continue
                #
                v = tr.ij_ndx
                i = v[0]; j = v[1]
                # print (v)
                self.cpif[i][j] += 1.0 # ij
                self.cpif[j][i] += 1.0 # ji
            #|endfor tr in ltk.thread:
            if debug_cpiflist:
                print ("planned exit")
                sys.exit(0)
            #
        #|endfor ltk in ltlist:
        return 0
    #
#
#
class ClustData:
    """@@@
    190719: Looking at this now, I'm not really sure why this had to
    be developed separate from the programs in HeatMapTools;
    particularly the method get_data. Anyway, it is used by
    make_heatmap.py to generate heatmaps.
    """
    def __init__(self, use_raw_data):
        self.DEBUG = False # True
        self.use_raw_data = use_raw_data   # read raw matrix vs weighted
        self.from_Nenski = False           # toggles HeatMapTools Nenski mode
        # min/max/range are overwritten by get_data() from HeatMapTools
        self.hm_max = -1000.0
        self.hm_min = 1000.0
        self.wt_range = -1.0
        self.weight = {}  # weight value -> occurrence count
        self.dist = {}    # genomic distance (j - i) -> occurrence count
        self.title = "None"
    #
    def set_Nenski(self):
        """Switch HeatMapTools into Nenski-format reading mode."""
        self.from_Nenski = True
    #
    def get_data(self, flnm):
        """Read a matrix file (csv/clust/heat/...) and return (matrix, N).

        Also caches hm_max, hm_min and wt_range from the reader and sets
        self.title to the file's basename.
        """
        flhd, ext = getHeadExt(flnm)
        self.title = flhd
        print ("getting data from %s" % flnm)
        htools = HeatMapTools() # default setup GenerateHeatMapTools()
        if self.from_Nenski:
            htools.set_Nenski()
        #
        htools.fileformat = ext
        hm = None
        N = -1
        # csv files have their own reader; other extensions go through
        # the raw or weighted MatrixFile readers.
        if ext == "csv":
            gmtrx = htools.read_heatmap(flnm)
        else:
            if self.use_raw_data:
                gmtrx = htools.read_MatrixFile(flnm, EXTS) # EXT = "clust","heat"
            else:
                gmtrx = htools.read_MatrixFile_wt(flnm, EXTS) # EXT = "clust"
            #
        #
        N = gmtrx.length
        hm = gmtrx.heatmap
        # NOTE(review): `clusters` is assigned but never used here.
        clusters = gmtrx.clusters
        if self.DEBUG:
            print (htools.disp_fmatrix(hm, "heatmap"))
        #
        self.hm_max = htools.hm_max
        self.hm_min = htools.hm_min
        self.wt_range = htools.wt_range
        return hm, N
    #
    # there could be several different types of heatmaps; e.g.,
    # heatmap, clust, cpif. This provides a general way to build and
    # display all of them.
    def get_distribution(self, hm):
        """Tally, over the upper triangle of *hm*, the distribution of
        genomic distances (self.dist) and of scaled weights (self.weight)
        for all non-zero entries."""
        N = len(hm)
        self.weight = {}
        self.dist = {}
        for j in range(0,N):
            for i in range(0,j):
                if not hm[i][j] == 0:
                    d = j - i
                    if d in self.dist:
                        self.dist[d] += 1
                    else:
                        self.dist.update({d : 1 })
                    #
                    # scale the raw matrix value by the weight range
                    wt = self.wt_range*hm[i][j]
                    if wt in self.weight:
                        self.weight[wt] += 1
                    else:
                        self.weight.update({wt : 1})
                    #
                #
            #|endfor
        #|endfor
        if self.DEBUG:
            print ("weight: ", len(self.weight))
            print ("dist: ", len(self.dist))
        #
    #
    def disp_WeightDistrib(self):
        """Return the weight histogram as a printable table (or '' when
        no data has been collected yet)."""
        flag_skip = False
        if self.title == "None":
            flag_skip = True
        #
        if len(self.weight) == 0:
            flag_skip = True
        #
        s = ''
        if not flag_skip:
            keys = list(self.weight.keys())
            keys.sort()
            """@
            In python3 keys becomes
            dict_keys([ .... ]).
            As far as I can tell, it looks like the order is according
            the way the items were entered. If we don't need to sort
            the data, then we could just write
            for v in keys:
            print v
            #
            However, we want the keys sorted, so we will have to work
            around this. The procedure is as follows
            keys = list(self.weight.keys())
            keys.sort()
            So we must request the list before we do the sorting.
            """
            s += "# %s\n" % self.title
            s += "# instances of same matrix element\n"
            s += "# matrix number\n"
            s += "# element of\n"
            s += "# found instances\n"
            for ww in keys:
                s += " %8.3f %3d\n" % (ww, self.weight[ww])
            #
        return s
    #
    def disp_DistDistrib(self):
        """Return the genomic-distance histogram as a printable table
        (or '' when no data has been collected yet)."""
        flag_skip = False
        if self.title == "None":
            flag_skip = True
        #
        if len(self.dist) == 0:
            flag_skip = True
        #
        s = ''
        if not flag_skip:
            keys = list(self.dist.keys())
            keys.sort()
            s += "# %s\n" % self.title
            s += "# genomic distance vs counts\n"
            s += "# dist counts\n"
            for dd in keys:
                s += " %4d %3d\n" % (dd, self.dist[dd])
            #|endfor
        #
        return s
    #
#
#
| {
"content_hash": "347031a88228118cb6ba4569f7b2ab6d",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 81,
"avg_line_length": 30.17391304347826,
"alnum_prop": 0.45381043868075566,
"repo_name": "4dnucleome/looper",
"id": "e4e37628843e4c5b0713dd3493afdeb9229d17c2",
"size": "12516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chreval/Cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "479319"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from config.models import OpenstackSettings, NodeSettings
# Expose both settings models in the Django admin with default
# ModelAdmin options.
admin.site.register(OpenstackSettings)
admin.site.register(NodeSettings)
| {
"content_hash": "d6165736a172adc48831732cecf1cb3a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 57,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.8554216867469879,
"repo_name": "Havate/havate-openstack",
"id": "2ff9eff0f848189b6422effb1f8550fc91ec40ea",
"size": "166",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "proto-build/gui/config/admin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407618"
},
{
"name": "HTML",
"bytes": "507406"
},
{
"name": "JavaScript",
"bytes": "25322"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "21665856"
},
{
"name": "Shell",
"bytes": "62617"
}
],
"symlink_target": ""
} |
"""
Django settings for shredshyft project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the directory two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the DJANGO_SECRET_KEY environment variable; the committed value
# is retained only as a development fallback so existing local setups
# keep working unchanged.  Rotate it for any production deployment.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'sf^+9%&jvv)lvl-27*$v3x+)=8c#x6b@dmm*x^50)fky_c#p+6')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Project apps.
    'redshiftsettings.apps.RedshiftsettingsConfig',
    'dashboards.apps.DashboardsConfig',
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shredshyft.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'shredshyft.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Host 'db' suggests a container/service name rather than localhost.
# NOTE(review): no PASSWORD is configured -- presumably the database
# allows trust authentication; confirm before any shared deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'postgres',
        'USER': 'postgres',
        'HOST': 'db',
        'PORT': 5432,
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "661b8f3eaba8ed94335c5d7e8d925617",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 91,
"avg_line_length": 26.404761904761905,
"alnum_prop": 0.682897505259994,
"repo_name": "gerricchaplin/shredshyft",
"id": "be878f19591aa33b0c784e8552271e93e89d6e60",
"size": "3327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shredshyft/shredshyft/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2439"
},
{
"name": "Python",
"bytes": "12734"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
} |
"""
========================================
Interpolation (:mod:`scipy.interpolate`)
========================================
.. currentmodule:: scipy.interpolate
Sub-package for objects used in interpolation.
As listed below, this sub-package contains spline functions and classes,
1-D and multidimensional (univariate and multivariate)
interpolation classes, Lagrange and Taylor polynomial interpolators, and
wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
and DFITPACK functions.
Univariate interpolation
========================
.. autosummary::
:toctree: generated/
interp1d
BarycentricInterpolator
KroghInterpolator
barycentric_interpolate
krogh_interpolate
pchip_interpolate
CubicHermiteSpline
PchipInterpolator
Akima1DInterpolator
CubicSpline
PPoly
BPoly
Multivariate interpolation
==========================
Unstructured data:
.. autosummary::
:toctree: generated/
griddata
LinearNDInterpolator
NearestNDInterpolator
CloughTocher2DInterpolator
RBFInterpolator
Rbf
interp2d
For data on a grid:
.. autosummary::
:toctree: generated/
interpn
RegularGridInterpolator
RectBivariateSpline
.. seealso::
`scipy.ndimage.map_coordinates`
Tensor product polynomials:
.. autosummary::
:toctree: generated/
NdPPoly
1-D Splines
===========
.. autosummary::
:toctree: generated/
BSpline
make_interp_spline
make_lsq_spline
Functional interface to FITPACK routines:
.. autosummary::
:toctree: generated/
splrep
splprep
splev
splint
sproot
spalde
splder
splantider
insert
Object-oriented FITPACK interface:
.. autosummary::
:toctree: generated/
UnivariateSpline
InterpolatedUnivariateSpline
LSQUnivariateSpline
2-D Splines
===========
For data on a grid:
.. autosummary::
:toctree: generated/
RectBivariateSpline
RectSphereBivariateSpline
For unstructured data:
.. autosummary::
:toctree: generated/
BivariateSpline
SmoothBivariateSpline
SmoothSphereBivariateSpline
LSQBivariateSpline
LSQSphereBivariateSpline
Low-level interface to FITPACK functions:
.. autosummary::
:toctree: generated/
bisplrep
bisplev
Additional tools
================
.. autosummary::
:toctree: generated/
lagrange
approximate_taylor_polynomial
pade
.. seealso::
`scipy.ndimage.map_coordinates`,
`scipy.ndimage.spline_filter`,
`scipy.signal.resample`,
`scipy.signal.bspline`,
`scipy.signal.gauss_spline`,
`scipy.signal.qspline1d`,
`scipy.signal.cspline1d`,
`scipy.signal.qspline1d_eval`,
`scipy.signal.cspline1d_eval`,
`scipy.signal.qspline2d`,
`scipy.signal.cspline2d`.
``pchip`` is an alias of `PchipInterpolator` for backward compatibility
(should not be used in new code).
"""
from ._interpolate import *
from ._fitpack_py import *
# New interface to fitpack library:
from ._fitpack2 import *
from ._rbf import Rbf
from ._rbfinterp import *
from ._polyint import *
from ._cubic import *
from ._ndgriddata import *
from ._bsplines import *
from ._pade import *
# Deprecated namespaces, to be removed in v2.0.0
from . import fitpack, fitpack2, interpolate, ndgriddata, polyint
# Public API: everything imported above that is not underscore-prefixed.
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
# Expose scipy.interpolate.test() for running this subpackage's tests.
test = PytestTester(__name__)
del PytestTester
# Backward compatibility
pchip = PchipInterpolator
| {
"content_hash": "42bf1f2568cac360fee5f5ecec7140e3",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 72,
"avg_line_length": 17.510204081632654,
"alnum_prop": 0.6960955710955711,
"repo_name": "matthew-brett/scipy",
"id": "cf81c88d63ead6a83b76fa91cd615ecefb46f539",
"size": "3432",
"binary": false,
"copies": "1",
"ref": "refs/heads/polished-meson-windows",
"path": "scipy/interpolate/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818671"
},
{
"name": "C++",
"bytes": "3181034"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1035101"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "133294"
},
{
"name": "PowerShell",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "14259543"
},
{
"name": "Shell",
"bytes": "4415"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""
Example Airflow DAG for Google Cloud Dataflow service
"""
import os
from typing import Callable, Dict, List
from urllib.parse import urlparse
from airflow import models
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataflow import DataflowJobStatus
from airflow.providers.google.cloud.operators.dataflow import (
CheckJobRunning,
DataflowCreateJavaJobOperator,
DataflowCreatePythonJobOperator,
DataflowTemplatedJobStartOperator,
)
from airflow.providers.google.cloud.sensors.dataflow import (
DataflowJobAutoScalingEventsSensor,
DataflowJobMessagesSensor,
DataflowJobMetricsSensor,
DataflowJobStatusSensor,
)
from airflow.providers.google.cloud.transfers.gcs_to_local import GCSToLocalFilesystemOperator
from airflow.utils.dates import days_ago
# GCS locations used by the example DAGs. The defaults contain a space on
# purpose ("INVALID BUCKET NAME") so the DAGs fail loudly until configured.
GCS_TMP = os.environ.get('GCP_DATAFLOW_GCS_TMP', 'gs://INVALID BUCKET NAME/temp/')
GCS_STAGING = os.environ.get('GCP_DATAFLOW_GCS_STAGING', 'gs://INVALID BUCKET NAME/staging/')
GCS_OUTPUT = os.environ.get('GCP_DATAFLOW_GCS_OUTPUT', 'gs://INVALID BUCKET NAME/output')
GCS_JAR = os.environ.get('GCP_DATAFLOW_JAR', 'gs://INVALID BUCKET NAME/word-count-beam-bundled-0.1.jar')
GCS_PYTHON = os.environ.get('GCP_DATAFLOW_PYTHON', 'gs://INVALID BUCKET NAME/wordcount_debugging.py')

# Split the jar URI into bucket and object for GCSToLocalFilesystemOperator.
GCS_JAR_PARTS = urlparse(GCS_JAR)
GCS_JAR_BUCKET_NAME = GCS_JAR_PARTS.netloc
GCS_JAR_OBJECT_NAME = GCS_JAR_PARTS.path[1:]

# Dataflow options shared by all DAGs below via default_args.
default_args = {
    'dataflow_default_options': {
        'tempLocation': GCS_TMP,
        'stagingLocation': GCS_STAGING,
    }
}
with models.DAG(
    "example_gcp_dataflow_native_java",
    schedule_interval=None, # Override to match your needs
    start_date=days_ago(1),
    tags=['example'],
) as dag_native_java:

    # [START howto_operator_start_java_job_jar_on_gcs]
    # Launch the WordCount jar directly from its GCS location.
    start_java_job = DataflowCreateJavaJobOperator(
        task_id="start-java-job",
        jar=GCS_JAR,
        job_name='{{task.task_id}}',
        options={
            'output': GCS_OUTPUT,
        },
        poll_sleep=10,
        job_class='org.apache.beam.examples.WordCount',
        check_if_running=CheckJobRunning.IgnoreJob,
        location='europe-west3',
    )
    # [END howto_operator_start_java_job_jar_on_gcs]

    # [START howto_operator_start_java_job_local_jar]
    # Variant: download the jar to the worker first, then launch it from
    # the local filesystem path.
    jar_to_local = GCSToLocalFilesystemOperator(
        task_id="jar-to-local",
        bucket=GCS_JAR_BUCKET_NAME,
        object_name=GCS_JAR_OBJECT_NAME,
        filename="/tmp/dataflow-{{ ds_nodash }}.jar",
    )

    start_java_job_local = DataflowCreateJavaJobOperator(
        task_id="start-java-job-local",
        jar="/tmp/dataflow-{{ ds_nodash }}.jar",
        job_name='{{task.task_id}}',
        options={
            'output': GCS_OUTPUT,
        },
        poll_sleep=10,
        job_class='org.apache.beam.examples.WordCount',
        check_if_running=CheckJobRunning.WaitForRun,
    )
    jar_to_local >> start_java_job_local
    # [END howto_operator_start_java_job_local_jar]
with models.DAG(
    "example_gcp_dataflow_native_python",
    default_args=default_args,
    start_date=days_ago(1),
    schedule_interval=None, # Override to match your needs
    tags=['example'],
) as dag_native_python:

    # [START howto_operator_start_python_job]
    # Run a wordcount pipeline staged on GCS; dependencies are installed
    # into a fresh virtualenv on the worker (py_requirements).
    start_python_job = DataflowCreatePythonJobOperator(
        task_id="start-python-job",
        py_file=GCS_PYTHON,
        py_options=[],
        job_name='{{task.task_id}}',
        options={
            'output': GCS_OUTPUT,
        },
        py_requirements=['apache-beam[gcp]==2.21.0'],
        py_interpreter='python3',
        py_system_site_packages=False,
        location='europe-west3',
    )
    # [END howto_operator_start_python_job]

    # Variant: run the wordcount example shipped inside the apache_beam
    # package itself, invoked as a module via "-m".
    start_python_job_local = DataflowCreatePythonJobOperator(
        task_id="start-python-job-local",
        py_file='apache_beam.examples.wordcount',
        py_options=['-m'],
        job_name='{{task.task_id}}',
        options={
            'output': GCS_OUTPUT,
        },
        py_requirements=['apache-beam[gcp]==2.14.0'],
        py_interpreter='python3',
        py_system_site_packages=False,
    )
with models.DAG(
    "example_gcp_dataflow_native_python_async",
    default_args=default_args,
    start_date=days_ago(1),
    schedule_interval=None, # Override to match your needs
    tags=['example'],
) as dag_native_python_async:
    # [START howto_operator_start_python_job_async]
    # wait_until_finished=False makes the operator return right after the
    # job is submitted; the sensor tasks below poll the running job.
    start_python_job_async = DataflowCreatePythonJobOperator(
        task_id="start-python-job-async",
        py_file=GCS_PYTHON,
        py_options=[],
        job_name='{{task.task_id}}',
        options={
            'output': GCS_OUTPUT,
        },
        py_requirements=['apache-beam[gcp]==2.25.0'],
        py_interpreter='python3',
        py_system_site_packages=False,
        location='europe-west3',
        wait_until_finished=False,
    )
    # [END howto_operator_start_python_job_async]

    # [START howto_sensor_wait_for_job_status]
    # Block until the asynchronously launched job reports JOB_STATE_DONE.
    # The job id is pulled from the launch task's XCom.
    wait_for_python_job_async_done = DataflowJobStatusSensor(
        task_id="wait-for-python-job-async-done",
        job_id="{{task_instance.xcom_pull('start-python-job-async')['job_id']}}",
        expected_statuses={DataflowJobStatus.JOB_STATE_DONE},
        location='europe-west3',
    )
    # [END howto_sensor_wait_for_job_status]

    # [START howto_sensor_wait_for_job_metric]
def check_metric_scalar_gte(metric_name: str, value: int) -> Callable:
"""Check is metric greater than equals to given value."""
def callback(metrics: List[Dict]) -> bool:
dag_native_python_async.log.info("Looking for '%s' >= %d", metric_name, value)
for metric in metrics:
context = metric.get("name", {}).get("context", {})
original_name = context.get("original_name", "")
tentative = context.get("tentative", "")
if original_name == "Service-cpu_num_seconds" and not tentative:
return metric["scalar"] >= value
raise AirflowException(f"Metric '{metric_name}' not found in metrics")
return callback
wait_for_python_job_async_metric = DataflowJobMetricsSensor(
task_id="wait-for-python-job-async-metric",
job_id="{{task_instance.xcom_pull('start-python-job-async')['job_id']}}",
location='europe-west3',
callback=check_metric_scalar_gte(metric_name="Service-cpu_num_seconds", value=100),
)
# [END howto_sensor_wait_for_job_metric]
# [START howto_sensor_wait_for_job_message]
def check_message(messages: List[dict]) -> bool:
"""Check message"""
for message in messages:
if "Adding workflow start and stop steps." in message.get("messageText", ""):
return True
return False
wait_for_python_job_async_message = DataflowJobMessagesSensor(
task_id="wait-for-python-job-async-message",
job_id="{{task_instance.xcom_pull('start-python-job-async')['job_id']}}",
location='europe-west3',
callback=check_message,
)
# [END howto_sensor_wait_for_job_message]
# [START howto_sensor_wait_for_job_autoscaling_event]
def check_autoscaling_event(autoscaling_events: List[dict]) -> bool:
"""Check autoscaling event"""
for autoscaling_event in autoscaling_events:
if "Worker pool started." in autoscaling_event.get("description", {}).get("messageText", ""):
return True
return False
wait_for_python_job_async_autoscaling_event = DataflowJobAutoScalingEventsSensor(
task_id="wait-for-python-job-async-autoscaling-event",
job_id="{{task_instance.xcom_pull('start-python-job-async')['job_id']}}",
location='europe-west3',
callback=check_autoscaling_event,
)
# [END howto_sensor_wait_for_job_autoscaling_event]
start_python_job_async >> wait_for_python_job_async_done
start_python_job_async >> wait_for_python_job_async_metric
start_python_job_async >> wait_for_python_job_async_message
start_python_job_async >> wait_for_python_job_async_autoscaling_event
with models.DAG(
    "example_gcp_dataflow_template",
    default_args=default_args,
    start_date=days_ago(1),
    schedule_interval=None, # Override to match your needs
    tags=['example'],
) as dag_template:
    # [START howto_operator_start_template_job]
    # Run the Google-provided Word_Count template over sample input.
    start_template_job = DataflowTemplatedJobStartOperator(
        task_id="start-template-job",
        template='gs://dataflow-templates/latest/Word_Count',
        parameters={'inputFile': "gs://dataflow-samples/shakespeare/kinglear.txt", 'output': GCS_OUTPUT},
        location='europe-west3',
    )
    # [END howto_operator_start_template_job]
| {
"content_hash": "52f401314ebdec4611f53d539637e13e",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 105,
"avg_line_length": 37.467811158798284,
"alnum_prop": 0.6502863688430699,
"repo_name": "sekikn/incubator-airflow",
"id": "6e58ff4d67ffef43402f9a17d7e26512bf209522",
"size": "9518",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/example_dags/example_dataflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import re
from collections import Iterable
from io import StringIO
from itertools import groupby
from typing import List, Tuple, Callable, Any, IO, cast
from smartchangelog import datetools
from smartchangelog.commit import Commit
class Node:
    """A tree node used to render grouped changelog reports.

    Leaf nodes carry a single Commit in ``value``; internal nodes carry a
    group ``name``, the grouping ``criterion`` (a Commit property) and a
    tuple of child nodes. Parent links are maintained by the ``children``
    setter.
    """

    def __init__(self, name: str = None, criterion: property = None, children: Tuple['Node'] = None,
                 value: Commit = None) -> None:
        # The parent pointer is set when this node is assigned as someone's
        # child (see the children setter), never directly here.
        self._parent: 'Node' = None
        self.name = name
        self.criterion = criterion
        self._children: Tuple['Node'] = None
        self.children = children
        self.value = value

    @property
    def parent(self) -> 'Node':
        """Parent node, or None for the root."""
        return self._parent

    @property
    def children(self) -> Tuple['Node']:
        """Child nodes, or None for a leaf."""
        return self._children

    @children.setter
    def children(self, children: Tuple['Node']) -> None:
        # Adopt the children: re-target each child's parent pointer to self.
        if children is not None:
            for node in children:
                node._parent = self
        self._children = children

    def depth_level(self) -> int:
        """Distance from the root; the root itself is at level 0."""
        if self.parent is None:
            return 0
        else:
            return self.parent.depth_level() + 1

    def __len__(self):
        # A leaf counts as 1; an internal node counts its leaves recursively.
        if not self.children:
            return 1
        nb_children = 0
        for child in self.children:
            nb_children += len(child)
        return nb_children

    @classmethod
    def print_multilines(cls, name: str, value: str, file: IO):
        """Print ``name: value``; a multi-line value becomes a nested list."""
        if value:
            lines = value.split('\n')
            if len(lines) == 1:
                print(" * {name}: {value}".format(name=name, value=value), file=file)
            else:
                print(" * {name}:".format(name=name), file=file)
                for line in lines:
                    print(" - {line}".format(line=line), file=file)

    @classmethod
    def print_leaf(cls, commit: Commit, file: IO) -> None:
        """Print a single commit as a Markdown-style bullet with details."""
        print("* subject: {subject}".format(subject=commit.subject or ''), file=file)
        cls.print_multilines(name='body', value=commit.body, file=file)
        print(" * date: {date}".format(date=datetools.date2str(commit.date)), file=file)
        print(" * author: {author}".format(author=commit.author), file=file)
        print(" * commit: {id}".format(id=commit.id), file=file)

    def print_header(self, node: 'Node', file: IO):
        """Print a Markdown header for ``node``; tree depth sets the '#' count."""
        print(
            "{header} {criterion_name}: {name}".format(
                header="#" * (self.depth_level() + 1),
                criterion_name=Commit.property_name(node.criterion),
                name=node.name
            ),
            file=file
        )
        print(file=file)

    def report(self) -> str:
        """Render this subtree as a Markdown changelog fragment.

        Leaves print their commit; internal nodes print a header per named
        child followed by the child's own (recursive) report.
        """
        sio = StringIO()
        with sio:
            if self.children is None:
                self.print_leaf(commit=self.value, file=sio)
            else:
                for node in self.children:
                    if node.name:
                        self.print_header(node=node, file=sio)
                    print(node.report().strip('\n'), file=sio)
                    print(file=sio)
            # Must read the buffer before the with-block closes it.
            string = sio.getvalue()
        return string
class Changelog(List[Commit]):
    """A list of commits parsed from ``git log`` output, with grouping helpers."""

    @classmethod
    def parse(cls, log: str) -> 'Changelog':
        """Split raw ``git log`` text into commit chunks and parse each one."""
        # Each commit starts with "commit <40-hex-sha>" and runs (non-greedy)
        # up to the next commit header or end of input.
        raw_commits = re.findall('(commit [a-z0-9]{40}\n(?:.|\n)*?)(?=commit [a-z0-9]{40}|$)', log)
        return Changelog([Commit.parse(rc) for rc in raw_commits])

    def groupby(self, *criteria: property) -> Node:
        """Recursively group the commits into a Node tree by the criteria.

        With no criteria left, commits are sorted by date and returned as a
        flat node of leaves. Commits with no value for the current criterion
        are collected under an "unknown" group.
        """
        if len(criteria) == 0:
            # Sort
            date_prop = cast(property, Commit.date)
            date_getter = cast(Callable[[Commit], Any], date_prop.fget)
            self.sort(key=date_getter)
            return self.node()
        criteria_list = list(criteria)
        criterion = criteria_list.pop(0)
        criterion_getter = cast(Callable[[Commit], Any], criterion.fget)
        # Filter
        # noinspection PyTypeChecker
        categorized_changelog = Changelog([commit for commit in self if criterion_getter(commit) is not None])
        # noinspection PyTypeChecker
        uncategorized_commits = Changelog([commit for commit in self if criterion_getter(commit) is None])
        # Sort
        # itertools.groupby below requires its input pre-sorted by the key.
        categorized_changelog.sort(key=criterion_getter)
        # Arrange
        raw_result = self.groupby_to_list(groupby(iterable=categorized_changelog, key=criterion_getter))
        children_list: List[Node] = []
        for key, group in raw_result:
            cl = Changelog(group)
            # Recurse with the remaining criteria to build each sub-tree.
            children_list.append(Node(name=str(key), criterion=criterion, children=cl.groupby(*criteria_list).children))
        if len(uncategorized_commits) > 0:
            children_list.append(uncategorized_commits.node(name="unknown", criterion=criterion))
        children = cast(Tuple[Node], tuple(children_list))
        return Node(children=children)

    def node(self, name: str=None, criterion: property=None) -> Node:
        """Wrap each commit in a leaf Node under a single parent Node."""
        # noinspection PyTypeChecker
        children = cast(Tuple[Node], tuple(Node(value=commit) for commit in self))
        return Node(name=name, criterion=criterion, children=children)

    @classmethod
    def groupby_to_list(cls, iterable: Iterable):
        """Materialize an itertools.groupby result into [key, [items]] pairs."""
        return [[key, [i for i in group]] for key, group in iterable]
| {
"content_hash": "1fdb890c7b97039fa6dcfbacc8b420f1",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 120,
"avg_line_length": 34.42384105960265,
"alnum_prop": 0.5802231627549057,
"repo_name": "ngouzy/commitmsg",
"id": "afe6d81f8b9e87a5ca6ec61cf0516a2ff4815017",
"size": "5198",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smartchangelog/changelog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32069"
}
],
"symlink_target": ""
} |
"""
A python script to get all job positions from an xls file.
"""
from openpyxl import load_workbook
import pickle
if __name__ == '__main__':
wb = load_workbook('positions.xlsx')
ws = wb.get_sheet_by_name(wb.get_sheet_names()[0])
jobs = {}
job_titles = ws.__getitem__("A")
job_categories = ws.__getitem__("D")
for title,category in zip(job_titles[1:],job_categories[1:]):
if title.value:
jobs[title.value.lower()] = category.value.lower()
with open('positions','wb') as fp:
pickle.dump(jobs,fp)
| {
"content_hash": "024c8f4319cb4842c426d027d539cc63",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 26.4,
"alnum_prop": 0.6401515151515151,
"repo_name": "skcript/cvscan",
"id": "607ae246accacc0607719f0581756f651c41f81f",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvscan/data/job_positions/scraping_jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41151"
}
],
"symlink_target": ""
} |
"""Contains base class for iterative batch optimization procedures."""
from heuristic_optimization.base import IterativeOptimizer
# It's possible that this is sort of unnecessary, as in: every
# IterativeOptimizer is a (possibly trivial) BatchOptimizer.
# But I think this is somewhat convenient and mostly it'll help me get
# closer to the sampling (in mindset at least).
class BatchOptimizer(IterativeOptimizer):
    """Base class for iterative, batch-evaluation optimizers.

    Such optimizers generate a set of positions in each iteration,
    which is then evaluated. The next batch is determined based on
    these results. (Examples include evolutionary computations,
    particle swarm and similar population-based methods unless they
    rely on memory beyond the current state.)
    """

    def __init__(self, objective_function, obj_fct_is_vectorized=False):
        super().__init__(objective_function, obj_fct_is_vectorized)
        # Current batch of candidate positions and their objective scores.
        # Both stay None until the first call to iterate().
        self.positions = None
        self.scores = None

    def iterate(self):
        """Update positions and scores.

        NOTE(review): self.positions is assigned before compute_scores()
        runs — subclasses could observe it there, so preserve this order.
        """
        self.positions = self.next_positions()
        self.scores = self.compute_scores(self.positions)
        self._update_historic_best(self.positions, self.scores)

    def next_positions(self):
        """Return next batch of positions to be evaluated.

        Abstract: subclasses must override.
        """
        raise NotImplementedError
| {
"content_hash": "b5577a0e581daccacd19ae73a5264f3d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 41.303030303030305,
"alnum_prop": 0.7212032281731474,
"repo_name": "tjanson/heuristic_optimization",
"id": "7b312414eecd0ee2ad1dd0c7688cf1353bf644ca",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heuristic_optimization/base/batch_optimizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13349"
}
],
"symlink_target": ""
} |
"""Cloud Datastore helper functions."""
import sys
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from googledatastore import PropertyFilter, CompositeFilter
from googledatastore import helper as datastore_helper
from googledatastore.connection import Datastore
from googledatastore.connection import RPCError
from apache_beam.internal import auth
from apache_beam.utils import retry
def key_comparator(k1, k2):
  """A comparator for Datastore keys.

  Comparison is only valid for keys in the same partition. The comparison here
  is between the list of paths for each key.
  """
  if k1.partition_id != k2.partition_id:
    raise ValueError('Cannot compare keys with different partition ids.')

  paths1 = list(k1.path)
  paths2 = list(k2.path)
  # Compare element-wise over the shared prefix; the first difference wins.
  for path1, path2 in zip(paths1, paths2):
    ordering = compare_path(path1, path2)
    if ordering != 0:
      return ordering
  # Shared prefix is identical: the longer path sorts last.
  if len(paths1) > len(paths2):
    return 1
  if len(paths1) < len(paths2):
    return -1
  return 0
def compare_path(p1, p2):
  """A comparator for key path.

  A path has either an `id` or a `name` field defined. The
  comparison works with the following rules:

  1. If one path has `id` defined while the other doesn't, then the
  one with `id` defined is considered smaller.
  2. If both paths have `id` defined, then their ids are compared.
  3. If no `id` is defined for both paths, then their `names` are compared.
  """
  kind_ordering = str_compare(p1.kind, p2.kind)
  if kind_ordering != 0:
    return kind_ordering

  p1_has_id = p1.HasField('id')
  p2_has_id = p2.HasField('id')
  if p1_has_id and p2_has_id:
    return p1.id - p2.id
  if p1_has_id:
    return -1
  if p2_has_id:
    return 1
  return str_compare(p1.name, p2.name)
def str_compare(s1, s2):
  """Three-way string compare: -1, 0 or 1 as s1 is <, == or > s2."""
  if s1 < s2:
    return -1
  if s1 > s2:
    return 1
  return 0
def get_datastore(project):
  """Returns a Cloud Datastore client for the given project."""
  return Datastore(project, auth.get_service_credentials())
def make_request(project, namespace, query):
  """Make a Cloud Datastore request for the given query."""
  run_query_request = datastore_pb2.RunQueryRequest()
  run_query_request.partition_id.CopyFrom(make_partition(project, namespace))
  run_query_request.query.CopyFrom(query)
  return run_query_request
def make_partition(project, namespace):
  """Make a PartitionId for the given project and namespace."""
  partition_id = entity_pb2.PartitionId()
  partition_id.project_id = project
  # The default namespace is represented by leaving namespace_id unset.
  if namespace is not None:
    partition_id.namespace_id = namespace
  return partition_id
def retry_on_rpc_error(exception):
  """A retry filter for Cloud Datastore RPCErrors."""
  # Only server-side (5xx) RPC errors are treated as retryable.
  # TODO(vikasrk): Figure out what other errors should be retried.
  return isinstance(exception, RPCError) and exception.code >= 500
def fetch_entities(project, namespace, query, datastore):
  """A helper method to fetch entities from Cloud Datastore.

  Args:
    project: Project ID
    namespace: Cloud Datastore namespace
    query: Query to be read from
    datastore: Cloud Datastore Client

  Returns:
    An iterator of entities (lazy: batches are fetched during iteration).
  """
  return QueryIterator(project, namespace, query, datastore)
def is_key_valid(key):
  """Returns True if a Cloud Datastore key is complete.

  A key is complete if its last element has either an id or a name.
  """
  if not key.path:
    return False
  last_element = key.path[-1]
  return last_element.HasField('id') or last_element.HasField('name')
def write_mutations(datastore, project, mutations):
  """A helper function to write a batch of mutations to Cloud Datastore.

  If a commit fails, it will be retried upto 5 times. All mutations in the
  batch will be committed again, even if the commit was partially successful.
  If the retry limit is exceeded, the last exception from Cloud Datastore will
  be raised.

  Args:
    datastore: Cloud Datastore client.
    project: Project ID.
    mutations: Iterable of mutation protos to commit in one request.
  """
  commit_request = datastore_pb2.CommitRequest()
  commit_request.mode = datastore_pb2.CommitRequest.NON_TRANSACTIONAL
  commit_request.project_id = project
  for mutation in mutations:
    commit_request.mutations.add().CopyFrom(mutation)

  # The whole request is retried on transient (5xx) RPC errors, so the
  # mutations should be idempotent.
  @retry.with_exponential_backoff(num_retries=5,
                                  retry_filter=retry_on_rpc_error)
  def commit(req):
    datastore.commit(req)

  commit(commit_request)
def make_latest_timestamp_query(namespace):
  """Make a Query to fetch the latest timestamp statistics."""
  query = query_pb2.Query()
  if namespace is None:
    kind_name = '__Stat_Total__'
  else:
    kind_name = '__Stat_Ns_Total__'
  query.kind.add().name = kind_name

  # Newest first, and only the single most recent entity.
  datastore_helper.add_property_orders(query, "-timestamp")
  query.limit.value = 1
  return query
def make_kind_stats_query(namespace, kind, latest_timestamp):
  """Make a Query to fetch the latest kind statistics.

  Args:
    namespace: Cloud Datastore namespace, or None for the default namespace.
    kind: Entity kind whose stats entity is looked up.
    latest_timestamp: Timestamp of the stats snapshot to read (see
      make_latest_timestamp_query).
  """
  kind_stat_query = query_pb2.Query()
  if namespace is None:
    kind_stat_query.kind.add().name = '__Stat_Kind__'
  else:
    kind_stat_query.kind.add().name = '__Stat_Ns_Kind__'

  # NOTE: ``unicode`` makes this module Python 2 only.
  kind_filter = datastore_helper.set_property_filter(
      query_pb2.Filter(), 'kind_name', PropertyFilter.EQUAL, unicode(kind))
  timestamp_filter = datastore_helper.set_property_filter(
      query_pb2.Filter(), 'timestamp', PropertyFilter.EQUAL,
      latest_timestamp)

  # AND the two filters: stats for this kind at the latest timestamp.
  datastore_helper.set_composite_filter(kind_stat_query.filter,
                                        CompositeFilter.AND, kind_filter,
                                        timestamp_filter)
  return kind_stat_query
class QueryIterator(object):
  """A iterator class for entities of a given query.

  Entities are read in batches. Retries on failures.
  """
  _NOT_FINISHED = query_pb2.QueryResultBatch.NOT_FINISHED
  # Maximum number of results to request per query.
  _BATCH_SIZE = 500

  def __init__(self, project, namespace, query, datastore):
    self._query = query
    self._datastore = datastore
    self._project = project
    self._namespace = namespace
    # Cursor of the last batch; None before the first request.
    self._start_cursor = None
    # Remaining entities to read; an unset/zero query limit means
    # effectively unlimited (py2 sys.maxint).
    self._limit = self._query.limit.value or sys.maxint
    self._req = make_request(project, namespace, query)

  @retry.with_exponential_backoff(num_retries=5,
                                  retry_filter=retry_on_rpc_error)
  def _next_batch(self):
    """Fetches the next batch of entities."""
    if self._start_cursor is not None:
      self._req.query.start_cursor = self._start_cursor

    # set batch size, capped by the caller's remaining limit
    self._req.query.limit.value = min(self._BATCH_SIZE, self._limit)
    resp = self._datastore.run_query(self._req)
    return resp

  def __iter__(self):
    """Yields entities batch by batch until the limit or data is exhausted."""
    more_results = True
    while more_results:
      resp = self._next_batch()
      for entity_result in resp.batch.entity_results:
        yield entity_result.entity

      # Advance the cursor and the remaining-entity budget.
      self._start_cursor = resp.batch.end_cursor
      num_results = len(resp.batch.entity_results)
      self._limit -= num_results

      # Check if we need to read more entities.
      # True when query limit hasn't been satisfied and there are more entities
      # to be read. The latter is true if the response has a status
      # `NOT_FINISHED` or if the number of results read in the previous batch
      # is equal to `_BATCH_SIZE` (all indications that there is more data be
      # read).
      more_results = ((self._limit > 0) and
                      ((num_results == self._BATCH_SIZE) or
                       (resp.batch.more_results == self._NOT_FINISHED)))
| {
"content_hash": "64a16d4830bb6624c1b0ed6f4ad0c0f3",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 29.812,
"alnum_prop": 0.6846907285656783,
"repo_name": "axbaretto/beam",
"id": "1497862a0b903033e6b0c28788589833fba15d85",
"size": "8238",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/datastore/v1/helper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""Signs and aligns an APK."""
import argparse
import logging
import shutil
import subprocess
import sys
import tempfile
from util import build_utils
def FinalizeApk(apksigner_path,
                zipalign_path,
                unsigned_apk_path,
                final_apk_path,
                key_path,
                key_passwd,
                key_name,
                min_sdk_version,
                warnings_as_errors=False):
  """Zipaligns (optionally) and signs an APK with apksigner.

  Args:
    apksigner_path: Path to the apksigner jar.
    zipalign_path: Path to zipalign; falsy to skip the alignment step.
    unsigned_apk_path: Input (unsigned) APK path.
    final_apk_path: Where to write the signed, aligned APK.
    key_path: Keystore file path.
    key_passwd: Keystore password.
    key_name: Key alias within the keystore.
    min_sdk_version: Minimum supported Android SDK level; selects the
      signature scheme options below.
    warnings_as_errors: Fail if the invoked tools emit warnings.
  """
  # Use a tempfile so that Ctrl-C does not leave the file with a fresh mtime
  # and a corrupted state.
  with tempfile.NamedTemporaryFile() as staging_file:
    if zipalign_path:
      # v2 signing requires that zipalign happen first.
      logging.debug('Running zipalign')
      zipalign_cmd = [
          zipalign_path, '-p', '-f', '4', unsigned_apk_path, staging_file.name
      ]
      build_utils.CheckOutput(zipalign_cmd,
                              print_stdout=True,
                              fail_on_output=warnings_as_errors)
      signer_input_path = staging_file.name
    else:
      signer_input_path = unsigned_apk_path

    sign_cmd = build_utils.JavaCmd(warnings_as_errors) + [
        '-jar',
        apksigner_path,
        'sign',
        '--in',
        signer_input_path,
        '--out',
        staging_file.name,
        '--ks',
        key_path,
        '--ks-key-alias',
        key_name,
        '--ks-pass',
        'pass:' + key_passwd,
    ]
    # V3 signing adds security niceties, which are irrelevant for local builds.
    sign_cmd += ['--v3-signing-enabled', 'false']
    if min_sdk_version >= 24:
      # Disable v1 signatures when v2 signing can be used (it's much faster).
      # By default, both v1 and v2 signing happen.
      sign_cmd += ['--v1-signing-enabled', 'false']
    else:
      # Force SHA-1 (makes signing faster; insecure is fine for local builds).
      # Leave v2 signing enabled since it verifies faster on device when
      # supported.
      sign_cmd += ['--min-sdk-version', '1']
    logging.debug('Signing apk')
    build_utils.CheckOutput(sign_cmd,
                            print_stdout=True,
                            fail_on_output=warnings_as_errors)
    shutil.move(staging_file.name, final_apk_path)
    # After the move the temp file no longer exists at its original path,
    # so tell NamedTemporaryFile not to delete it on context exit.
    # TODO(crbug.com/1174969): Remove this once Python2 is obsoleted.
    if sys.version_info.major == 2:
      staging_file.delete = False
    else:
      # Reaches into tempfile internals (_closer is private to CPython 3).
      staging_file._closer.delete = False
| {
"content_hash": "820e340eec489de3f552b4e03ed61337",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 32.17333333333333,
"alnum_prop": 0.5797762121840033,
"repo_name": "nwjs/chromium.src",
"id": "f7581c7e9429034c1e297a80a754cbba57bbb6eb",
"size": "2553",
"binary": false,
"copies": "11",
"ref": "refs/heads/nw70",
"path": "build/android/gyp/finalize_apk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Declare the ``admin_objects`` manager on the Award model.

    Auto-generated migration; schema is unchanged (managers only).
    """

    dependencies = [
        ('kbadge', '0002_auto_20181023_1319'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='award',
            managers=[
                ('admin_objects', django.db.models.manager.Manager()),
            ],
        ),
    ]
| {
"content_hash": "8fd81d0a698dc088d51856ce8fc2b816",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 21.65,
"alnum_prop": 0.581986143187067,
"repo_name": "mozilla/kitsune",
"id": "2553c4671de1a0d7229d2f170959e207d47271fe",
"size": "507",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "kitsune/kbadge/migrations/0003_auto_20190816_1824.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "HTML",
"bytes": "535448"
},
{
"name": "JavaScript",
"bytes": "658477"
},
{
"name": "Jinja",
"bytes": "4837"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Nunjucks",
"bytes": "68656"
},
{
"name": "Python",
"bytes": "2827116"
},
{
"name": "SCSS",
"bytes": "240092"
},
{
"name": "Shell",
"bytes": "10759"
},
{
"name": "Svelte",
"bytes": "26864"
}
],
"symlink_target": ""
} |
"""Tests for msgutil module."""
import array
import Queue
import struct
import unittest
import zlib
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor
from mod_pywebsocket.extensions import PerFrameCompressionExtensionProcessor
from mod_pywebsocket.extensions import PerMessageCompressionExtensionProcessor
from mod_pywebsocket import msgutil
from mod_pywebsocket.stream import InvalidUTF8Exception
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
from test import mock
# We use one fixed nonce for testing instead of cryptographically secure PRNG.
_MASKING_NONCE = 'ABCD'
def _mask_hybi(frame):
frame_key = map(ord, _MASKING_NONCE)
frame_key_len = len(frame_key)
result = array.array('B')
result.fromstring(frame)
count = 0
for i in xrange(len(result)):
result[i] ^= frame_key[count]
count = (count + 1) % frame_key_len
return _MASKING_NONCE + result.tostring()
def _install_extension_processor(processor, request, stream_options):
    """Register |processor| on |request| if it accepts the extension offer."""
    extension_response = processor.get_extension_response()
    if extension_response is None:
        return
    processor.setup_stream_options(stream_options)
    request.ws_extension_processors.append(processor)
def _create_request_from_rawdata(
        read_data, deflate_stream=False, deflate_frame_request=None,
        perframe_compression_request=None, permessage_compression_request=None):
    """Build a MockRequest whose stream reads from the given raw frame bytes.

    At most one of the three compression-extension request parameters is
    honored, checked in the order listed below.
    """
    req = mock.MockRequest(connection=mock.MockConn(''.join(read_data)))
    req.ws_version = common.VERSION_HYBI_LATEST
    stream_options = StreamOptions()
    stream_options.deflate_stream = deflate_stream
    req.ws_extension_processors = []
    if deflate_frame_request is not None:
        processor = DeflateFrameExtensionProcessor(deflate_frame_request)
        _install_extension_processor(processor, req, stream_options)
    elif perframe_compression_request is not None:
        processor = PerFrameCompressionExtensionProcessor(
            perframe_compression_request)
        _install_extension_processor(processor, req, stream_options)
    elif permessage_compression_request is not None:
        processor = PerMessageCompressionExtensionProcessor(
            permessage_compression_request)
        _install_extension_processor(processor, req, stream_options)

    req.ws_stream = Stream(req, stream_options)
    return req
def _create_request(*frames):
    """Creates MockRequest using data given as frames.

    frames will be returned on calling request.connection.read() where request
    is MockRequest returned by this function.
    """
    read_data = [header + _mask_hybi(body) for (header, body) in frames]
    return _create_request_from_rawdata(read_data)
def _create_blocking_request():
    """Creates MockRequest backed by a blocking connection.

    Data written to a MockRequest can be read out by calling
    request.connection.written_data().
    """
    request = mock.MockRequest(connection=mock.MockBlockingConn())
    request.ws_version = common.VERSION_HYBI_LATEST
    request.ws_stream = Stream(request, StreamOptions())
    return request
def _create_request_hixie75(read_data=''):
    """Creates a MockRequest with a Hixie75 stream over |read_data|."""
    request = mock.MockRequest(connection=mock.MockConn(read_data))
    request.ws_stream = StreamHixie75(request)
    return request
def _create_blocking_request_hixie75():
    """Creates a blocking MockRequest with a Hixie75 stream."""
    request = mock.MockRequest(connection=mock.MockBlockingConn())
    request.ws_stream = StreamHixie75(request)
    return request
class MessageTest(unittest.TestCase):
    """Tests for the HyBi Stream class.

    Covers text framing (7-bit, 16-bit and 64-bit payload lengths),
    fragmentation, the deflate-stream / deflate-frame / perframe- and
    permessage-compression extensions, and the close/ping/pong control
    frames.  Frame bytes: 0x81 = FIN + text opcode, 0xc1 adds RSV1
    (compressed), 0x88/0x89/0x8a = close/ping/pong; in a masked length
    byte the top bit (0x80) is the mask flag.
    """
    def test_send_message(self):
        request = _create_request()
        msgutil.send_message(request, 'Hello')
        self.assertEqual('\x81\x05Hello', request.connection.written_data())
        # 125 is the largest payload expressible in the 7-bit length field.
        payload = 'a' * 125
        request = _create_request()
        msgutil.send_message(request, payload)
        self.assertEqual('\x81\x7d' + payload,
                         request.connection.written_data())
    def test_send_medium_message(self):
        # 126 bytes forces the 2-byte extended length field (0x7e marker).
        payload = 'a' * 126
        request = _create_request()
        msgutil.send_message(request, payload)
        self.assertEqual('\x81\x7e\x00\x7e' + payload,
                         request.connection.written_data())
        payload = 'a' * ((1 << 16) - 1)
        request = _create_request()
        msgutil.send_message(request, payload)
        self.assertEqual('\x81\x7e\xff\xff' + payload,
                         request.connection.written_data())
    def test_send_large_message(self):
        # 2^16 bytes forces the 8-byte extended length field (0x7f marker).
        payload = 'a' * (1 << 16)
        request = _create_request()
        msgutil.send_message(request, payload)
        self.assertEqual('\x81\x7f\x00\x00\x00\x00\x00\x01\x00\x00' + payload,
                         request.connection.written_data())
    def test_send_message_unicode(self):
        request = _create_request()
        msgutil.send_message(request, u'\u65e5')
        # U+65e5 is encoded as e6,97,a5 in UTF-8
        self.assertEqual('\x81\x03\xe6\x97\xa5',
                         request.connection.written_data())
    def test_send_message_fragments(self):
        # end=False sends a non-final fragment; opcode 0x01 starts the text
        # message, 0x00 continues it, and the final fragment sets FIN (0x80).
        request = _create_request()
        msgutil.send_message(request, 'Hello', False)
        msgutil.send_message(request, ' ', False)
        msgutil.send_message(request, 'World', False)
        msgutil.send_message(request, '!', True)
        self.assertEqual('\x01\x05Hello\x00\x01 \x00\x05World\x80\x01!',
                         request.connection.written_data())
    def test_send_fragments_immediate_zero_termination(self):
        request = _create_request()
        msgutil.send_message(request, 'Hello World!', False)
        msgutil.send_message(request, '', True)
        self.assertEqual('\x01\x0cHello World!\x80\x00',
                         request.connection.written_data())
    def test_send_message_deflate_stream(self):
        # deflate-stream compresses the whole byte stream, frame header
        # included, with one sync-flushed zlib stream.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        request = _create_request_from_rawdata('', deflate_stream=True)
        msgutil.send_message(request, 'Hello')
        expected = compress.compress('\x81\x05Hello')
        expected += compress.flush(zlib.Z_SYNC_FLUSH)
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_deflate_frame(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        request = _create_request_from_rawdata(
            '', deflate_frame_request=extension)
        msgutil.send_message(request, 'Hello')
        msgutil.send_message(request, 'World')
        expected = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        # Strip the 00 00 ff ff tail appended by Z_SYNC_FLUSH; the extension
        # transmits frames without it.
        compressed_hello = compressed_hello[:-4]
        expected += '\xc1%c' % len(compressed_hello)
        expected += compressed_hello
        # The second message reuses the same compressor (context takeover).
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_world = compressed_world[:-4]
        expected += '\xc1%c' % len(compressed_world)
        expected += compressed_world
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_deflate_frame_bfinal(self):
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        request = _create_request_from_rawdata(
            '', deflate_frame_request=extension)
        self.assertEquals(1, len(request.ws_extension_processors))
        deflate_frame_processor = request.ws_extension_processors[0]
        deflate_frame_processor.set_bfinal(True)
        msgutil.send_message(request, 'Hello')
        msgutil.send_message(request, 'World')
        expected = ''
        # With bfinal each frame is a complete (Z_FINISH) deflate block
        # followed by a one-byte empty block, and no context is carried over,
        # so a fresh compressor is used per frame.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_FINISH)
        compressed_hello = compressed_hello + chr(0)
        expected += '\xc1%c' % len(compressed_hello)
        expected += compressed_hello
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_FINISH)
        compressed_world = compressed_world + chr(0)
        expected += '\xc1%c' % len(compressed_world)
        expected += compressed_world
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_deflate_frame_comp_bit(self):
        # Toggling outgoing compression off/on: the middle frame is sent
        # uncompressed (plain 0x81) while the compressor context survives.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        request = _create_request_from_rawdata(
            '', deflate_frame_request=extension)
        self.assertEquals(1, len(request.ws_extension_processors))
        deflate_frame_processor = request.ws_extension_processors[0]
        msgutil.send_message(request, 'Hello')
        deflate_frame_processor.disable_outgoing_compression()
        msgutil.send_message(request, 'Hello')
        deflate_frame_processor.enable_outgoing_compression()
        msgutil.send_message(request, 'Hello')
        expected = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        expected += '\xc1%c' % len(compressed_hello)
        expected += compressed_hello
        expected += '\x81\x05Hello'
        compressed_2nd_hello = compress.compress('Hello')
        compressed_2nd_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_2nd_hello = compressed_2nd_hello[:-4]
        expected += '\xc1%c' % len(compressed_2nd_hello)
        expected += compressed_2nd_hello
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_deflate_frame_no_context_takeover_parameter(self):
        # With no_context_takeover every frame compresses identically, so the
        # expected bytes are simply repeated three times.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        extension.add_parameter('no_context_takeover', None)
        request = _create_request_from_rawdata(
            '', deflate_frame_request=extension)
        for i in xrange(3):
            msgutil.send_message(request, 'Hello')
        compressed_message = compress.compress('Hello')
        compressed_message += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_message = compressed_message[:-4]
        expected = '\xc1%c' % len(compressed_message)
        expected += compressed_message
        self.assertEqual(
            expected + expected + expected, request.connection.written_data())
    def test_deflate_frame_bad_request_parameters(self):
        """Tests that if there's anything wrong with deflate-frame extension
        request, deflate-frame is rejected.
        """
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        # max_window_bits less than 8 is illegal.
        extension.add_parameter('max_window_bits', '7')
        processor = DeflateFrameExtensionProcessor(extension)
        self.assertEqual(None, processor.get_extension_response())
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        # max_window_bits greater than 15 is illegal.
        extension.add_parameter('max_window_bits', '16')
        processor = DeflateFrameExtensionProcessor(extension)
        self.assertEqual(None, processor.get_extension_response())
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        # Non integer max_window_bits is illegal.
        extension.add_parameter('max_window_bits', 'foobar')
        processor = DeflateFrameExtensionProcessor(extension)
        self.assertEqual(None, processor.get_extension_response())
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        # no_context_takeover must not have any value.
        extension.add_parameter('no_context_takeover', 'foobar')
        processor = DeflateFrameExtensionProcessor(extension)
        self.assertEqual(None, processor.get_extension_response())
    def test_deflate_frame_response_parameters(self):
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        processor = DeflateFrameExtensionProcessor(extension)
        processor.set_response_window_bits(8)
        response = processor.get_extension_response()
        self.assertTrue(response.has_parameter('max_window_bits'))
        self.assertEqual('8', response.get_parameter_value('max_window_bits'))
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        processor = DeflateFrameExtensionProcessor(extension)
        processor.set_response_no_context_takeover(True)
        response = processor.get_extension_response()
        self.assertTrue(response.has_parameter('no_context_takeover'))
        self.assertTrue(
            response.get_parameter_value('no_context_takeover') is None)
    def test_permessage_compress_deflate_response_parameters(self):
        extension = common.ExtensionParameter(
            common.PERMESSAGE_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        processor = PerMessageCompressionExtensionProcessor(extension)
        response = processor.get_extension_response()
        self.assertEqual('deflate',
                         response.get_parameter_value('method'))
        extension = common.ExtensionParameter(
            common.PERMESSAGE_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        processor = PerMessageCompressionExtensionProcessor(extension)
        # The hook lets the server attach parameters to the inner deflate
        # method of the negotiated response.
        def _compression_processor_hook(compression_processor):
            compression_processor.set_c2s_max_window_bits(8)
            compression_processor.set_c2s_no_context_takeover(True)
        processor.set_compression_processor_hook(
            _compression_processor_hook)
        response = processor.get_extension_response()
        self.assertEqual(
            'deflate; c2s_max_window_bits=8; c2s_no_context_takeover',
            response.get_parameter_value('method'))
    def test_send_message_perframe_compress_deflate(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        extension = common.ExtensionParameter(
            common.PERFRAME_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        request = _create_request_from_rawdata(
            '', perframe_compression_request=extension)
        msgutil.send_message(request, 'Hello')
        msgutil.send_message(request, 'World')
        expected = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        expected += '\xc1%c' % len(compressed_hello)
        expected += compressed_hello
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_world = compressed_world[:-4]
        expected += '\xc1%c' % len(compressed_world)
        expected += compressed_world
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_permessage_compress_deflate(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        extension = common.ExtensionParameter(
            common.PERMESSAGE_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        request = _create_request_from_rawdata(
            '', permessage_compression_request=extension)
        msgutil.send_message(request, 'Hello')
        msgutil.send_message(request, 'World')
        expected = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        expected += '\xc1%c' % len(compressed_hello)
        expected += compressed_hello
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_world = compressed_world[:-4]
        expected += '\xc1%c' % len(compressed_world)
        expected += compressed_world
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_permessage_compress_deflate_fragmented(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        extension = common.ExtensionParameter(
            common.PERMESSAGE_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        request = _create_request_from_rawdata(
            '', permessage_compression_request=extension)
        msgutil.send_message(request, 'Hello', end=False)
        msgutil.send_message(request, 'World', end=True)
        expected = ''
        # The first frame will be an empty frame and FIN=0 and RSV1=1.
        expected += '\x41\x00'
        # The whole message body is compressed as one unit and carried in the
        # final continuation frame (0x80).
        compressed_message = compress.compress('HelloWorld')
        compressed_message += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_message = compressed_message[:-4]
        expected += '\x80%c' % len(compressed_message)
        expected += compressed_message
        self.assertEqual(expected, request.connection.written_data())
    def test_send_message_permessage_compress_deflate_fragmented_bfinal(self):
        extension = common.ExtensionParameter(
            common.PERMESSAGE_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        request = _create_request_from_rawdata(
            '', permessage_compression_request=extension)
        self.assertEquals(1, len(request.ws_extension_processors))
        compression_processor = (
            request.ws_extension_processors[0].get_compression_processor())
        compression_processor.set_bfinal(True)
        msgutil.send_message(request, 'Hello', end=False)
        msgutil.send_message(request, 'World', end=True)
        expected = ''
        # With bfinal, each fragment is an independent finished deflate
        # stream plus a one-byte empty block; 0x41 = text opcode with RSV1.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_FINISH)
        compressed_hello = compressed_hello + chr(0)
        expected += '\x41%c' % len(compressed_hello)
        expected += compressed_hello
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_FINISH)
        compressed_world = compressed_world + chr(0)
        expected += '\x80%c' % len(compressed_world)
        expected += compressed_world
        self.assertEqual(expected, request.connection.written_data())
    def test_receive_message(self):
        # Incoming frames are masked: 0x85 = mask bit | length 5.
        request = _create_request(
            ('\x81\x85', 'Hello'), ('\x81\x86', 'World!'))
        self.assertEqual('Hello', msgutil.receive_message(request))
        self.assertEqual('World!', msgutil.receive_message(request))
        payload = 'a' * 125
        request = _create_request(('\x81\xfd', payload))
        self.assertEqual(payload, msgutil.receive_message(request))
    def test_receive_medium_message(self):
        payload = 'a' * 126
        request = _create_request(('\x81\xfe\x00\x7e', payload))
        self.assertEqual(payload, msgutil.receive_message(request))
        payload = 'a' * ((1 << 16) - 1)
        request = _create_request(('\x81\xfe\xff\xff', payload))
        self.assertEqual(payload, msgutil.receive_message(request))
    def test_receive_large_message(self):
        payload = 'a' * (1 << 16)
        request = _create_request(
            ('\x81\xff\x00\x00\x00\x00\x00\x01\x00\x00', payload))
        self.assertEqual(payload, msgutil.receive_message(request))
    def test_receive_length_not_encoded_using_minimal_number_of_bytes(self):
        # Log warning on receiving bad payload length field that doesn't use
        # minimal number of bytes but continue processing.
        payload = 'a'
        # 1 byte can be represented without extended payload length field.
        request = _create_request(
            ('\x81\xff\x00\x00\x00\x00\x00\x00\x00\x01', payload))
        self.assertEqual(payload, msgutil.receive_message(request))
    def test_receive_message_unicode(self):
        request = _create_request(('\x81\x83', '\xe6\x9c\xac'))
        # U+672c is encoded as e6,9c,ac in UTF-8
        self.assertEqual(u'\u672c', msgutil.receive_message(request))
    def test_receive_message_erroneous_unicode(self):
        # \x80 and \x81 are invalid as UTF-8.
        request = _create_request(('\x81\x82', '\x80\x81'))
        # Invalid characters should raise InvalidUTF8Exception
        self.assertRaises(InvalidUTF8Exception,
                          msgutil.receive_message,
                          request)
    def test_receive_fragments(self):
        request = _create_request(
            ('\x01\x85', 'Hello'),
            ('\x00\x81', ' '),
            ('\x00\x85', 'World'),
            ('\x80\x81', '!'))
        self.assertEqual('Hello World!', msgutil.receive_message(request))
    def test_receive_fragments_unicode(self):
        # UTF-8 encodes U+6f22 into e6bca2 and U+5b57 into e5ad97.
        # The multi-byte characters are deliberately split across fragment
        # boundaries; decoding must happen on the reassembled message.
        request = _create_request(
            ('\x01\x82', '\xe6\xbc'),
            ('\x00\x82', '\xa2\xe5'),
            ('\x80\x82', '\xad\x97'))
        self.assertEqual(u'\u6f22\u5b57', msgutil.receive_message(request))
    def test_receive_fragments_immediate_zero_termination(self):
        request = _create_request(
            ('\x01\x8c', 'Hello World!'), ('\x80\x80', ''))
        self.assertEqual('Hello World!', msgutil.receive_message(request))
    def test_receive_fragments_duplicate_start(self):
        # A second start-of-message fragment before the first is finished is
        # a protocol violation.
        request = _create_request(
            ('\x01\x85', 'Hello'), ('\x01\x85', 'World'))
        self.assertRaises(msgutil.InvalidFrameException,
                          msgutil.receive_message,
                          request)
    def test_receive_fragments_intermediate_but_not_started(self):
        request = _create_request(('\x00\x85', 'Hello'))
        self.assertRaises(msgutil.InvalidFrameException,
                          msgutil.receive_message,
                          request)
    def test_receive_fragments_end_but_not_started(self):
        request = _create_request(('\x80\x85', 'Hello'))
        self.assertRaises(msgutil.InvalidFrameException,
                          msgutil.receive_message,
                          request)
    def test_receive_message_discard(self):
        # 0x8f is a reserved (unsupported) opcode; such frames raise but the
        # stream remains usable for the following frames.
        request = _create_request(
            ('\x8f\x86', 'IGNORE'), ('\x81\x85', 'Hello'),
            ('\x8f\x89', 'DISREGARD'), ('\x81\x86', 'World!'))
        self.assertRaises(msgutil.UnsupportedFrameException,
                          msgutil.receive_message, request)
        self.assertEqual('Hello', msgutil.receive_message(request))
        self.assertRaises(msgutil.UnsupportedFrameException,
                          msgutil.receive_message, request)
        self.assertEqual('World!', msgutil.receive_message(request))
    def test_receive_close(self):
        # A close frame carries a 2-byte status code followed by the reason.
        request = _create_request(
            ('\x88\x8a', struct.pack('!H', 1000) + 'Good bye'))
        self.assertEqual(None, msgutil.receive_message(request))
        self.assertEqual(1000, request.ws_close_code)
        self.assertEqual('Good bye', request.ws_close_reason)
    def test_receive_message_deflate_stream(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = compress.compress('\x81\x85' + _mask_hybi('Hello'))
        data += compress.flush(zlib.Z_SYNC_FLUSH)
        data += compress.compress('\x81\x89' + _mask_hybi('WebSocket'))
        data += compress.flush(zlib.Z_FINISH)
        # The sender may restart the deflate stream mid-connection.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        data += compress.compress('\x81\x85' + _mask_hybi('World'))
        data += compress.flush(zlib.Z_SYNC_FLUSH)
        # Close frame
        data += compress.compress(
            '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye'))
        data += compress.flush(zlib.Z_SYNC_FLUSH)
        request = _create_request_from_rawdata(data, deflate_stream=True)
        self.assertEqual('Hello', msgutil.receive_message(request))
        self.assertEqual('WebSocket', msgutil.receive_message(request))
        self.assertEqual('World', msgutil.receive_message(request))
        # Remaining data is drained only once the close frame is processed.
        self.assertFalse(request.drain_received_data_called)
        self.assertEqual(None, msgutil.receive_message(request))
        self.assertTrue(request.drain_received_data_called)
    def test_receive_message_deflate_frame(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        # 0xc1 = FIN|RSV1|text; OR 0x80 into the length byte for the mask bit.
        data += '\xc1%c' % (len(compressed_hello) | 0x80)
        data += _mask_hybi(compressed_hello)
        compressed_websocket = compress.compress('WebSocket')
        compressed_websocket += compress.flush(zlib.Z_FINISH)
        compressed_websocket += '\x00'
        data += '\xc1%c' % (len(compressed_websocket) | 0x80)
        data += _mask_hybi(compressed_websocket)
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_world = compressed_world[:-4]
        data += '\xc1%c' % (len(compressed_world) | 0x80)
        data += _mask_hybi(compressed_world)
        # Close frame
        data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        request = _create_request_from_rawdata(
            data, deflate_frame_request=extension)
        self.assertEqual('Hello', msgutil.receive_message(request))
        self.assertEqual('WebSocket', msgutil.receive_message(request))
        self.assertEqual('World', msgutil.receive_message(request))
        self.assertEqual(None, msgutil.receive_message(request))
    def test_receive_message_deflate_frame_client_using_smaller_window(self):
        """Test that frames coming from a client which is using smaller window
        size that the server are correctly received.
        """
        # Using the smallest window bits of 8 for generating input frames.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -8)
        data = ''
        # Use a frame whose content is bigger than the clients' DEFLATE window
        # size before compression. The content mainly consists of 'a' but
        # repetition of 'b' is put at the head and tail so that if the window
        # size is big, the head is back-referenced but if small, not.
        payload = 'b' * 64 + 'a' * 1024 + 'b' * 64
        compressed_hello = compress.compress(payload)
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        data += '\xc1%c' % (len(compressed_hello) | 0x80)
        data += _mask_hybi(compressed_hello)
        # Close frame
        data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        request = _create_request_from_rawdata(
            data, deflate_frame_request=extension)
        self.assertEqual(payload, msgutil.receive_message(request))
        self.assertEqual(None, msgutil.receive_message(request))
    def test_receive_message_deflate_frame_comp_bit(self):
        # Mixed stream: compressed, plain (RSV1 clear), compressed again.
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        data += '\xc1%c' % (len(compressed_hello) | 0x80)
        data += _mask_hybi(compressed_hello)
        data += '\x81\x85' + _mask_hybi('Hello')
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_2nd_hello = compress.compress('Hello')
        compressed_2nd_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_2nd_hello = compressed_2nd_hello[:-4]
        data += '\xc1%c' % (len(compressed_2nd_hello) | 0x80)
        data += _mask_hybi(compressed_2nd_hello)
        extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
        request = _create_request_from_rawdata(
            data, deflate_frame_request=extension)
        for i in xrange(3):
            self.assertEqual('Hello', msgutil.receive_message(request))
    def test_receive_message_perframe_compression_frame(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = ''
        compressed_hello = compress.compress('Hello')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        data += '\xc1%c' % (len(compressed_hello) | 0x80)
        data += _mask_hybi(compressed_hello)
        compressed_websocket = compress.compress('WebSocket')
        compressed_websocket += compress.flush(zlib.Z_FINISH)
        compressed_websocket += '\x00'
        data += '\xc1%c' % (len(compressed_websocket) | 0x80)
        data += _mask_hybi(compressed_websocket)
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_world = compressed_world[:-4]
        data += '\xc1%c' % (len(compressed_world) | 0x80)
        data += _mask_hybi(compressed_world)
        # Close frame
        data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
        extension = common.ExtensionParameter(
            common.PERFRAME_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        request = _create_request_from_rawdata(
            data, perframe_compression_request=extension)
        self.assertEqual('Hello', msgutil.receive_message(request))
        self.assertEqual('WebSocket', msgutil.receive_message(request))
        self.assertEqual('World', msgutil.receive_message(request))
        self.assertEqual(None, msgutil.receive_message(request))
    def test_receive_message_permessage_deflate_compression(self):
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = ''
        # One compressed message split across two frames: 0x41 starts the
        # text message with RSV1 set, 0x80 is the final continuation frame.
        compressed_hello = compress.compress('HelloWebSocket')
        compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_hello = compressed_hello[:-4]
        split_position = len(compressed_hello) / 2
        data += '\x41%c' % (split_position | 0x80)
        data += _mask_hybi(compressed_hello[:split_position])
        data += '\x80%c' % ((len(compressed_hello) - split_position) | 0x80)
        data += _mask_hybi(compressed_hello[split_position:])
        compress = zlib.compressobj(
            zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
        compressed_world = compress.compress('World')
        compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
        compressed_world = compressed_world[:-4]
        data += '\xc1%c' % (len(compressed_world) | 0x80)
        data += _mask_hybi(compressed_world)
        # Close frame
        data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
        extension = common.ExtensionParameter(
            common.PERMESSAGE_COMPRESSION_EXTENSION)
        extension.add_parameter('method', 'deflate')
        request = _create_request_from_rawdata(
            data, permessage_compression_request=extension)
        self.assertEqual('HelloWebSocket', msgutil.receive_message(request))
        self.assertEqual('World', msgutil.receive_message(request))
        self.assertEqual(None, msgutil.receive_message(request))
    def test_send_longest_close(self):
        # 123 reason bytes + 2 status bytes = 125, the control-frame maximum.
        reason = 'a' * 123
        request = _create_request(
            ('\x88\xfd',
             struct.pack('!H', common.STATUS_NORMAL_CLOSURE) + reason))
        request.ws_stream.close_connection(common.STATUS_NORMAL_CLOSURE,
                                           reason)
        self.assertEqual(request.ws_close_code, common.STATUS_NORMAL_CLOSURE)
        self.assertEqual(request.ws_close_reason, reason)
    def test_send_close_too_long(self):
        request = _create_request()
        self.assertRaises(msgutil.BadOperationException,
                          Stream.close_connection,
                          request.ws_stream,
                          common.STATUS_NORMAL_CLOSURE,
                          'a' * 124)
    def test_send_close_inconsistent_code_and_reason(self):
        request = _create_request()
        # reason parameter must not be specified when code is None.
        self.assertRaises(msgutil.BadOperationException,
                          Stream.close_connection,
                          request.ws_stream,
                          None,
                          'a')
    def test_send_ping(self):
        request = _create_request()
        msgutil.send_ping(request, 'Hello World!')
        self.assertEqual('\x89\x0cHello World!',
                         request.connection.written_data())
    def test_send_longest_ping(self):
        request = _create_request()
        msgutil.send_ping(request, 'a' * 125)
        self.assertEqual('\x89\x7d' + 'a' * 125,
                         request.connection.written_data())
    def test_send_ping_too_long(self):
        request = _create_request()
        self.assertRaises(msgutil.BadOperationException,
                          msgutil.send_ping,
                          request,
                          'a' * 126)
    def test_receive_ping(self):
        """Tests receiving a ping control frame."""
        def handler(request, message):
            request.called = True
        # Stream automatically respond to ping with pong without any action
        # by application layer.
        request = _create_request(
            ('\x89\x85', 'Hello'), ('\x81\x85', 'World'))
        self.assertEqual('World', msgutil.receive_message(request))
        self.assertEqual('\x8a\x05Hello',
                         request.connection.written_data())
        request = _create_request(
            ('\x89\x85', 'Hello'), ('\x81\x85', 'World'))
        request.on_ping_handler = handler
        self.assertEqual('World', msgutil.receive_message(request))
        self.assertTrue(request.called)
    def test_receive_longest_ping(self):
        request = _create_request(
            ('\x89\xfd', 'a' * 125), ('\x81\x85', 'World'))
        self.assertEqual('World', msgutil.receive_message(request))
        self.assertEqual('\x8a\x7d' + 'a' * 125,
                         request.connection.written_data())
    def test_receive_ping_too_long(self):
        request = _create_request(('\x89\xfe\x00\x7e', 'a' * 126))
        self.assertRaises(msgutil.InvalidFrameException,
                          msgutil.receive_message,
                          request)
    def test_receive_pong(self):
        """Tests receiving a pong control frame."""
        def handler(request, message):
            request.called = True
        request = _create_request(
            ('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
        request.on_pong_handler = handler
        msgutil.send_ping(request, 'Hello')
        self.assertEqual('\x89\x05Hello',
                         request.connection.written_data())
        # Valid pong is received, but receive_message won't return for it.
        self.assertEqual('World', msgutil.receive_message(request))
        # Check that nothing was written after receive_message call.
        self.assertEqual('\x89\x05Hello',
                         request.connection.written_data())
        self.assertTrue(request.called)
    def test_receive_unsolicited_pong(self):
        # Unsolicited pong is allowed from HyBi 07.
        request = _create_request(
            ('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
        msgutil.receive_message(request)
        request = _create_request(
            ('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
        msgutil.send_ping(request, 'Jumbo')
        # Body mismatch.
        msgutil.receive_message(request)
    def test_ping_cannot_be_fragmented(self):
        # 0x09 is a ping frame without the FIN bit set.
        request = _create_request(('\x09\x85', 'Hello'))
        self.assertRaises(msgutil.InvalidFrameException,
                          msgutil.receive_message,
                          request)
    def test_ping_with_too_long_payload(self):
        request = _create_request(('\x89\xfe\x01\x00', 'a' * 256))
        self.assertRaises(msgutil.InvalidFrameException,
                          msgutil.receive_message,
                          request)
class MessageTestHixie75(unittest.TestCase):
    """Exercises the draft-hixie-thewebsocketprotocol-76 stream class.

    Draft-76 text frames are delimited by a 0x00 lead byte and a 0xff
    terminator rather than the HyBi length-prefixed header.
    """

    def test_send_message(self):
        req = _create_request_hixie75()
        msgutil.send_message(req, 'Hello')
        self.assertEqual('\x00Hello\xff', req.connection.written_data())

    def test_send_message_unicode(self):
        req = _create_request_hixie75()
        msgutil.send_message(req, u'\u65e5')
        # U+65E5 encodes to the UTF-8 bytes e6 97 a5.
        self.assertEqual('\x00\xe6\x97\xa5\xff',
                         req.connection.written_data())

    def test_receive_message(self):
        req = _create_request_hixie75('\x00Hello\xff\x00World!\xff')
        for expected in ('Hello', 'World!'):
            self.assertEqual(expected, msgutil.receive_message(req))

    def test_receive_message_unicode(self):
        req = _create_request_hixie75('\x00\xe6\x9c\xac\xff')
        # The UTF-8 bytes e6 9c ac decode to U+672C.
        self.assertEqual(u'\u672c', msgutil.receive_message(req))

    def test_receive_message_erroneous_unicode(self):
        # \x80 and \x81 are invalid as UTF-8.
        req = _create_request_hixie75('\x00\x80\x81\xff')
        # Each invalid byte becomes U+FFFD REPLACEMENT CHARACTER.
        self.assertEqual(u'\ufffd\ufffd', msgutil.receive_message(req))

    def test_receive_message_discard(self):
        req = _create_request_hixie75('\x80\x06IGNORE\x00Hello\xff'
                                      '\x01DISREGARD\xff\x00World!\xff')
        self.assertEqual('Hello', msgutil.receive_message(req))
        self.assertEqual('World!', msgutil.receive_message(req))
class MessageReceiverTest(unittest.TestCase):
    """Exercises the HyBi Stream class through msgutil.MessageReceiver."""

    def test_queue(self):
        req = _create_blocking_request()
        receiver = msgutil.MessageReceiver(req)
        # Nothing arrived yet, so the non-blocking poll yields None.
        self.assertEqual(None, receiver.receive_nowait())
        req.connection.put_bytes('\x81\x86' + _mask_hybi('Hello!'))
        self.assertEqual('Hello!', receiver.receive())

    def test_onmessage(self):
        delivered = Queue.Queue()

        def on_message(message):
            delivered.put(message)

        req = _create_blocking_request()
        receiver = msgutil.MessageReceiver(req, on_message)
        req.connection.put_bytes('\x81\x86' + _mask_hybi('Hello!'))
        self.assertEqual('Hello!', delivered.get())
class MessageReceiverHixie75Test(unittest.TestCase):
    """Exercises the StreamHixie75 class through msgutil.MessageReceiver."""

    def test_queue(self):
        req = _create_blocking_request_hixie75()
        receiver = msgutil.MessageReceiver(req)
        # No bytes queued yet, so a non-blocking poll yields None.
        self.assertEqual(None, receiver.receive_nowait())
        req.connection.put_bytes('\x00Hello!\xff')
        self.assertEqual('Hello!', receiver.receive())

    def test_onmessage(self):
        delivered = Queue.Queue()

        def on_message(message):
            delivered.put(message)

        req = _create_blocking_request_hixie75()
        receiver = msgutil.MessageReceiver(req, on_message)
        req.connection.put_bytes('\x00Hello!\xff')
        self.assertEqual('Hello!', delivered.get())
class MessageSenderTest(unittest.TestCase):
    """Exercises the HyBi Stream class through msgutil.MessageSender."""

    def test_send(self):
        req = _create_blocking_request()
        sender = msgutil.MessageSender(req)
        sender.send('World')
        self.assertEqual('\x81\x05World', req.connection.written_data())

    def test_send_nowait(self):
        # Collect the written bytes via a queue:
        # req.connection.written_data() is unusable here because
        # MessageSender performs the writes on a separate thread.
        written = Queue.Queue()

        def capture(bytes):
            written.put(bytes)

        req = _create_blocking_request()
        req.connection.write = capture
        sender = msgutil.MessageSender(req)
        sender.send_nowait('Hello')
        sender.send_nowait('World')
        self.assertEqual('\x81\x05Hello', written.get())
        self.assertEqual('\x81\x05World', written.get())
class MessageSenderHixie75Test(unittest.TestCase):
    """Exercises the StreamHixie75 class through msgutil.MessageSender."""

    def test_send(self):
        req = _create_blocking_request_hixie75()
        sender = msgutil.MessageSender(req)
        sender.send('World')
        self.assertEqual('\x00World\xff', req.connection.written_data())

    def test_send_nowait(self):
        # Collect the written bytes via a queue:
        # req.connection.written_data() is unusable here because
        # MessageSender performs the writes on a separate thread.
        written = Queue.Queue()

        def capture(bytes):
            written.put(bytes)

        req = _create_blocking_request_hixie75()
        req.connection.write = capture
        sender = msgutil.MessageSender(req)
        sender.send_nowait('Hello')
        sender.send_nowait('World')
        self.assertEqual('\x00Hello\xff', written.get())
        self.assertEqual('\x00World\xff', written.get())
# Run this module's test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
# vi:sts=4 sw=4 et
| {
"content_hash": "5da380ad8c138a11aebaff2a2c739eac",
"timestamp": "",
"source": "github",
"line_count": 1059,
"max_line_length": 78,
"avg_line_length": 40.961284230406044,
"alnum_prop": 0.6391488773110794,
"repo_name": "XiaonuoGantan/pywebsocket",
"id": "4a6922f07cb30d06851404e13f5d78cefc7baf38",
"size": "44932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_src/test/test_msgutil.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1568"
},
{
"name": "Python",
"bytes": "655896"
}
],
"symlink_target": ""
} |
import time
import datetime
import mock
from factory import SubFactory
from factory.fuzzy import FuzzyDateTime, FuzzyAttribute, FuzzyChoice
from mock import patch, Mock
import factory
import pytz
import factory.django
from factory.django import DjangoModelFactory
from django.apps import apps
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.db.utils import IntegrityError
from faker import Factory
from waffle.models import Flag, Sample, Switch
from website.notifications.constants import NOTIFICATION_TYPES
from osf.utils import permissions
from website.archiver import ARCHIVER_SUCCESS
from website.settings import FAKE_EMAIL_NAME, FAKE_EMAIL_DOMAIN
from framework.auth.core import Auth
from osf import models
from osf.models.sanctions import Sanction
from osf.models.storage import PROVIDER_ASSET_NAME_CHOICES
from osf.utils.names import impute_names_model
from osf.utils.workflows import DefaultStates, DefaultTriggers
from addons.osfstorage.models import OsfStorageFile, Region
fake = Factory.create()
# Uses a high-resolution timer so two fake emails generated in quick
# succession still differ.
# BUG FIX: time.clock() was deprecated since Python 3.3 and removed in 3.8;
# time.perf_counter() is its documented replacement and has at least the
# same resolution. (This file already requires Python 3 — it uses f-strings.)
# If tests are run on really old processors without high precision this might fail. Unlikely to occur.
fake_email = lambda: '{}+{}@{}'.format(FAKE_EMAIL_NAME, int(time.perf_counter() * 1000000), FAKE_EMAIL_DOMAIN)
# Do this out of a cls context to avoid setting "t" as a local
PROVIDER_ASSET_NAME_CHOICES = tuple([t[0] for t in PROVIDER_ASSET_NAME_CHOICES])
def get_default_metaschema():
    """This needs to be a method so it gets called after the test database is set up"""
    # Returns the first RegistrationSchema row, or None if none are loaded.
    return models.RegistrationSchema.objects.first()
def FakeList(provider, n, *args, **kwargs):
    """Return a list of ``n`` values from the faker provider named ``provider``."""
    generate = getattr(fake, provider)
    return [generate(*args, **kwargs) for _ in range(n)]
class UserFactory(DjangoModelFactory):
    """Factory for a registered, confirmed ``OSFUser``.

    Post-generation hooks split the fullname into name parts and make sure
    the username is also recorded as one of the user's email addresses.
    """
    # TODO: Change this to only generate long names and see what breaks
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    username = factory.LazyFunction(fake_email)
    # Hashes the literal password via OSFUser.set_password after generation.
    password = factory.PostGenerationMethodCall('set_password',
                                                'queenfan86')
    is_registered = True
    date_confirmed = factory.Faker('date_time_this_decade', tzinfo=pytz.utc)
    merged_by = None
    verification_key = None
    class Meta:
        model = models.OSFUser
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # An `emails` kwarg holds addresses to attach; the instance must be
        # saved first so the related manager can create rows.
        emails = kwargs.pop('emails', [])
        instance = super(DjangoModelFactory, cls)._build(target_class, *args, **kwargs)
        if emails:
            # Save for M2M population
            instance.set_unusable_password()
            instance.save()
            for email in emails:
                instance.emails.create(address=email)
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        emails = kwargs.pop('emails', [])
        instance = super(DjangoModelFactory, cls)._create(target_class, *args, **kwargs)
        if emails and not instance.pk:
            # Save for M2M population
            instance.set_unusable_password()
            instance.save()
            for email in emails:
                instance.emails.create(address=email)
        return instance
    @factory.post_generation
    def set_names(self, create, extracted):
        # Populate given/middle/family name fields from the fullname.
        parsed = impute_names_model(self.fullname)
        for key, value in parsed.items():
            setattr(self, key, value)
    @factory.post_generation
    def set_emails(self, create, extracted):
        # Record the username as an email address if it isn't one already.
        if not self.emails.filter(address=self.username).exists():
            if not self.id:
                if create:
                    # Perform implicit save to populate M2M
                    self.save(clean=False)
                else:
                    # This might lead to strange behavior
                    return
            self.emails.create(address=str(self.username).lower())
class AuthUserFactory(UserFactory):
    """A user that automatically has an api key, for quick authentication.
    Example: ::
        user = AuthUserFactory()
        res = self.app.get(url, auth=user.auth) # user is "logged in"
    """
    @factory.post_generation
    def add_auth(self, create, extracted):
        # Basic-auth tuple matching the password set in UserFactory.
        self.auth = (self.username, 'queenfan86')
class AuthFactory(factory.base.Factory):
    """Factory for framework.auth.core.Auth wrapping a fresh user."""
    class Meta:
        model = Auth
    user = factory.SubFactory(UserFactory)
class UnregUserFactory(DjangoModelFactory):
    """Factory for an unregistered (invited) user, built via
    ``OSFUser.create_unregistered``.
    """
    email = factory.LazyFunction(fake_email)
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    date_registered = factory.Faker('date_time', tzinfo=pytz.utc)
    class Meta:
        model = models.OSFUser
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        """Build an object without saving it."""
        ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
        # Any remaining kwargs are applied as plain attributes.
        for key, val in kwargs.items():
            setattr(ret, key, val)
        return ret
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Same as _build, but persisted.
        ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
        for key, val in kwargs.items():
            setattr(ret, key, val)
        ret.save()
        return ret
class UnconfirmedUserFactory(DjangoModelFactory):
    """Factory for a user that has not yet confirmed their primary email
    address (username).
    """
    class Meta:
        model = models.OSFUser
    username = factory.LazyFunction(fake_email)
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    password = 'lolomglgt'
    @classmethod
    def _build(cls, target_class, username, password, fullname):
        """Build an object without saving it."""
        instance = target_class.create_unconfirmed(
            username=username, password=password, fullname=fullname
        )
        instance.date_registered = fake.date_time(tzinfo=pytz.utc)
        return instance
    @classmethod
    def _create(cls, target_class, username, password, fullname):
        # Same as _build, but persisted.
        instance = target_class.create_unconfirmed(
            username=username, password=password, fullname=fullname
        )
        instance.date_registered = fake.date_time(tzinfo=pytz.utc)
        instance.save()
        return instance
class BaseNodeFactory(DjangoModelFactory):
    """Common defaults for Node-like factories (title, creator, timestamps)."""
    title = factory.Faker('catch_phrase')
    description = factory.Faker('sentence')
    created = factory.LazyFunction(timezone.now)
    creator = factory.SubFactory(AuthUserFactory)
    class Meta:
        model = models.Node
    #Fix for adding the deleted date.
    @classmethod
    def _create(cls, *args, **kwargs):
        # Passing is_deleted=True also stamps the `deleted` timestamp.
        if kwargs.get('is_deleted', None):
            kwargs['deleted'] = timezone.now()
        return super(BaseNodeFactory, cls)._create(*args, **kwargs)
class ProjectFactory(BaseNodeFactory):
    """Top-level node with category 'project'."""
    category = 'project'
class DraftNodeFactory(BaseNodeFactory):
    """Factory for DraftNode (node backing an unregistered draft)."""
    category = 'project'
    class Meta:
        model = models.DraftNode
class ProjectWithAddonFactory(ProjectFactory):
    """Factory for a project that has an addon. The addon will be added to
    both the Node and the creator records. ::
        p = ProjectWithAddonFactory(addon='github')
        p.get_addon('github') # => github node settings object
        p.creator.get_addon('github') # => github user settings object
    """
    # TODO: Should use mock addon objects
    @classmethod
    def _build(cls, target_class, addon='s3', *args, **kwargs):
        """Build an object without saving it."""
        instance = ProjectFactory._build(target_class, *args, **kwargs)
        auth = Auth(user=instance.creator)
        # Enable the addon on both the node and its creator.
        instance.add_addon(addon, auth)
        instance.creator.add_addon(addon)
        return instance
    @classmethod
    def _create(cls, target_class, addon='s3', *args, **kwargs):
        # Same as _build, but persisted.
        instance = ProjectFactory._create(target_class, *args, **kwargs)
        auth = Auth(user=instance.creator)
        instance.add_addon(addon, auth)
        instance.creator.add_addon(addon)
        instance.save()
        return instance
class NodeFactory(BaseNodeFactory):
    """Component node: a child of a freshly created project."""
    category = 'hypothesis'
    parent = factory.SubFactory(ProjectFactory)
class InstitutionFactory(DjangoModelFactory):
    """Factory for an Institution with fake branding, domains and SSO URLs."""
    name = factory.Faker('company')
    login_url = factory.Faker('url')
    logout_url = factory.Faker('url')
    domains = FakeList('url', n=3)
    email_domains = FakeList('domain_name', n=1)
    logo_name = factory.Faker('file_name')
    class Meta:
        model = models.Institution
class NodeLicenseRecordFactory(DjangoModelFactory):
    """Factory for NodeLicenseRecord; defaults to the 'No license' license."""
    year = factory.Faker('year')
    copyright_holders = FakeList('name', n=3)
    class Meta:
        model = models.NodeLicenseRecord
    @classmethod
    def _create(cls, *args, **kwargs):
        # Requires the 'No license' NodeLicense fixture to exist in the DB.
        kwargs['node_license'] = kwargs.get(
            'node_license',
            models.NodeLicense.objects.get(name='No license')
        )
        return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class NodeLogFactory(DjangoModelFactory):
    """Factory for a NodeLog entry; defaults to a file_added action."""
    class Meta:
        model = models.NodeLog
    action = 'file_added'
    params = {'path': '/'}
    user = SubFactory(UserFactory)
class PrivateLinkFactory(DjangoModelFactory):
    """Factory for view-only private links."""
    class Meta:
        model = models.PrivateLink
    name = factory.Sequence(lambda n: 'Example Private Link #{}'.format(n))
    key = factory.Faker('md5')
    anonymous = False
    creator = factory.SubFactory(UserFactory)
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        instance = super(PrivateLinkFactory, cls)._create(target_class, *args, **kwargs)
        # Keep the `deleted` timestamp consistent with the is_deleted flag.
        if instance.is_deleted and not instance.deleted:
            instance.deleted = timezone.now()
            instance.save()
        return instance
class CollectionFactory(DjangoModelFactory):
    """Factory for a Collection; populates collected_types after save."""
    class Meta:
        model = models.Collection
    is_bookmark_collection = False
    title = factory.Faker('catch_phrase')
    creator = factory.SubFactory(UserFactory)
    @classmethod
    def _create(cls, *args, **kwargs):
        # Default to every collectable content type unless overridden.
        collected_types = kwargs.pop('collected_types', ContentType.objects.filter(app_label='osf', model__in=['abstractnode', 'basefilenode', 'collection', 'preprint']))
        obj = cls._build(*args, **kwargs)
        obj.save()
        # M2M, requires initial save
        obj.collected_types.add(*collected_types)
        return obj
class BookmarkCollectionFactory(CollectionFactory):
    """Collection flagged as the user's bookmark collection."""
    is_bookmark_collection = True
class CollectionProviderFactory(DjangoModelFactory):
    """Factory for a CollectionProvider with a fake brand."""
    name = factory.Faker('company')
    description = factory.Faker('bs')
    external_url = factory.Faker('url')
    class Meta:
        model = models.CollectionProvider
    @classmethod
    def _create(cls, *args, **kwargs):
        # A creator must be set before save; saving generates primary_collection.
        user = kwargs.pop('creator', None)
        obj = cls._build(*args, **kwargs)
        obj._creator = user or UserFactory()  # Generates primary_collection
        obj.save()
        return obj
class RegistrationProviderFactory(DjangoModelFactory):
    """Factory for a RegistrationProvider.

    Accepts optional ``creator`` and ``_id`` kwargs; saving generates the
    provider's primary collection.
    """
    name = factory.Faker('company')
    description = factory.Faker('bs')
    external_url = factory.Faker('url')
    access_token = factory.Faker('bs')
    share_source = factory.Sequence(lambda n: 'share source #{0}'.format(n))
    class Meta:
        model = models.RegistrationProvider
    @classmethod
    def _create(cls, *args, **kwargs):
        user = kwargs.pop('creator', None)
        _id = kwargs.pop('_id', None)
        try:
            obj = cls._build(*args, **kwargs)
        except IntegrityError as e:
            # This is to ensure legacy tests don't fail when their _ids aren't unique.
            # BUG FIX: the original `pass`ed here, leaving `obj` unbound and
            # crashing with UnboundLocalError on the next line; return the
            # existing default provider instead.
            if _id == models.RegistrationProvider.default__id:
                return models.RegistrationProvider.get_default()
            raise e
        if _id and _id != 'osf':
            obj._id = _id
        obj._creator = user or models.OSFUser.objects.first() or UserFactory()  # Generates primary_collection
        obj.save()
        return obj
class OSFGroupFactory(DjangoModelFactory):
    """Factory for an OSFGroup with a fresh creator."""
    name = factory.Faker('company')
    created = factory.LazyFunction(timezone.now)
    creator = factory.SubFactory(AuthUserFactory)
    class Meta:
        model = models.OSFGroup
class RegistrationFactory(BaseNodeFactory):
    """Factory that registers a project (creating one if none is given).

    Keyword arguments select the sanction (``embargo`` /
    ``registration_approval`` / ``retraction``; default: an approval
    authorized by the creator), whether archiving is simulated as finished
    (``archive=False`` auto-approves), visibility and provider. The
    registration goes through the normal ``register_node`` path with celery
    task dispatch patched out.
    """
    creator = None
    # Default project is created if not provided
    category = 'project'
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        raise Exception('Cannot build registration without saving.')
    @classmethod
    def _create(cls, target_class, project=None, is_public=False,
                schema=None, draft_registration=None,
                archive=False, embargo=None, registration_approval=None, retraction=None,
                provider=None,
                *args, **kwargs):
        user = None
        if project:
            user = project.creator
        user = kwargs.pop('user', None) or kwargs.get('creator') or user or UserFactory()
        kwargs['creator'] = user
        provider = provider or models.RegistrationProvider.get_default()
        # Original project to be registered
        project = project or target_class(*args, **kwargs)
        # NOTE(review): this adds `user` when they already are an admin
        # contributor — confirm whether the condition was meant to be negated.
        if project.is_admin_contributor(user):
            project.add_contributor(
                contributor=user,
                permissions=permissions.CREATOR_PERMISSIONS,
                log=False,
                save=False
            )
        project.save()
        # Default registration parameters
        schema = schema or get_default_metaschema()
        if not draft_registration:
            draft_registration = DraftRegistrationFactory(
                branched_from=project,
                # BUG FIX: was misspelled `initator`, so the kwarg was
                # silently ignored by DraftRegistrationFactory._create.
                initiator=user,
                registration_schema=schema,
                provider=provider
            )
        auth = Auth(user=user)
        register = lambda: project.register_node(
            schema=schema,
            auth=auth,
            draft_registration=draft_registration,
            provider=provider,
        )
        def add_approval_step(reg):
            # Attach exactly one sanction; default is a registration
            # approval that the creator must authorize.
            if embargo:
                reg.embargo = embargo
            elif registration_approval:
                reg.registration_approval = registration_approval
            elif retraction:
                reg.retraction = retraction
            else:
                reg.require_approval(reg.creator)
            reg.save()
            reg.sanction.add_authorizer(reg.creator, reg)
            reg.sanction.save()
        with patch('framework.celery_tasks.handlers.enqueue_task'):
            reg = register()
            add_approval_step(reg)
        if not archive:
            # Pretend archiving completed successfully and auto-approve.
            with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
                archive_job = reg.archive_job
                archive_job.status = ARCHIVER_SUCCESS
                archive_job.done = True
                archive_job.save()
                reg.sanction.state = Sanction.APPROVED
                reg.sanction.save()
        if is_public:
            reg.is_public = True
        reg.files_count = reg.registered_from.files.filter(deleted_on__isnull=True).count()
        draft_registration.registered_node = reg
        draft_registration.save()
        reg.save()
        return reg
class WithdrawnRegistrationFactory(BaseNodeFactory):
    """Creates and fully approves a retraction for a given registration.

    Requires a ``registration`` kwarg; returns the Retraction, not the node.
    """
    @classmethod
    def _create(cls, *args, **kwargs):
        registration = kwargs.pop('registration', None)
        registration.is_public = True
        user = kwargs.pop('user', registration.creator)
        registration.retract_registration(user)
        withdrawal = registration.retraction
        # Approve using the (single) authorizer's approval token.
        token = list(withdrawal.approval_state.values())[0]['approval_token']
        with patch('osf.models.AbstractNode.update_search'):
            withdrawal.approve_retraction(user, token)
            withdrawal.save()
        return withdrawal
class SanctionFactory(DjangoModelFactory):
    """Abstract base for sanction factories (retraction/embargo/approval).

    Creates a registration that carries the sanction as a side effect.
    """
    class Meta:
        abstract = True
    @classmethod
    def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
        user = kwargs.pop('user', None) or UserFactory()
        kwargs['initiated_by'] = initiated_by or user
        sanction = super(SanctionFactory, cls)._create(target_class, *args, **kwargs)
        # Attach the sanction to a new registration under its SHORT_NAME kwarg.
        reg_kwargs = {
            'creator': user,
            'user': user,
            sanction.SHORT_NAME: sanction
        }
        RegistrationFactory(**reg_kwargs)
        if not approve:
            sanction.state = Sanction.UNAPPROVED
            sanction.save()
        return sanction
class RetractionFactory(SanctionFactory):
    """Sanction factory for Retraction."""
    class Meta:
        model = models.Retraction
    user = factory.SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
    """Sanction factory for Embargo."""
    class Meta:
        model = models.Embargo
    user = factory.SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
    """Sanction factory for RegistrationApproval."""
    class Meta:
        model = models.RegistrationApproval
    user = factory.SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(DjangoModelFactory):
    """Factory for an embargo-termination request on an embargoed registration."""
    @classmethod
    def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
        # Derive whichever of registration/user/embargo was not supplied.
        if registration:
            if not user:
                user = registration.creator
        else:
            user = user or UserFactory()
            if not embargo:
                embargo = EmbargoFactory(state=models.Sanction.APPROVED, approve=True)
                registration = embargo._get_registration()
            else:
                registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
        # Patch out approval emails while requesting termination.
        with mock.patch('osf.models.sanctions.EmailApprovableSanction.ask', mock.Mock()):
            approval = registration.request_embargo_termination(user)
            return approval
class DraftRegistrationFactory(DjangoModelFactory):
    """Factory for DraftRegistration via ``DraftRegistration.create_from_node``."""
    class Meta:
        model = models.DraftRegistration
    @classmethod
    def _create(cls, *args, **kwargs):
        title = kwargs.pop('title', None)
        initiator = kwargs.get('initiator', None)
        description = kwargs.pop('description', None)
        branched_from = kwargs.get('branched_from', None)
        registration_schema = kwargs.get('registration_schema')
        registration_metadata = kwargs.get('registration_metadata')
        provider = kwargs.get('provider')
        branched_from_creator = branched_from.creator if branched_from else None
        # Initiator preference: explicit > branched-from creator > user/creator kwargs > new user.
        initiator = initiator or branched_from_creator or kwargs.get('user', None) or kwargs.get('creator', None) or UserFactory()
        registration_schema = registration_schema or get_default_metaschema()
        registration_metadata = registration_metadata or {}
        provider = provider or models.RegistrationProvider.get_default()
        # Make sure the provider accepts this schema.
        provider.schemas.add(registration_schema)
        draft = models.DraftRegistration.create_from_node(
            node=branched_from,
            user=initiator,
            schema=registration_schema,
            data=registration_metadata,
            provider=provider,
        )
        if title:
            draft.title = title
        if description:
            draft.description = description
        draft.registration_responses = draft.flatten_registration_metadata()
        draft.save()
        return draft
class CommentFactory(DjangoModelFactory):
    """Factory for Comments targeting a node guid (or another comment)."""
    class Meta:
        model = models.Comment
    content = factory.Sequence(lambda n: 'Comment {0}'.format(n))
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        """Build (without saving) a comment; target defaults to the node's guid."""
        node = kwargs.pop('node', None) or NodeFactory()
        user = kwargs.pop('user', None) or node.creator
        target = kwargs.pop('target', None) or models.Guid.load(node._id)
        content = kwargs.pop('content', None) or 'Test comment.'
        instance = target_class(
            node=node,
            user=user,
            target=target,
            content=content,
            *args, **kwargs
        )
        # A reply to a comment inherits that comment's root target; a
        # top-level comment is its own root.
        if isinstance(target.referent, target_class):
            instance.root_target = target.referent.root_target
        else:
            instance.root_target = target
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # DRY FIX: the original duplicated _build's body verbatim here;
        # build and then persist instead.
        instance = cls._build(target_class, *args, **kwargs)
        instance.save()
        return instance
class SubjectFactory(DjangoModelFactory):
    """Factory for taxonomy Subjects; maps non-OSF subjects to a bepress one."""
    text = factory.Sequence(lambda n: 'Example Subject #{}'.format(n))
    class Meta:
        model = models.Subject
    @classmethod
    def _create(cls, target_class, parent=None, provider=None, bepress_subject=None, *args, **kwargs):
        provider = provider or models.PreprintProvider.objects.first() or PreprintProviderFactory(_id='osf')
        # Non-OSF providers must map their subjects onto the OSF (bepress) taxonomy.
        if provider._id != 'osf' and not bepress_subject:
            osf = models.PreprintProvider.load('osf') or PreprintProviderFactory(_id='osf')
            bepress_subject = SubjectFactory(provider=osf)
        try:
            ret = super(SubjectFactory, cls)._create(target_class, parent=parent, provider=provider, bepress_subject=bepress_subject, *args, **kwargs)
        except IntegrityError:
            # Subject text is unique; reuse the existing row on collision.
            ret = models.Subject.objects.get(text=kwargs['text'])
            if parent:
                ret.parent = parent
        return ret
class PreprintProviderFactory(DjangoModelFactory):
    """Factory for a PreprintProvider; share_title defaults to the _id."""
    _id = factory.Sequence(lambda n: f'slug{n}')
    name = factory.Faker('company')
    description = factory.Faker('bs')
    external_url = factory.Faker('url')
    share_source = factory.Sequence(lambda n: 'share source #{0}'.format(n))
    class Meta:
        model = models.PreprintProvider
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        instance = super(PreprintProviderFactory, cls)._build(target_class, *args, **kwargs)
        if not instance.share_title:
            instance.share_title = instance._id
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Same share_title defaulting as _build, plus a save.
        instance = super(PreprintProviderFactory, cls)._create(target_class, *args, **kwargs)
        if not instance.share_title:
            instance.share_title = instance._id
            instance.save()
        return instance
def sync_set_identifiers(preprint):
    """Assign a DOI to the preprint synchronously (test stand-in for the async task)."""
    from website import settings
    doi = settings.DOI_FORMAT.format(prefix=preprint.provider.doi_prefix, guid=preprint._id)
    preprint.set_identifier_values(doi=doi)
class PreprintFactory(DjangoModelFactory):
    """Factory for a (by default published) preprint with a primary file,
    one file version and subjects; SHARE/identifier tasks are patched out.
    """
    class Meta:
        model = models.Preprint
    title = factory.Faker('catch_phrase')
    description = factory.Faker('sentence')
    created = factory.LazyFunction(timezone.now)
    creator = factory.SubFactory(AuthUserFactory)
    doi = factory.Sequence(lambda n: '10.123/{}'.format(n))
    provider = factory.SubFactory(PreprintProviderFactory)
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # Instantiate without saving; missing related objects get fresh defaults.
        creator = kwargs.pop('creator', None) or UserFactory()
        provider = kwargs.pop('provider', None) or PreprintProviderFactory()
        project = kwargs.pop('project', None) or None
        title = kwargs.pop('title', None) or 'Untitled'
        description = kwargs.pop('description', None) or 'None'
        is_public = kwargs.pop('is_public', True)
        instance = target_class(provider=provider, title=title, description=description, creator=creator, node=project, is_public=is_public)
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Silence the SHARE update task while the preprint is assembled.
        update_task_patcher = mock.patch('website.preprints.tasks.on_preprint_updated.si')
        update_task_patcher.start()
        finish = kwargs.pop('finish', True)
        set_doi = kwargs.pop('set_doi', True)
        is_published = kwargs.pop('is_published', True)
        instance = cls._build(target_class, *args, **kwargs)
        file_size = kwargs.pop('file_size', 1337)
        doi = kwargs.pop('doi', None)
        license_details = kwargs.pop('license_details', None)
        filename = kwargs.pop('filename', None) or 'preprint_file.txt'
        subjects = kwargs.pop('subjects', None) or [[SubjectFactory()._id]]
        instance.article_doi = doi
        user = kwargs.pop('creator', None) or instance.creator
        instance.save()
        # Attach a primary OSF Storage file with a single version.
        preprint_file = OsfStorageFile.create(
            target_object_id=instance.id,
            target_content_type=ContentType.objects.get_for_model(instance),
            path='/{}'.format(filename),
            name=filename,
            materialized_path='/{}'.format(filename))
        instance.machine_state = kwargs.pop('machine_state', 'initial')
        preprint_file.save()
        from addons.osfstorage import settings as osfstorage_settings
        preprint_file.create_version(user, {
            'object': '06d80e',
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': file_size,
            'contentType': 'img/png'
        }).save()
        update_task_patcher.stop()
        if finish:
            auth = Auth(user)
            instance.set_primary_file(preprint_file, auth=auth, save=True)
            instance.set_subjects(subjects, auth=auth)
            if license_details:
                instance.set_preprint_license(license_details, auth=auth)
            instance.set_published(is_published, auth=auth)
            create_task_patcher = mock.patch('website.identifiers.utils.request_identifiers')
            mock_create_identifier = create_task_patcher.start()
            if is_published and set_doi:
                # NOTE(review): sync_set_identifiers(instance) runs immediately
                # here (its None return becomes the side_effect) — the net
                # effect is synchronous DOI assignment; confirm a lambda was
                # not intended.
                mock_create_identifier.side_effect = sync_set_identifiers(instance)
            create_task_patcher.stop()
        instance.save()
        return instance
class TagFactory(DjangoModelFactory):
    """Factory for a (non-system) Tag."""
    class Meta:
        model = models.Tag
    name = factory.Sequence(lambda n: 'Example Tag #{}'.format(n))
    system = False
class DismissedAlertFactory(DjangoModelFactory):
    """Factory for DismissedAlert with default alert id, user and location."""
    class Meta:
        model = models.DismissedAlert
    @classmethod
    def _create(cls, *args, **kwargs):
        kwargs.setdefault('_id', 'adblock')
        # BUG FIX: the original used kwargs.get('user', UserFactory()),
        # which created and saved a throwaway user on every call even when
        # the caller supplied one; only create a user if none was given.
        if 'user' not in kwargs:
            kwargs['user'] = UserFactory()
        kwargs.setdefault('location', 'iver/settings')
        return super(DismissedAlertFactory, cls)._create(*args, **kwargs)
class ApiOAuth2ScopeFactory(DjangoModelFactory):
    """Factory for a public, active OAuth2 scope."""
    class Meta:
        model = models.ApiOAuth2Scope
    name = factory.Sequence(lambda n: 'scope{}'.format(n))
    is_public = True
    is_active = True
    description = factory.Faker('text')
class ApiOAuth2PersonalTokenFactory(DjangoModelFactory):
    """Factory for a personal access token with one fresh scope attached."""
    class Meta:
        model = models.ApiOAuth2PersonalToken
    owner = factory.SubFactory(UserFactory)
    name = factory.Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
    @classmethod
    def _create(cls, *args, **kwargs):
        token = super(ApiOAuth2PersonalTokenFactory, cls)._create(*args, **kwargs)
        # Scopes are M2M, so they are attached after the token exists.
        token.scopes.add(ApiOAuth2ScopeFactory())
        return token
class ApiOAuth2ApplicationFactory(DjangoModelFactory):
    """Factory for a registered OAuth2 developer application."""
    class Meta:
        model = models.ApiOAuth2Application
    owner = factory.SubFactory(UserFactory)
    name = factory.Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
    home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
    callback_url = 'http://example.uk'
class ForkFactory(DjangoModelFactory):
    """Forks an existing project (required ``project`` kwarg)."""
    class Meta:
        model = models.Node
    @classmethod
    def _create(cls, *args, **kwargs):
        project = kwargs.pop('project', None)
        # Fork as the project's creator unless a user is supplied.
        user = kwargs.pop('user', project.creator)
        title = kwargs.pop('title', None)
        fork = project.fork_node(auth=Auth(user), title=title)
        fork.save()
        return fork
class IdentifierFactory(DjangoModelFactory):
    """Factory for an Identifier on a registration; category defaults to 'carpid'."""
    class Meta:
        model = models.Identifier
    referent = factory.SubFactory(RegistrationFactory)
    value = factory.Sequence(lambda n: 'carp:/2460{}'.format(n))
    @classmethod
    def _create(cls, *args, **kwargs):
        kwargs['category'] = kwargs.get('category', 'carpid')
        return super(IdentifierFactory, cls)._create(*args, **kwargs)
class NodeRelationFactory(DjangoModelFactory):
    """Factory for a parent/child NodeRelation between two fresh nodes."""
    class Meta:
        model = models.NodeRelation
    child = factory.SubFactory(NodeFactory)
    parent = factory.SubFactory(NodeFactory)
class ExternalAccountFactory(DjangoModelFactory):
    """Factory for an ExternalAccount on the 'mock2' provider below."""
    class Meta:
        model = models.ExternalAccount
    oauth_key = 'some-silly-key'
    oauth_secret = 'some-super-secret'
    provider = 'mock2'
    provider_id = factory.Sequence(lambda n: 'user-{0}'.format(n))
    provider_name = 'Fake Provider'
    display_name = factory.Sequence(lambda n: 'user-{0}'.format(n))
    profile_url = 'http://wutwut.com/'
    refresh_token = 'some-sillier-key'
class MockOAuth2Provider(models.ExternalProvider):
    """Concrete ExternalProvider used in tests alongside ExternalAccountFactory."""
    name = 'Mock OAuth 2.0 Provider'
    short_name = 'mock2'
    client_id = 'mock2_client_id'
    client_secret = 'mock2_client_secret'
    auth_url_base = 'https://mock2.com/auth'
    callback_url = 'https://mock2.com/callback'
    auto_refresh_url = 'https://mock2.com/callback'
    refresh_time = 300
    expiry_time = 9001
    def handle_callback(self, response):
        # Minimal payload: only the provider id is needed by tests.
        return {
            'provider_id': 'mock_provider_id'
        }
class NotificationSubscriptionFactory(DjangoModelFactory):
    """Bare factory for NotificationSubscription; callers supply all fields."""
    class Meta:
        model = models.NotificationSubscription
def make_node_lineage():
    """Create a four-deep parent/child chain of nodes; return their ids, root first."""
    nodes = [NodeFactory()]
    for _ in range(3):
        nodes.append(NodeFactory(parent=nodes[-1]))
    return [node._id for node in nodes]
class NotificationDigestFactory(DjangoModelFactory):
    """Factory for NotificationDigest entries with a fuzzed node lineage."""
    timestamp = FuzzyDateTime(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
    node_lineage = FuzzyAttribute(fuzzer=make_node_lineage)
    user = factory.SubFactory(UserFactory)
    send_type = FuzzyChoice(choices=NOTIFICATION_TYPES.keys())
    # NOTE(review): fake.text() is called once at class-definition time, so
    # every instance shares the same message/event text — confirm whether a
    # per-instance declaration (e.g. factory.Faker) was intended.
    message = fake.text(max_nb_chars=2048)
    event = fake.text(max_nb_chars=50)
    class Meta:
        model = models.NotificationDigest
class ConferenceFactory(DjangoModelFactory):
    """Factory for an active Conference; admins default to one fresh user."""
    class Meta:
        model = models.Conference
    endpoint = factory.Sequence(lambda n: 'conference{0}'.format(n))
    name = factory.Faker('catch_phrase')
    active = True
    is_meeting = True
    @factory.post_generation
    def admins(self, create, extracted, **kwargs):
        # `extracted` is the admins= list passed by the caller, if any.
        self.admins.add(*(extracted or [UserFactory()]))
class SessionFactory(DjangoModelFactory):
    """Factory for a Session, optionally pre-authenticated as a given user."""
    class Meta:
        model = models.Session
    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        user = kwargs.pop('user', None)
        instance = target_class(*args, **kwargs)
        if user:
            # Mirror what the auth layer stores in the session payload.
            instance.data['auth_user_username'] = user.username
            instance.data['auth_user_id'] = user._primary_key
            instance.data['auth_user_fullname'] = user.fullname
        return instance
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Build, then persist.
        instance = cls._build(target_class, *args, **kwargs)
        instance.save()
        return instance
class ArchiveJobFactory(DjangoModelFactory):
    """Bare factory for ArchiveJob; callers supply all fields."""
    class Meta:
        model = models.ArchiveJob
class ReviewActionFactory(DjangoModelFactory):
    """Factory for a moderation ReviewAction on a preprint, with fuzzed states."""
    class Meta:
        model = models.ReviewAction
    trigger = FuzzyChoice(choices=DefaultTriggers.values())
    comment = factory.Faker('text')
    from_state = FuzzyChoice(choices=DefaultStates.values())
    to_state = FuzzyChoice(choices=DefaultStates.values())
    target = factory.SubFactory(PreprintFactory)
    creator = factory.SubFactory(AuthUserFactory)
    is_deleted = False
class ScheduledBannerFactory(DjangoModelFactory):
    """Factory for a ScheduledBanner with fake imagery and colors."""
    # Banners are set for 24 hours from start_date if no end date is given
    class Meta:
        model = models.ScheduledBanner
    name = factory.Faker('name')
    default_alt_text = factory.Faker('text')
    mobile_alt_text = factory.Faker('text')
    default_photo = factory.Faker('file_name')
    mobile_photo = factory.Faker('file_name')
    license = factory.Faker('name')
    color = factory.Faker('color')
    # BUG FIX: `timezone.now()` was evaluated once at import time, so every
    # banner in a test run shared a single stale timestamp. Evaluate per
    # instance instead, matching the LazyFunction usage elsewhere in this file.
    start_date = factory.LazyFunction(timezone.now)
    end_date = factory.LazyAttribute(lambda o: o.start_date)
class FlagFactory(DjangoModelFactory):
    """Factory for a waffle Flag enabled for everyone."""
    name = factory.Faker('catch_phrase')
    everyone = True
    note = 'This is a waffle test flag'
    class Meta:
        model = Flag
class SampleFactory(DjangoModelFactory):
    """Factory for a waffle Sample active 100% of the time."""
    name = factory.Faker('catch_phrase')
    percent = 100
    note = 'This is a waffle test sample'
    class Meta:
        model = Sample
class SwitchFactory(DjangoModelFactory):
    """Factory for an active waffle Switch."""
    name = factory.Faker('catch_phrase')
    active = True
    note = 'This is a waffle test switch'
    class Meta:
        model = Switch
class NodeRequestFactory(DjangoModelFactory):
    """Factory for a NodeRequest (e.g. access request) on a fresh node."""
    class Meta:
        model = models.NodeRequest
    creator = factory.SubFactory(AuthUserFactory)
    target = factory.SubFactory(NodeFactory)
    comment = factory.Faker('text')
class PreprintRequestFactory(DjangoModelFactory):
    """Factory for a PreprintRequest; callers supply target/creator."""
    class Meta:
        model = models.PreprintRequest
    comment = factory.Faker('text')
osfstorage_settings = apps.get_app_config('addons_osfstorage')
# Generic waterbutler fixtures used by RegionFactory below.
generic_location = {
    'service': 'cloud',
    osfstorage_settings.WATERBUTLER_RESOURCE: 'resource',
    'object': '1615307',
}
generic_waterbutler_settings = {
    'storage': {
        'provider': 'glowcloud',
        'container': 'osf_storage',
        'use_public': True,
    }
}
generic_waterbutler_credentials = {
    'storage': {
        'region': 'PartsUnknown',
        'username': 'mankind',
        'token': 'heresmrsocko'
    }
}
class RegionFactory(DjangoModelFactory):
    """Factory for an OSF Storage Region using the generic fixtures above."""
    class Meta:
        model = Region
    name = factory.Sequence(lambda n: 'Region {0}'.format(n))
    _id = factory.Sequence(lambda n: 'us_east_{0}'.format(n))
    waterbutler_credentials = generic_waterbutler_credentials
    waterbutler_settings = generic_waterbutler_settings
    waterbutler_url = 'http://123.456.test.woo'
class ProviderAssetFileFactory(DjangoModelFactory):
    """Factory for a ProviderAssetFile, optionally linked to providers."""
    class Meta:
        model = models.ProviderAssetFile
    name = FuzzyChoice(choices=PROVIDER_ASSET_NAME_CHOICES)
    file = factory.django.FileField(filename=factory.Faker('text'))
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # `providers` is M2M, so attach after the instance exists.
        providers = kwargs.pop('providers', [])
        instance = super(ProviderAssetFileFactory, cls)._create(target_class, *args, **kwargs)
        instance.providers.add(*providers)
        instance.save()
        return instance
class ChronosJournalFactory(DjangoModelFactory):
    """Factory for a ChronosJournal; raw_response mirrors the explicit fields."""
    class Meta:
        model = models.ChronosJournal
    name = factory.Faker('text')
    title = factory.Faker('text')
    journal_id = factory.Faker('ean')
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Build a default raw_response from the caller-supplied fields.
        kwargs['raw_response'] = kwargs.get('raw_response', {
            'TITLE': kwargs.get('title', factory.Faker('text').generate([])),
            # BUG FIX: previously read kwargs.get('title', ...) here
            # (copy/paste); JOURNAL_ID must mirror the journal_id kwarg.
            'JOURNAL_ID': kwargs.get('journal_id', factory.Faker('ean').generate([])),
            'NAME': kwargs.get('name', factory.Faker('text').generate([])),
            'JOURNAL_URL': factory.Faker('url').generate([]),
            'PUBLISHER_ID': factory.Faker('ean').generate([]),
            'PUBLISHER_NAME': factory.Faker('name').generate([])
            # Other stuff too probably
        })
        instance = super(ChronosJournalFactory, cls)._create(target_class, *args, **kwargs)
        instance.save()
        return instance
class ChronosSubmissionFactory(DjangoModelFactory):
    """Factory for a ChronosSubmission; raw_response mirrors the explicit fields."""
    class Meta:
        model = models.ChronosSubmission
    publication_id = factory.Faker('ean')
    journal = factory.SubFactory(ChronosJournalFactory)
    preprint = factory.SubFactory(PreprintFactory)
    submitter = factory.SubFactory(AuthUserFactory)
    status = factory.Faker('random_int', min=1, max=5)
    submission_url = factory.Faker('url')
    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Build a default raw_response from the caller-supplied fields.
        kwargs['raw_response'] = kwargs.get('raw_response', {
            'PUBLICATION_ID': kwargs.get('publication_id', factory.Faker('ean').generate([])),
            'STATUS_CODE': kwargs.get('status', factory.Faker('random_int', min=1, max=5).generate([])),
            'CHRONOS_SUBMISSION_URL': kwargs.get('submission_url', factory.Faker('url').generate([])),
            # Other stuff too probably
        })
        instance = super(ChronosSubmissionFactory, cls)._create(target_class, *args, **kwargs)
        instance.save()
        return instance
class BrandFactory(DjangoModelFactory):
    """Factory for Brand styling assets (logos, colors)."""
    class Meta:
        model = models.Brand
    # Truncate the fake company name to at most 29 characters — the field
    # limit is presumably 30; TODO confirm against the model definition.
    name = factory.LazyAttribute(lambda n: fake.company()[:29])
    hero_logo_image = factory.Faker('url')
    topnav_logo_image = factory.Faker('url')
    hero_background_image = factory.Faker('url')
    primary_color = factory.Faker('hex_color')
    secondary_color = factory.Faker('hex_color')
| {
"content_hash": "1301dc1221170e71bf1399b74b1cb0e4",
"timestamp": "",
"source": "github",
"line_count": 1114,
"max_line_length": 170,
"avg_line_length": 33.58886894075404,
"alnum_prop": 0.6459992516970442,
"repo_name": "baylee-d/osf.io",
"id": "a8d41b3b22155cb5e038c5374f52d118bcdcd749",
"size": "37442",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osf_tests/factories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import do_change_stream_invite_only, get_client
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Message, UserMessage, get_realm, get_stream
class TopicHistoryTest(ZulipTestCase):
    """Tests for the per-stream topic history endpoint
    (``GET /json/users/me/<stream_id>/topics``)."""
    def test_topics_history_zephyr_mirror(self) -> None:
        """In zephyr-mirror realms, topic history stays empty even after
        the user subscribes to the stream."""
        user_profile = self.mit_user('sipbtest')
        stream_name = 'new_stream'
        # Send a message to this new stream from another user
        self.subscribe(self.mit_user("starnine"), stream_name)
        stream = get_stream(stream_name, user_profile.realm)
        self.send_stream_message(self.mit_user("starnine"), stream_name,
                                 topic_name="secret topic")
        # Now subscribe this MIT user to the new stream and verify
        # that the new topic is not accessible
        self.login_user(user_profile)
        self.subscribe(user_profile, stream_name)
        endpoint = f'/json/users/me/{stream.id}/topics'
        result = self.client_get(endpoint, dict(), subdomain="zephyr")
        self.assert_json_success(result)
        history = result.json()['topics']
        self.assertEqual(history, [])
    def test_topics_history(self) -> None:
        """Topic history reports the most recent message id per topic, is
        readable by users without UserMessage rows on a public stream, and
        is restricted on a private stream with protected history."""
        # verified: int(UserMessage.flags.read) == 1
        user_profile = self.example_user('iago')
        self.login_user(user_profile)
        stream_name = 'Verona'
        stream = get_stream(stream_name, user_profile.realm)
        recipient = stream.recipient
        def create_test_message(topic: str) -> int:
            # TODO: Clean this up to send messages the normal way.
            hamlet = self.example_user('hamlet')
            message = Message(
                sender=hamlet,
                recipient=recipient,
                content='whatever',
                date_sent=timezone_now(),
                sending_client=get_client('whatever'),
            )
            message.set_topic_name(topic)
            message.save()
            UserMessage.objects.create(
                user_profile=user_profile,
                message=message,
                flags=0,
            )
            return message.id
        # our most recent topics are topic0, topic1, topic2
        # Create old messages with strange spellings.
        create_test_message('topic2')
        create_test_message('toPIc1')
        create_test_message('toPIc0')
        create_test_message('topic2')
        create_test_message('topic2')
        create_test_message('Topic2')
        # Create new messages
        topic2_msg_id = create_test_message('topic2')
        create_test_message('topic1')
        create_test_message('topic1')
        topic1_msg_id = create_test_message('topic1')
        topic0_msg_id = create_test_message('topic0')
        endpoint = f'/json/users/me/{stream.id}/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_success(result)
        history = result.json()['topics']
        # We only look at the most recent three topics, because
        # the prior fixture data may be unreliable.
        history = history[:3]
        self.assertEqual([topic['name'] for topic in history], [
            'topic0',
            'topic1',
            'topic2',
        ])
        self.assertEqual([topic['max_id'] for topic in history], [
            topic0_msg_id,
            topic1_msg_id,
            topic2_msg_id,
        ])
        # Now try as cordelia, who we imagine as a totally new user in
        # that she doesn't have UserMessage rows. We should see the
        # same results for a public stream.
        self.login('cordelia')
        result = self.client_get(endpoint, dict())
        self.assert_json_success(result)
        history = result.json()['topics']
        # We only look at the most recent three topics, because
        # the prior fixture data may be unreliable.
        history = history[:3]
        self.assertEqual([topic['name'] for topic in history], [
            'topic0',
            'topic1',
            'topic2',
        ])
        # NOTE(review): this assertion is redundant with the assertEqual
        # just above.
        self.assertIn('topic0', [topic['name'] for topic in history])
        self.assertEqual([topic['max_id'] for topic in history], [
            topic0_msg_id,
            topic1_msg_id,
            topic2_msg_id,
        ])
        # Now make stream private, but subscribe cordelia
        do_change_stream_invite_only(stream, True)
        self.subscribe(self.example_user("cordelia"), stream.name)
        result = self.client_get(endpoint, dict())
        self.assert_json_success(result)
        history = result.json()['topics']
        history = history[:3]
        # Cordelia should not see these recent topics in her results,
        # since she wasn't subscribed when those messages were sent.
        self.assertNotIn('topic0', [topic['name'] for topic in history])
        self.assertNotIn('topic1', [topic['name'] for topic in history])
        self.assertNotIn('topic2', [topic['name'] for topic in history])
    def test_bad_stream_id(self) -> None:
        """Nonexistent, out-of-realm, and unsubscribed-private stream ids
        all produce the same 'Invalid stream id' error."""
        self.login('iago')
        # non-sensible stream id
        endpoint = '/json/users/me/9999999999/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
        # out of realm
        bad_stream = self.make_stream(
            'mit_stream',
            realm=get_realm('zephyr'),
        )
        endpoint = f'/json/users/me/{bad_stream.id}/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
        # private stream to which I am not subscribed
        private_stream = self.make_stream(
            'private_stream',
            invite_only=True,
        )
        endpoint = f'/json/users/me/{private_stream.id}/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
class TopicDeleteTest(ZulipTestCase):
    """Tests for the delete-topic endpoint
    (``POST /json/streams/<stream_id>/delete_topic``)."""
    def test_topic_delete(self) -> None:
        """Only organization administrators may delete a topic, and even
        then only the messages they can access — which depends on the
        stream's history-visibility settings."""
        initial_last_msg_id = self.get_last_message().id
        stream_name = 'new_stream'
        topic_name = 'new topic 2'
        # NON-ADMIN USER
        user_profile = self.example_user('hamlet')
        self.subscribe(user_profile, stream_name)
        # Send message
        stream = get_stream(stream_name, user_profile.realm)
        self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
        last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
        # Deleting the topic
        self.login_user(user_profile)
        endpoint = '/json/streams/' + str(stream.id) + '/delete_topic'
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_error(result, "Must be an organization administrator")
        self.assertEqual(self.get_last_message().id, last_msg_id)
        # Make stream private with limited history
        do_change_stream_invite_only(stream, invite_only=True,
                                     history_public_to_subscribers=False)
        # ADMIN USER subscribed now
        user_profile = self.example_user('iago')
        self.subscribe(user_profile, stream_name)
        self.login_user(user_profile)
        new_last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
        # Now admin deletes all messages in topic -- which should only
        # delete new_last_msg_id, i.e. the one sent since they joined.
        self.assertEqual(self.get_last_message().id, new_last_msg_id)
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, last_msg_id)
        # Try to delete all messages in the topic again. There are no messages accessible
        # to the administrator, so this should do nothing.
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, last_msg_id)
        # Make the stream's history public to subscribers
        do_change_stream_invite_only(stream, invite_only=True,
                                     history_public_to_subscribers=True)
        # Delete the topic should now remove all messages
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, initial_last_msg_id)
        # Delete again, to test the edge case of deleting an empty topic.
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, initial_last_msg_id)
| {
"content_hash": "50377203849f75c64c07c7e609cee56f",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 100,
"avg_line_length": 38.530434782608694,
"alnum_prop": 0.6029113067027759,
"repo_name": "brainwane/zulip",
"id": "a8516d6b247639f8899816408b68030099c21d99",
"size": "8862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_message_topics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
from .years import YearPeriod
from .months import MonthPeriod
class PeriodError(Exception):
    """Raised when a period cannot be constructed or inferred from input."""
    pass
def period_factory(period_type, value):
    """Build a period object of the requested type.

    :param period_type: either ``'month'`` or ``'year'``
    :param value: forwarded to the period class constructor
    :raises PeriodError: if ``period_type`` is not recognized
    """
    if period_type == 'year':
        return YearPeriod(value)
    if period_type == 'month':
        return MonthPeriod(value)
    raise PeriodError("Unknown period type: %s" % period_type)
def period_from_string(period_str):
    """Infer and build a period from its string representation.

    A 4-character string is treated as a year (e.g. ``'2017'``); a
    6-character string as year+month (e.g. ``'201705'``).

    :raises PeriodError: if the length matches neither format
    """
    length = len(period_str)
    if length == 4:
        return YearPeriod(period_str)
    if length == 6:
        return MonthPeriod(yearmonth=period_str)
    raise PeriodError("Could not infer period from string [%s]" % period_str)
| {
"content_hash": "ded3365a58ee2a440de5242962ae4ddd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 26.73913043478261,
"alnum_prop": 0.6715447154471544,
"repo_name": "davidmarquis/pyperiods",
"id": "551d14ab450e5ad12e8ba50abfe888bdd47b362a",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyperiods/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19120"
},
{
"name": "Shell",
"bytes": "236"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from druid_node import DruidBase
class DruidCoordinator(DruidBase):
    """Ambari service-script entry point for the Druid coordinator node.

    All command handling is inherited from DruidBase (defined in
    druid_node.py, not shown here); only the node type differs.
    """
    def __init__(self):
        # nodeType selects which Druid daemon the base class manages.
        DruidBase.__init__(self, nodeType="coordinator")
if __name__ == "__main__":
    # Ambari invokes this script directly; execute() (inherited through
    # DruidBase — not shown here) dispatches the requested command.
    DruidCoordinator().execute()
| {
"content_hash": "ef4a988902202142595ba586761ec6f9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 34.642857142857146,
"alnum_prop": 0.7680412371134021,
"repo_name": "arenadata/ambari",
"id": "a86fa405a8f9091ea3a14fdf8eec86d5d5a4aaea",
"size": "970",
"binary": false,
"copies": "3",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models
from ._configuration import KustoManagementClientConfiguration
from ._serialization import Deserializer, Serializer
from .operations import (
AttachedDatabaseConfigurationsOperations,
ClusterPrincipalAssignmentsOperations,
ClustersOperations,
DataConnectionsOperations,
DatabasePrincipalAssignmentsOperations,
DatabasesOperations,
ManagedPrivateEndpointsOperations,
Operations,
OperationsResultsLocationOperations,
OperationsResultsOperations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
ScriptsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class KustoManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
    """The Azure Kusto management API provides a RESTful set of web services that interact with Azure
    Kusto services to manage your clusters and databases. The API enables you to create, update,
    and delete clusters and databases.
    :ivar clusters: ClustersOperations operations
    :vartype clusters: azure.mgmt.kusto.operations.ClustersOperations
    :ivar cluster_principal_assignments: ClusterPrincipalAssignmentsOperations operations
    :vartype cluster_principal_assignments:
     azure.mgmt.kusto.operations.ClusterPrincipalAssignmentsOperations
    :ivar databases: DatabasesOperations operations
    :vartype databases: azure.mgmt.kusto.operations.DatabasesOperations
    :ivar attached_database_configurations: AttachedDatabaseConfigurationsOperations operations
    :vartype attached_database_configurations:
     azure.mgmt.kusto.operations.AttachedDatabaseConfigurationsOperations
    :ivar managed_private_endpoints: ManagedPrivateEndpointsOperations operations
    :vartype managed_private_endpoints:
     azure.mgmt.kusto.operations.ManagedPrivateEndpointsOperations
    :ivar database_principal_assignments: DatabasePrincipalAssignmentsOperations operations
    :vartype database_principal_assignments:
     azure.mgmt.kusto.operations.DatabasePrincipalAssignmentsOperations
    :ivar scripts: ScriptsOperations operations
    :vartype scripts: azure.mgmt.kusto.operations.ScriptsOperations
    :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
    :vartype private_endpoint_connections:
     azure.mgmt.kusto.operations.PrivateEndpointConnectionsOperations
    :ivar private_link_resources: PrivateLinkResourcesOperations operations
    :vartype private_link_resources: azure.mgmt.kusto.operations.PrivateLinkResourcesOperations
    :ivar data_connections: DataConnectionsOperations operations
    :vartype data_connections: azure.mgmt.kusto.operations.DataConnectionsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.kusto.operations.Operations
    :ivar operations_results: OperationsResultsOperations operations
    :vartype operations_results: azure.mgmt.kusto.operations.OperationsResultsOperations
    :ivar operations_results_location: OperationsResultsLocationOperations operations
    :vartype operations_results_location:
     azure.mgmt.kusto.operations.OperationsResultsLocationOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2022-07-07". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """
    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = KustoManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Build the (de)serializer over every model class exported by `models`.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Generated models validate server-side; skip client-side validation.
        self._serialize.client_side_validation = False
        self.clusters = ClustersOperations(self._client, self._config, self._serialize, self._deserialize)
        self.cluster_principal_assignments = ClusterPrincipalAssignmentsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.databases = DatabasesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.attached_database_configurations = AttachedDatabaseConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.managed_private_endpoints = ManagedPrivateEndpointsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.database_principal_assignments = DatabasePrincipalAssignmentsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.scripts = ScriptsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.private_link_resources = PrivateLinkResourcesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.data_connections = DataConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.operations_results = OperationsResultsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.operations_results_location = OperationsResultsLocationOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
    def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.
        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>
        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)
    def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        self._client.close()
    def __enter__(self) -> "KustoManagementClient":
        self._client.__enter__()
        return self
    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
| {
"content_hash": "5141d81f113bc8fec6b284dcc834675e",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 111,
"avg_line_length": 50.214723926380366,
"alnum_prop": 0.7337813072693953,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6b9922457ce33124aa989c8601785abc3043e1a9",
"size": "8653",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/kusto/azure-mgmt-kusto/azure/mgmt/kusto/_kusto_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""This class extends pexpect.spawn to specialize setting up SSH connections.
This adds methods for login, logout, and expecting the shell prompt.
$Id$
"""
from pexpect import *
import pexpect
import time
import os
__all__ = ['ExceptionPxssh', 'pxssh']
# Exception classes used by this module.
class ExceptionPxssh(ExceptionPexpect):
    """Raised for pxssh exceptions, such as login failures or an
    inability to synchronize with / reset the remote shell prompt.
    """
class pxssh (spawn):
    """This class extends pexpect.spawn to specialize setting up SSH
    connections. This adds methods for login, logout, and expecting the shell
    prompt. It does various tricky things to handle many situations in the SSH
    login process. For example, if the session is your first login, then pxssh
    automatically accepts the remote certificate; or if you have public key
    authentication setup then pxssh won't wait for the password prompt.
    pxssh uses the shell prompt to synchronize output from the remote host. In
    order to make this more robust it sets the shell prompt to something more
    unique than just $ or #. This should work on most Borne/Bash or Csh style
    shells.
    Example that runs a few commands on a remote server and prints the result::
        import pxssh
        import getpass
        try:
            s = pxssh.pxssh()
            hostname = raw_input('hostname: ')
            username = raw_input('username: ')
            password = getpass.getpass('password: ')
            s.login (hostname, username, password)
            s.sendline ('uptime')  # run a command
            s.prompt()             # match the prompt
            print s.before         # print everything before the prompt.
            s.sendline ('ls -l')
            s.prompt()
            print s.before
            s.sendline ('df')
            s.prompt()
            print s.before
            s.logout()
        except pxssh.ExceptionPxssh, e:
            print "pxssh failed on login."
            print str(e)
    Note that if you have ssh-agent running while doing development with pxssh
    then this can lead to a lot of confusion. Many X display managers (xdm,
    gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
    dialog box popup asking for a password during development. You should turn
    off any key agents during testing. The 'force_password' attribute will turn
    off public key authentication. This will only work if the remote SSH server
    is configured to allow password logins. Example of using 'force_password'
    attribute::
        s = pxssh.pxssh()
        s.force_password = True
        hostname = raw_input('hostname: ')
        username = raw_input('username: ')
        password = getpass.getpass('password: ')
        s.login (hostname, username, password)
    """
    def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
        # The command is spawned later, in login(), so pass None here.
        spawn.__init__(self, None, timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
        self.name = '<pxssh>'
        #SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
        #slightly different string than the regular expression to match it. This
        #is because when you set the prompt the command will echo back, but we
        #don't want to match the echoed command. So if we make the set command
        #slightly different than the regex we eliminate the problem. To make the
        #set command different we add a backslash in front of $. The $ doesn't
        #need to be escaped, but it doesn't hurt and serves to make the set
        #prompt command different than the regex.
        # used to match the command-line prompt
        self.UNIQUE_PROMPT = "\[PEXPECT\][\$\#] "
        self.PROMPT = self.UNIQUE_PROMPT
        # used to set shell command-line prompt to UNIQUE_PROMPT.
        self.PROMPT_SET_SH = "PS1='[PEXPECT]\$ '"
        self.PROMPT_SET_CSH = "set prompt='[PEXPECT]\$ '"
        self.SSH_OPTS = "-o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
        # Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
        # displaying a GUI password dialog. I have not figured out how to
        # disable only SSH_ASKPASS without also disabling X11 forwarding.
        # Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
        #self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
        self.force_password = False
        self.auto_prompt_reset = True
    def levenshtein_distance(self, a,b):
        """This calculates the Levenshtein distance between a and b.
        Standard two-row dynamic-programming formulation; used by
        sync_original_prompt() to compare two prompt samples.
        """
        n, m = len(a), len(b)
        # Ensure a is the shorter string so the DP row stays small.
        if n > m:
            a,b = b,a
            n,m = m,n
        current = range(n+1)
        for i in range(1,m+1):
            previous, current = current, [i]+[0]*n
            for j in range(1,n+1):
                add, delete = previous[j]+1, current[j-1]+1
                change = previous[j-1]
                if a[j-1] != b[i-1]:
                    change = change + 1
                current[j] = min(add, delete, change)
        return current[n]
    def sync_original_prompt (self):
        """This attempts to find the prompt. Basically, press enter and record
        the response; press enter again and record the response; if the two
        responses are similar then assume we are at the original prompt. This
        is a slow function. It can take over 10 seconds. """
        # All of these timing pace values are magic.
        # I came up with these based on what seemed reliable for
        # connecting to a heavily loaded machine I have.
        self.sendline()
        time.sleep(0.1)
        # If latency is worse than these values then this will fail.
        try:
            self.read_nonblocking(size=10000,timeout=1) # GAS: Clear out the cache before getting the prompt
        except TIMEOUT:
            pass
        time.sleep(0.1)
        self.sendline()
        time.sleep(0.5)
        # NOTE(review): 'x' is never used; this first sample appears to be
        # an extra flush before the two compared samples below.
        x = self.read_nonblocking(size=1000,timeout=1)
        time.sleep(0.1)
        self.sendline()
        time.sleep(0.5)
        a = self.read_nonblocking(size=1000,timeout=1)
        time.sleep(0.1)
        self.sendline()
        time.sleep(0.5)
        b = self.read_nonblocking(size=1000,timeout=1)
        ld = self.levenshtein_distance(a,b)
        len_a = len(a)
        if len_a == 0:
            return False
        # Accept if the two samples differ by less than 40% of their length.
        if float(ld)/len_a < 0.4:
            return True
        return False
    ### TODO: This is getting messy and I'm pretty sure this isn't perfect.
    ### TODO: I need to draw a flow chart for this.
    def login (self,server,username,password='',terminal_type='ansi',original_prompt=r"[#$]",login_timeout=10,port=None,auto_prompt_reset=True,ssh_key=None):
        """This logs the user into the given server. It uses the
        'original_prompt' to try to find the prompt right after login. When it
        finds the prompt it immediately tries to reset the prompt to something
        more easily matched. The default 'original_prompt' is very optimistic
        and is easily fooled. It's more reliable to try to match the original
        prompt as exactly as possible to prevent false matches by server
        strings such as the "Message Of The Day". On many systems you can
        disable the MOTD on the remote server by creating a zero-length file
        called "~/.hushlogin" on the remote server. If a prompt cannot be found
        then this will not necessarily cause the login to fail. In the case of
        a timeout when looking for the prompt we assume that the original
        prompt was so weird that we could not match it, so we use a few tricks
        to guess when we have reached the prompt. Then we hope for the best and
        blindly try to reset the prompt to something more unique. If that fails
        then login() raises an ExceptionPxssh exception.
        In some situations it is not possible or desirable to reset the
        original prompt. In this case, set 'auto_prompt_reset' to False to
        inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
        uses a unique prompt in the prompt() method. If the original prompt is
        not reset then this will disable the prompt() method unless you
        manually set the PROMPT attribute. """
        ssh_options = '-q'
        if self.force_password:
            ssh_options = ssh_options + ' ' + self.SSH_OPTS
        if port is not None:
            ssh_options = ssh_options + ' -p %s'%(str(port))
        if ssh_key is not None:
            # NOTE(review): os.path.isfile() returns False for a missing
            # file rather than raising, so this try/except (with a bare
            # except, which would also mask unrelated errors) never fires
            # for a nonexistent key — confirm intent.
            try:
                os.path.isfile(ssh_key)
            except:
                raise ExceptionPxssh ('private ssh key does not exist')
            ssh_options = ssh_options + ' -i %s' % (ssh_key)
        cmd = "ssh %s -l %s %s" % (ssh_options, username, server)
        # This does not distinguish between a remote server 'password' prompt
        # and a local ssh 'passphrase' prompt (for unlocking a private key).
        spawn._spawn(self, cmd)
        i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT, "(?i)connection closed by remote host"], timeout=login_timeout)
        # First phase
        if i==0:
            # New certificate -- always accept it.
            # This is what you get if SSH does not have the remote host's
            # public key stored in the 'known_hosts' cache.
            self.sendline("yes")
            i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
        if i==2: # password or passphrase
            self.sendline(password)
            i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
        if i==4:
            self.sendline(terminal_type)
            i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
        # Second phase
        if i==0:
            # This is weird. This should not happen twice in a row.
            self.close()
            raise ExceptionPxssh ('Weird error. Got "are you sure" prompt twice.')
        elif i==1: # can occur if you have a public key pair set to authenticate.
            ### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
            pass
        elif i==2: # password prompt again
            # For incorrect passwords, some ssh servers will
            # ask for the password again, others return 'denied' right away.
            # If we get the password prompt again then this means
            # we didn't get the password right the first time.
            self.close()
            raise ExceptionPxssh ('password refused')
        elif i==3: # permission denied -- password was bad.
            self.close()
            raise ExceptionPxssh ('permission denied')
        elif i==4: # terminal type again? WTF?
            self.close()
            raise ExceptionPxssh ('Weird error. Got "terminal type" prompt twice.')
        elif i==5: # Timeout
            #This is tricky... I presume that we are at the command-line prompt.
            #It may be that the shell prompt was so weird that we couldn't match
            #it. Or it may be that we couldn't log in for some other reason. I
            #can't be sure, but it's safe to guess that we did login because if
            #I presume wrong and we are not logged in then this should be caught
            #later when I try to set the shell prompt.
            pass
        elif i==6: # Connection closed by remote host
            self.close()
            raise ExceptionPxssh ('connection closed')
        else: # Unexpected
            self.close()
            raise ExceptionPxssh ('unexpected login response')
        if not self.sync_original_prompt():
            self.close()
            raise ExceptionPxssh ('could not synchronize with original prompt')
        # We appear to be in.
        # set shell prompt to something unique.
        if auto_prompt_reset:
            if not self.set_unique_prompt():
                self.close()
                raise ExceptionPxssh ('could not set shell prompt\n'+self.before)
        return True
    def logout (self):
        """This sends exit to the remote shell. If there are stopped jobs then
        this automatically sends exit twice. """
        self.sendline("exit")
        index = self.expect([EOF, "(?i)there are stopped jobs"])
        if index==1:
            self.sendline("exit")
            self.expect(EOF)
        self.close()
    def prompt (self, timeout=-1):
        """This matches the shell prompt. This is little more than a short-cut
        to the expect() method. This returns True if the shell prompt was
        matched. This returns False if a timeout was raised. Note that if you
        called login() with auto_prompt_reset set to False then before calling
        prompt() you must set the PROMPT attribute to a regex that prompt()
        will use for matching the prompt. Calling prompt() will erase the
        contents of the 'before' attribute even if no prompt is ever matched.
        If timeout is not given or it is set to -1 then self.timeout is used.
        """
        if timeout == -1:
            timeout = self.timeout
        i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
        if i==1:
            return False
        return True
    def set_unique_prompt (self):
        """This sets the remote prompt to something more unique than # or $.
        This makes it easier for the prompt() method to match the shell prompt
        unambiguously. This method is called automatically by the login()
        method, but you may want to call it manually if you somehow reset the
        shell prompt. For example, if you 'su' to a different user then you
        will need to manually reset the prompt. This sends shell commands to
        the remote host to set the prompt, so this assumes the remote host is
        ready to receive commands.
        Alternatively, you may use your own prompt pattern. Just set the PROMPT
        attribute to a regular expression that matches it. In this case you
        should call login() with auto_prompt_reset=False; then set the PROMPT
        attribute. After that the prompt() method will try to match your prompt
        pattern."""
        self.sendline ("unset PROMPT_COMMAND")
        self.sendline (self.PROMPT_SET_SH) # sh-style
        i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
        if i == 0: # csh-style
            # The sh-style attempt timed out; try the csh syntax instead.
            self.sendline (self.PROMPT_SET_CSH)
            i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
            if i == 0:
                return False
        return True
# vi:ts=4:sw=4:expandtab:ft=python:
| {
"content_hash": "49aefb83aa812b3e9c6a25f5610bffba",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 255,
"avg_line_length": 46.45092024539877,
"alnum_prop": 0.6235884567126725,
"repo_name": "rockaboxmedia/pexpect",
"id": "9dd67b30d73a2bc40a700a0912d1139684258d19",
"size": "15143",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pxssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2453"
},
{
"name": "Python",
"bytes": "346736"
}
],
"symlink_target": ""
} |
from tg import config
from tg import hooks
from tg.configuration import milestones
import logging
log = logging.getLogger('tgext.pyutilservice')
# This is the entry point of your extension, will be called
# both when the user plugs the extension manually or through tgext.pluggable
# What you write here has the same effect as writing it into app_cfg.py
# So it is possible to plug other extensions you depend on.
def plugme(configurator, options=None):
    """Entry point of the extension, invoked both when the user plugs
    it manually and through tgext.pluggable; code here has the same
    effect as code written in app_cfg.py."""
    options = {} if options is None else options
    log.info('Setting up tgext.pyutilservice extension...')
    milestones.config_ready.register(SetupExtension(configurator))
    # Returning the appid keeps this compatible with the
    # tgext.pluggable interface.
    return {'appid': 'tgext.pyutilservice'}
# Most of your extension initialization should probably happen here,
# where it's granted that .ini configuration file has already been loaded
# in tg.config but you can still register hooks or other milestones.
class SetupExtension(object):
    """Deferred initialization of the extension, run once the .ini
    configuration has been loaded into tg.config: registers a startup
    hook and a pass-through application wrapper."""

    def __init__(self, configurator):
        self.configurator = configurator

    def __call__(self):
        print ('>>> Public files path is %s' % config['paths']['static_files'])
        hooks.register('startup', self.on_startup)

        # Application Wrappers are much like easier WSGI Middleware
        # that get a TurboGears context and return a Response object.
        # This one simply forwards every request unchanged.
        def echo_wrapper_factory(handler, config):
            def echo_wrapper(controller, environ, context):
                return handler(controller, environ, context)
            return echo_wrapper

        self.configurator.register_wrapper(echo_wrapper_factory)

    def on_startup(self):
        log.info('tgext.pyutilservice + Application Running!')
from .utility import Utility
from .urlutility import URLUtility
from .surveyvariable import UsersGroup, UsersPermission, SocialType, SessionData, RandomType, ClossType, CheckFinish, QuestionType, VoterDataMapType, MarriageStatusType, TypeQuestionProject, StatusOptionProject, PasswordType, FixDifficultyLevel
from .surveyvariable import ModulePages
from .managesession import ManageSession
from .converthtml2pdf import ConvertHtml2Pdf
from .convertpdf2image import ConvertPdf2image
from .tguserobject import extraLog
from .validatelanguage import checkLanguage, checkParamLanguage
| {
"content_hash": "8120dab71b9bfc96d2a48414ea8031ed",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 244,
"avg_line_length": 40.721311475409834,
"alnum_prop": 0.7363123993558777,
"repo_name": "tongpa/tgext.pyutilservice",
"id": "c55cf70f738940ac66c40a0f74ca376ffddf774c",
"size": "2484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tgext/pyutilservice/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "190"
},
{
"name": "Python",
"bytes": "36971"
}
],
"symlink_target": ""
} |
"""SQL Lexer"""
# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
import re
import sys
from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
from cStringIO import StringIO
class include(str):
    # Marker subclass of str used in token definitions: names another
    # state whose rules should be inlined at this position (see
    # LexerMeta._process_state).
    pass
class combined(tuple):
    """Marker tuple of state names: indicates a state combined from
    multiple existing states (resolved by LexerMeta._process_state)."""

    def __new__(cls, *args):
        # Pack the positional state names into the tuple itself.
        return super(combined, cls).__new__(cls, args)

    def __init__(self, *args):
        # tuple is immutable; there is nothing left to initialize.
        pass
def is_keyword(value):
    """Classify an identifier: return ``(token type, value)`` where the
    type is the keyword token registered for the upper-cased value in
    KEYWORDS_COMMON/KEYWORDS, or ``tokens.Name`` otherwise."""
    upper = value.upper()
    ttype = KEYWORDS_COMMON.get(upper, KEYWORDS.get(upper, tokens.Name))
    return ttype, value
def apply_filters(stream, filters, lexer=None):
    """
    Apply an iterable of filters to a token stream, lazily chaining
    each filter's output into the next one's input. If `lexer` is
    given it is forwarded to every filter, otherwise the filters
    receive `None`.
    """
    def _wrap(flt, tokens_in):
        # Generator wrapper so the filter is only invoked when the
        # combined stream is actually iterated.
        for token in flt.filter(lexer, tokens_in):
            yield token

    for flt in filters:
        stream = _wrap(flt, stream)
    return stream
class LexerMeta(type):
    """
    Metaclass for Lexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_state(cls, unprocessed, processed, state):
        # Compile the regex rules of one state, resolving `include`
        # references and `combined` states, and normalizing the
        # optional state-transition spec of each rule.
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokenlist = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokenlist.extend(cls._process_state(
                    unprocessed, processed, str(tdef)))
                continue
            assert type(tdef) is tuple, "wrong rule def %r" % tdef
            try:
                rex = re.compile(tdef[0], rflags).match
            # `except ... as err` works on Python 2.6+ and Python 3,
            # unlike the old comma form which is Python-2-only syntax.
            except Exception as err:
                raise ValueError(("uncompilable regex %r in state"
                                  " %r of %r: %s"
                                  % (tdef[0], state, cls, err)))
            assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
                ('token type must be simple type or callable, not %r'
                 % (tdef[1],))
            if len(tdef) == 2:
                new_state = None
            else:
                tdef2 = tdef[2]
                if isinstance(tdef2, str):
                    # an existing state
                    if tdef2 == '#pop':
                        new_state = -1
                    elif tdef2 in unprocessed:
                        new_state = (tdef2,)
                    elif tdef2 == '#push':
                        new_state = tdef2
                    elif tdef2[:5] == '#pop:':
                        new_state = -int(tdef2[5:])
                    else:
                        assert False, 'unknown new state %r' % tdef2
                elif isinstance(tdef2, combined):
                    # combine a new state from existing ones
                    new_state = '_tmp_%d' % cls._tmpname
                    cls._tmpname += 1
                    itokens = []
                    for istate in tdef2:
                        assert istate != state, \
                            'circular state ref %r' % istate
                        itokens.extend(cls._process_state(unprocessed,
                                                          processed, istate))
                    processed[new_state] = itokens
                    new_state = (new_state,)
                elif isinstance(tdef2, tuple):
                    # push more than one state. Use a distinct loop
                    # variable here: the original reused `state`, which
                    # clobbered the enclosing state name still used by
                    # the assertions of subsequent rules.
                    for tstate in tdef2:
                        assert (tstate in unprocessed or
                                tstate in ('#pop', '#push')), \
                            'unknown new state ' + tstate
                    new_state = tdef2
                else:
                    assert False, 'unknown new state def %r' % tdef2
            tokenlist.append((rex, tdef[1], new_state))
        return tokenlist

    def process_tokendef(cls):
        # Build the fully-compiled rule table for every state declared
        # in cls.tokens.
        cls._all_tokens = {}
        cls._tmpname = 0
        processed = cls._all_tokens[cls.__name__] = {}
        #tokendefs = tokendefs or cls.tokens[name]
        for state in cls.tokens.keys():
            cls._process_state(cls.tokens, processed, state)
        return processed

    def __call__(cls, *args, **kwds):
        # Compile the token definitions lazily, on the first
        # instantiation of the class, then instantiate normally.
        if not hasattr(cls, '_tokens'):
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef()
        return type.__call__(cls, *args, **kwds)
class Lexer(object):
    """Regex-based streaming SQL lexer.

    The ``tokens`` class attribute maps state names to lists of
    ``(regex, token type[, new state])`` rules; the ``LexerMeta``
    metaclass compiles it into ``_tokens`` on first instantiation.
    Input is consumed from a stream in chunks of ``bufsize`` bytes so
    that tokens spanning a chunk boundary can be retried with a larger
    buffer.
    """
    __metaclass__ = LexerMeta
    encoding = 'utf-8'
    stripall = False
    stripnl = False
    tabsize = 0
    flags = re.IGNORECASE | re.UNICODE
    # Chunked reading: start at DEFAULT_BUFSIZE and grow (capped at
    # MAX_BUFSIZE) whenever no rule matches at the end of the buffer.
    DEFAULT_BUFSIZE = 4096
    MAX_BUFSIZE = 2 ** 31
    bufsize = DEFAULT_BUFSIZE
    tokens = {
        'root': [
            (r'--.*?(\r\n|\r|\n)', tokens.Comment.Single),
            # $ matches *before* newline, therefore we have two patterns
            # to match Comment.Single
            (r'--.*?$', tokens.Comment.Single),
            (r'(\r|\n|\r\n)', tokens.Newline),
            (r'\s+', tokens.Whitespace),
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r':=', tokens.Assignment),
            (r'::', tokens.Punctuation),
            (r'[*]', tokens.Wildcard),
            (r'CASE\b', tokens.Keyword),  # extended CASE(foo)
            (r"`(``|[^`])*`", tokens.Name),
            (r"´(´´|[^´])*´", tokens.Name),
            (r'\$([^\W\d]\w*)?\$', tokens.Name.Builtin),
            (r'\?{1}', tokens.Name.Placeholder),
            (r'[$:?%]\w+', tokens.Name.Placeholder),
            # FIXME(andi): VALUES shouldn't be listed here
            # see https://github.com/andialbrecht/sqlparse/pull/64
            (r'VALUES', tokens.Keyword),
            (r'@[^\W\d_]\w+', tokens.Name),
            (r'[^\W\d_]\w*(?=[.(])', tokens.Name),  # see issue39
            (r'[-]?0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
            (r'[-]?[0-9]*\.[0-9]+', tokens.Number.Float),
            (r'[-]?[0-9]+', tokens.Number.Integer),
            # TODO: Backslash escapes?
            (r"(''|'.*?[^\\]')", tokens.String.Single),
            # not a real string literal in ANSI SQL:
            (r'(""|".*?[^\\]")', tokens.String.Symbol),
            (r'(\[.*[^\]]\])', tokens.Name),
            (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword),
            (r'END( IF| LOOP)?\b', tokens.Keyword),
            (r'NOT NULL\b', tokens.Keyword),
            (r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL),
            (r'(?<=\.)[^\W\d_]\w*', tokens.Name),
            (r'[^\W\d_]\w*', is_keyword),
            (r'[;:()\[\],\.]', tokens.Punctuation),
            (r'[<>=~!]+', tokens.Operator.Comparison),
            (r'[+/@#%^&|`?^-]+', tokens.Operator),
        ],
        'multiline-comments': [
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r'\*/', tokens.Comment.Multiline, '#pop'),
            (r'[^/\*]+', tokens.Comment.Multiline),
            (r'[/*]', tokens.Comment.Multiline)
        ]}

    def __init__(self):
        # Filters applied to the token stream by get_tokens().
        self.filters = []

    def add_filter(self, filter_, **options):
        # Accepts either a Filter instance or a Filter subclass plus
        # the keyword options to construct it with.
        from sqlparse.filters import Filter
        if not isinstance(filter_, Filter):
            filter_ = filter_(**options)
        self.filters.append(filter_)

    def _decode(self, text):
        # Decode `text` to unicode using self.encoding and expand tabs
        # when tabsize is set. 'guess' tries UTF-8 (stripping a BOM)
        # and falls back to latin1; on Python 3 a str passes through.
        if sys.version_info[0] == 3:
            if isinstance(text, str):
                return text
        if self.encoding == 'guess':
            try:
                text = text.decode('utf-8')
                if text.startswith(u'\ufeff'):
                    text = text[len(u'\ufeff'):]
            except UnicodeDecodeError:
                text = text.decode('latin1')
        else:
            try:
                text = text.decode(self.encoding)
            except UnicodeDecodeError:
                text = text.decode('unicode-escape')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        return text

    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.
        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.
        """
        if isinstance(text, basestring):
            if self.stripall:
                text = text.strip()
            elif self.stripnl:
                text = text.strip('\n')
            if sys.version_info[0] < 3 and isinstance(text, unicode):
                text = StringIO(text.encode('utf-8'))
                self.encoding = 'utf-8'
            else:
                text = StringIO(text)

        def streamer():
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, stream, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the inital stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens  # see __call__, pylint:disable=E1101
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        known_names = {}
        text = stream.read(self.bufsize)
        # A full read suggests more data may follow in the stream.
        hasmore = len(text) == self.bufsize
        text = self._decode(text)
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if hasmore and m.end() == len(text):
                        # Since this is end, token may be truncated
                        continue
                    # print rex.pattern
                    value = m.group()
                    if value in known_names:
                        yield pos, known_names[value], value
                    elif type(action) is tokens._TokenType:
                        yield pos, action, value
                    elif hasattr(action, '__call__'):
                        ttype, value = action(value)
                        known_names[value] = ttype
                        yield pos, ttype, value
                    else:
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    # reset bufsize
                    self.bufsize = self.DEFAULT_BUFSIZE
                    break
            else:
                # No rule matched at `pos`.
                if hasmore:
                    # we have no match, increase bufsize to parse lengthy
                    # tokens faster (see #86).
                    self.bufsize = min(self.bufsize * 2, self.MAX_BUFSIZE)
                    buf = stream.read(self.bufsize)
                    hasmore = len(buf) == self.bufsize
                    text = text[pos:] + self._decode(buf)
                    pos = 0
                    continue
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        pos += 1
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, tokens.Text, u'\n'
                        continue
                    yield pos, tokens.Error, text[pos]
                    pos += 1
                except IndexError:
                    # End of input reached.
                    break
def tokenize(sql, encoding=None):
    """Tokenize *sql* with a :class:`Lexer`, returning a 2-tuple
    stream of ``(token type, value)`` items. An explicit *encoding*
    overrides the lexer's default of utf-8."""
    lx = Lexer()
    if encoding is not None:
        lx.encoding = encoding
    return lx.get_tokens(sql)
| {
"content_hash": "b79ada2d07a4118e03070120d3c720c1",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 77,
"avg_line_length": 36.983379501385045,
"alnum_prop": 0.47060145307467605,
"repo_name": "dayutianfei/impala-Q",
"id": "4d200a608f7aa20298939556fbded98a2afc67d3",
"size": "13578",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "shell/ext-py/sqlparse-0.1.7/sqlparse/lexer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15836"
},
{
"name": "C++",
"bytes": "5750170"
},
{
"name": "CMake",
"bytes": "90614"
},
{
"name": "CSS",
"bytes": "86925"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3897527"
},
{
"name": "Lex",
"bytes": "21323"
},
{
"name": "Python",
"bytes": "1228318"
},
{
"name": "Shell",
"bytes": "131062"
},
{
"name": "Thrift",
"bytes": "241288"
},
{
"name": "Yacc",
"bytes": "80074"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from mock import patch
class Product(object):
    # Placeholder class: the tests below replace this module-global
    # with a Mock via @patch('__main__.Product').
    pass
@patch('__main__.Product')
class MyTest(TestCase):
    # A class-level @patch wraps every method whose name starts with
    # "test", passing the mock class as an extra argument.
    def test_one(self, MockSomeClass):
        # While patched, the module-global Product *is* the mock class.
        self.assertTrue(Product is MockSomeClass)
        print "Success in mock class"
    def test_two(self, MockSomeClass):
        self.assertTrue(Product is MockSomeClass)
        print "Success in mock class"
    def not_a_test(self):
        # Name has no "test" prefix, so @patch does not wrap it and no
        # mock argument is injected.
        return 'something'
# Invoke the patched test methods directly, outside any test runner.
MyTest('test_one').test_one()
MyTest('test_two').test_two()
print MyTest('test_two').not_a_test() | {
"content_hash": "99d8d2e8498f0e4ed6242e1c57d0767b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 49,
"avg_line_length": 21.92,
"alnum_prop": 0.6751824817518248,
"repo_name": "peter-wangxu/python_play",
"id": "a8eb0efdcf59355b7615324ae7f2cff2e0d6f917",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mock_test/mock_class.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "392"
},
{
"name": "Python",
"bytes": "57944"
}
],
"symlink_target": ""
} |
import json
import csv

# Convert a Google Takeout LocationHistory.json dump into a flat CSV
# of (lat, lon, time, accuracy) rows. Google stores coordinates as
# integers scaled by 1e7 ("E7" format), hence the division below.

print("Opening location history data...")
with open("Takeout/LocationHistory.json", 'r') as inf:
    in_data = json.load(inf)

print("Parsing...")
out_data = []
for x in in_data["locations"]:
    out_row = [x["latitudeE7"] / 10000000, x["longitudeE7"] / 10000000,
               x["timestampMs"], x["accuracy"]]
    out_data.append(out_row)

print("Outputting to CSV...")
# Context manager guarantees the CSV file is flushed and closed even
# on error -- the original script never closed this handle.
with open("Takeout/parsed.csv", 'w', newline='\n') as of:
    csv_file = csv.writer(of)
    csv_file.writerow(["lat", "lon", "time", "accuracy"])
    csv_file.writerows(out_data)

print("Done")
| {
"content_hash": "928026b0053c561eae0c9e61c0119fd9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 101,
"avg_line_length": 21.692307692307693,
"alnum_prop": 0.6631205673758865,
"repo_name": "jacobjiggler/Location-Visualizer",
"id": "abeabc64e14b49a3c9cebd26c89452c4873069c6",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8518"
}
],
"symlink_target": ""
} |
import warnings

# Silence every warning category before the wildcard imports below
# pull in (and execute) the individual test modules.
warnings.simplefilter('ignore', Warning)

from gencmd.tests.new import *
from gencmd.tests.controller import *
from gencmd.tests.model import *
from gencmd.tests.scaffold import *
| {
"content_hash": "7aab5d3e32ebbd9c813393e3da4fa1e7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 18.181818181818183,
"alnum_prop": 0.785,
"repo_name": "ikeikeikeike/django-spine",
"id": "ba19c9bfe113a874a2491979c7ab249f4f9d5079",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gencmd/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "6666"
},
{
"name": "JavaScript",
"bytes": "18679"
},
{
"name": "Python",
"bytes": "34365"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
} |
"""Plugin for Disney (Channel) Germany
Supports:
- http://video.disney.de/sehen/*
- http://disneychannel.de/sehen/*
- http://disneychannel.de/livestream
"""
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api.utils import parse_json
from streamlink.stream import HLSStream
_url_re = re.compile("http(s)?://(\w+\.)?disney(channel)?.de/")
# stream urls are in `Grill.burger`->stack->data->externals->data
_stream_hls_re = re.compile("\"hlsStreamUrl\":\s*(\"[^\"]+\")")
_stream_data_re = re.compile("\"dataUrl\":\s*(\"[^\"]+\")")
class DisneyDE(Plugin):
    """Streamlink plugin for Disney (Channel) Germany video pages and
    the live stream."""

    @classmethod
    def can_handle_url(cls, url):
        # Non-None match object signals that this plugin handles `url`.
        return _url_re.match(url)

    def _get_streams(self):
        page = http.get(self.url)
        # Prefer the direct HLS url; fall back to the data url.
        match = _stream_hls_re.search(page.text)
        if match is None:
            match = _stream_data_re.search(page.text)
        if not match:
            return
        stream_url = parse_json(match.group(1))
        return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = DisneyDE
| {
"content_hash": "b6e2875bc7e3578b66b4ab09f6f69864",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 27.41025641025641,
"alnum_prop": 0.6379794200187091,
"repo_name": "sbstp/streamlink",
"id": "e60e2632eec63b5e1c0948f9039c12e018ed593f",
"size": "1069",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/disney_de.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "664141"
},
{
"name": "Shell",
"bytes": "13796"
}
],
"symlink_target": ""
} |
# Sphinx configuration for building the Keystone release notes
# (release notes are managed with reno).
extensions = [
    'oslosphinx',
    'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Keystone Release Notes'
copyright = u'2015, Keystone Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# pbr derives the version string from the package/git metadata.
import pbr.version
keystone_version = pbr.version.VersionInfo('keystone')
# The full version, including alpha/beta/rc tags.
release = keystone_version.version_string_with_vcs()
# The short X.Y version.
version = keystone_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KeystoneReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'KeystoneReleaseNotes.tex',
     u'Keystone Release Notes Documentation',
     u'Keystone Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'keystonereleasenotes', u'Keystone Release Notes Documentation',
     [u'Keystone Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'KeystoneReleaseNotes', u'Keystone Release Notes Documentation',
     u'Keystone Developers', 'KeystoneReleaseNotes',
     'Identity, Authentication and Access Management for OpenStack.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| {
"content_hash": "1fc67482b6de0063f1a7a5c947a5dd9a",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 79,
"avg_line_length": 32.08898305084746,
"alnum_prop": 0.7060610062062591,
"repo_name": "cernops/keystone",
"id": "6df2e041d0685ba82360d1b35370b500f4119711",
"size": "9154",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "releasenotes/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4691908"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	# Build the intangible control-device object for the speederbike
	# vehicle from its shared .iff template. `kernel` is part of the
	# template-factory signature but unused here.
	result = Intangible()
	result.template = "object/intangible/vehicle/shared_speederbike_pcd.iff"
	result.attribute_template_id = -1
	result.stfName("monster_name","speederbike")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result | {
"content_hash": "f83ac7a01f9183f32b00bd87814abb96",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 23.846153846153847,
"alnum_prop": 0.7064516129032258,
"repo_name": "obi-two/Rebelion",
"id": "019bd344a9863cd79ab725fa2aa078b4a83ccd66",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/intangible/vehicle/shared_speederbike_pcd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanprod` -- product of non-NaN values
- `nancumsum` -- cumulative sum of non-NaN values
- `nancumprod` -- cumulative product of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
- `nanquantile` -- qth quantile of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import functools
import warnings
import numpy as np
from numpy.lib import function_base
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
'nancumsum', 'nancumprod', 'nanquantile'
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
a = np.array(a, subok=True, copy=True)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
mask = a != a
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
np.copyto(a, val, where=mask)
return a, mask
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
def _remove_nan_1d(arr1d, overwrite_input=False):
"""
Equivalent to arr1d[~arr1d.isnan()], but in a different order
Presumably faster as it incurs fewer copies
Parameters
----------
arr1d : ndarray
Array to remove nans from
overwrite_input : bool
True if `arr1d` can be modified in place
Returns
-------
res : ndarray
Array with nan elements removed
overwrite_input : bool
True if `res` can be modified in place, given the constraint on the
input
"""
c = np.isnan(arr1d)
s = np.nonzero(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
return arr1d[:0], True
elif s.size == 0:
return arr1d, overwrite_input
else:
if not overwrite_input:
arr1d = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
arr1d[s[:enonan.size]] = enonan
return arr1d[:-s.size], True
def _divide_by_count(a, b, out=None):
    """
    Compute a/b ignoring invalid results. If `a` is an array the division
    is done in place. If `a` is a scalar, then its type is preserved in the
    output. If out is None, then then a is used instead so that the
    division is in place. Note that this is only called with `a` an inexact
    type.
    Parameters
    ----------
    a : {ndarray, numpy scalar}
        Numerator. Expected to be of inexact type but not checked.
    b : {ndarray, numpy scalar}
        Denominator.
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.
    Returns
    -------
    ret : {ndarray, numpy scalar}
        The return value is a/b. If `a` was an ndarray the division is done
        in place. If `a` is a numpy scalar, the division preserves its type.
    """
    with np.errstate(invalid='ignore', divide='ignore'):
        if not isinstance(a, np.ndarray):
            if out is None:
                # Preserve the scalar's type through the division.
                return a.dtype.type(a / b)
            # This is questionable, but currently a numpy scalar can
            # be output to a zero dimensional array.
            return np.divide(a, b, out=out, casting='unsafe')
        # ndarray: divide in place, into `out` if one was supplied,
        # otherwise into `a` itself.
        target = a if out is None else out
        return np.divide(a, b, out=target, casting='unsafe')
def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nanmin_dispatcher)
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Return minimum of an array or minimum along an axis, ignoring any NaNs.
    When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
    Nan is returned for that slice.
    Parameters
    ----------
    a : array_like
        Array containing numbers whose minimum is desired. If `a` is not an
        array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the minimum is computed. The default is to compute
        the minimum of the flattened array.
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.  See
        `doc.ufuncs` for details.
        .. versionadded:: 1.8.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        If the value is anything but the default, then
        `keepdims` will be passed through to the `min` method
        of sub-classes of `ndarray`.  If the sub-classes methods
        does not implement `keepdims` any exceptions will be raised.
        .. versionadded:: 1.8.0
    Returns
    -------
    nanmin : ndarray
        An array with the same shape as `a`, with the specified axis
        removed.  If `a` is a 0-d array, or if axis is None, an ndarray
        scalar is returned.  The same dtype as `a` is returned.
    See Also
    --------
    nanmax :
        The maximum value of an array along a given axis, ignoring any NaNs.
    amin :
        The minimum value of an array along a given axis, propagating any NaNs.
    fmin :
        Element-wise minimum of two arrays, ignoring any NaNs.
    minimum :
        Element-wise minimum of two arrays, propagating any NaNs.
    isnan :
        Shows which elements are Not a Number (NaN).
    isfinite:
        Shows which elements are neither NaN nor infinity.
    amax, fmax, maximum
    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.
    Positive infinity is treated as a very large number and negative
    infinity is treated as a very small (i.e. negative) number.
    If the input has a integer type the function is equivalent to np.min.
    Examples
    --------
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanmin(a)
    1.0
    >>> np.nanmin(a, axis=0)
    array([ 1., 2.])
    >>> np.nanmin(a, axis=1)
    array([ 1., 3.])
    When positive infinity and negative infinity are present:
    >>> np.nanmin([1, 2, np.nan, np.inf])
    1.0
    >>> np.nanmin([1, 2, np.nan, np.NINF])
    -inf
    """
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is np.ndarray and a.dtype != np.object_:
        # Fast path: fmin.reduce skips NaNs at C speed.  Not safe for
        # subclasses of ndarray or for object arrays, which may not
        # implement isnan (gh-9009) or fmin (gh-8975) correctly.
        res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
        if np.isnan(res).any():
            warnings.warn("All-NaN slice encountered", RuntimeWarning,
                          stacklevel=2)
        return res
    # Slow path, safe for subclasses: substitute +inf so NaNs can never
    # win the minimum, then restore NaN on any all-NaN slice.
    a, mask = _replace_nan(a, +np.inf)
    res = np.amin(a, axis=axis, out=out, **kwargs)
    if mask is None:
        return res
    # Check for all-NaN axis
    mask = np.all(mask, axis=axis, **kwargs)
    if np.any(mask):
        res = _copyto(res, np.nan, mask)
        warnings.warn("All-NaN axis encountered", RuntimeWarning,
                      stacklevel=2)
    return res
def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nanmax_dispatcher)
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Return the maximum of an array or maximum along an axis, ignoring any
    NaNs.  When all-NaN slices are encountered a ``RuntimeWarning`` is
    raised and NaN is returned for that slice.
    Parameters
    ----------
    a : array_like
        Array containing numbers whose maximum is desired. If `a` is not an
        array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the maximum is computed. The default is to compute
        the maximum of the flattened array.
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.  See
        `doc.ufuncs` for details.
        .. versionadded:: 1.8.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        If the value is anything but the default, then
        `keepdims` will be passed through to the `max` method
        of sub-classes of `ndarray`.  If the sub-classes methods
        does not implement `keepdims` any exceptions will be raised.
        .. versionadded:: 1.8.0
    Returns
    -------
    nanmax : ndarray
        An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
        returned.  The same dtype as `a` is returned.
    See Also
    --------
    nanmin :
        The minimum value of an array along a given axis, ignoring any NaNs.
    amax :
        The maximum value of an array along a given axis, propagating any NaNs.
    fmax :
        Element-wise maximum of two arrays, ignoring any NaNs.
    maximum :
        Element-wise maximum of two arrays, propagating any NaNs.
    isnan :
        Shows which elements are Not a Number (NaN).
    isfinite:
        Shows which elements are neither NaN nor infinity.
    amin, fmin, minimum
    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.
    Positive infinity is treated as a very large number and negative
    infinity is treated as a very small (i.e. negative) number.
    If the input has a integer type the function is equivalent to np.max.
    Examples
    --------
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanmax(a)
    3.0
    >>> np.nanmax(a, axis=0)
    array([ 3., 2.])
    >>> np.nanmax(a, axis=1)
    array([ 2., 3.])
    When positive infinity and negative infinity are present:
    >>> np.nanmax([1, 2, np.nan, np.NINF])
    2.0
    >>> np.nanmax([1, 2, np.nan, np.inf])
    inf
    """
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is np.ndarray and a.dtype != np.object_:
        # Fast path: fmax.reduce skips NaNs at C speed.  Not safe for
        # subclasses of ndarray or for object arrays, which may not
        # implement isnan (gh-9009) or fmax (gh-8975) correctly.
        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
        if np.isnan(res).any():
            warnings.warn("All-NaN slice encountered", RuntimeWarning,
                          stacklevel=2)
        return res
    # Slow path, safe for subclasses: substitute -inf so NaNs can never
    # win the maximum, then restore NaN on any all-NaN slice.
    a, mask = _replace_nan(a, -np.inf)
    res = np.amax(a, axis=axis, out=out, **kwargs)
    if mask is None:
        return res
    # Check for all-NaN axis
    mask = np.all(mask, axis=axis, **kwargs)
    if np.any(mask):
        res = _copyto(res, np.nan, mask)
        warnings.warn("All-NaN axis encountered", RuntimeWarning,
                      stacklevel=2)
    return res
def _nanargmin_dispatcher(a, axis=None):
    # Arguments that may carry an __array_function__ implementation.
    return (a,)
@array_function_dispatch(_nanargmin_dispatcher)
def nanargmin(a, axis=None):
    """
    Return the indices of the minimum values in the specified axis ignoring
    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
    cannot be trusted if a slice contains only NaNs and Infs.
    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate.  By default flattened input is used.
    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.
    See Also
    --------
    argmin, nanargmax
    Examples
    --------
    >>> a = np.array([[np.nan, 4], [2, 3]])
    >>> np.argmin(a)
    0
    >>> np.nanargmin(a)
    2
    >>> np.nanargmin(a, axis=0)
    array([1, 1])
    >>> np.nanargmin(a, axis=1)
    array([1, 0])
    """
    # Replace NaNs with +inf so they never win argmin; the mask lets us
    # detect slices that were entirely NaN.
    arr, mask = _replace_nan(a, np.inf)
    indices = np.argmin(arr, axis=axis)
    if mask is None:
        return indices
    if np.any(np.all(mask, axis=axis)):
        raise ValueError("All-NaN slice encountered")
    return indices
def _nanargmax_dispatcher(a, axis=None):
    # Arguments that may carry an __array_function__ implementation.
    return (a,)
@array_function_dispatch(_nanargmax_dispatcher)
def nanargmax(a, axis=None):
    """
    Return the indices of the maximum values in the specified axis ignoring
    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
    results cannot be trusted if a slice contains only NaNs and -Infs.
    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate.  By default flattened input is used.
    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.
    See Also
    --------
    argmax, nanargmin
    Examples
    --------
    >>> a = np.array([[np.nan, 4], [2, 3]])
    >>> np.argmax(a)
    0
    >>> np.nanargmax(a)
    1
    >>> np.nanargmax(a, axis=0)
    array([1, 0])
    >>> np.nanargmax(a, axis=1)
    array([1, 1])
    """
    # Replace NaNs with -inf so they never win argmax; the mask lets us
    # detect slices that were entirely NaN.
    arr, mask = _replace_nan(a, -np.inf)
    indices = np.argmax(arr, axis=axis)
    if mask is None:
        return indices
    if np.any(np.all(mask, axis=axis)):
        raise ValueError("All-NaN slice encountered")
    return indices
def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nansum_dispatcher)
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Return the sum of array elements over a given axis treating Not a
    Numbers (NaNs) as zero.
    In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or
    empty. In later versions zero is returned.
    Parameters
    ----------
    a : array_like
        Array containing numbers whose sum is desired. If `a` is not an
        array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the sum is computed. The default is to compute the
        sum of the flattened array.
    dtype : data-type, optional
        The type of the returned array and of the accumulator in which the
        elements are summed.  By default, the dtype of `a` is used.  An
        exception is when `a` has an integer type with less precision than
        the platform (u)intp. In that case, the default will be either
        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
        bits. For inexact inputs, dtype must be inexact.
        .. versionadded:: 1.8.0
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``. If provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.  See
        `doc.ufuncs` for details. The casting of NaN to integer can yield
        unexpected results.
        .. versionadded:: 1.8.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        If the value is anything but the default, then
        `keepdims` will be passed through to the `mean` or `sum` methods
        of sub-classes of `ndarray`.  If the sub-classes methods
        does not implement `keepdims` any exceptions will be raised.
        .. versionadded:: 1.8.0
    Returns
    -------
    nansum : ndarray.
        A new array holding the result is returned unless `out` is
        specified, in which it is returned. The result has the same
        size as `a`, and the same shape as `a` if `axis` is not None
        or `a` is a 1-d array.
    See Also
    --------
    numpy.sum : Sum across array propagating NaNs.
    isnan : Show which elements are NaN.
    isfinite: Show which elements are not NaN or +/-inf.
    Notes
    -----
    If both positive and negative infinity are present, the sum will be Not
    A Number (NaN).
    Examples
    --------
    >>> np.nansum(1)
    1
    >>> np.nansum([1])
    1
    >>> np.nansum([1, np.nan])
    1.0
    >>> a = np.array([[1, 1], [1, np.nan]])
    >>> np.nansum(a)
    3.0
    >>> np.nansum(a, axis=0)
    array([ 2.,  1.])
    >>> np.nansum([1, np.nan, np.inf])
    inf
    >>> np.nansum([1, np.nan, np.NINF])
    -inf
    >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
    nan
    """
    # NaNs are replaced by 0, the additive identity; the NaN mask returned
    # by _replace_nan is not needed here, so discard it.
    a, _ = _replace_nan(a, 0)
    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nanprod_dispatcher)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Return the product of array elements over a given axis treating Not a
    Numbers (NaNs) as ones.
    One is returned for slices that are all-NaN or empty.
    .. versionadded:: 1.10.0
    Parameters
    ----------
    a : array_like
        Array containing numbers whose product is desired. If `a` is not an
        array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the product is computed. The default is to compute
        the product of the flattened array.
    dtype : data-type, optional
        The type of the returned array and of the accumulator in which the
        elements are summed.  By default, the dtype of `a` is used.  An
        exception is when `a` has an integer type with less precision than
        the platform (u)intp. In that case, the default will be either
        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
        bits. For inexact inputs, dtype must be inexact.
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``. If provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.  See
        `doc.ufuncs` for details. The casting of NaN to integer can yield
        unexpected results.
    keepdims : bool, optional
        If True, the axes which are reduced are left in the result as
        dimensions with size one. With this option, the result will
        broadcast correctly against the original `arr`.
    Returns
    -------
    nanprod : ndarray
        A new array holding the result is returned unless `out` is
        specified, in which case it is returned.
    See Also
    --------
    numpy.prod : Product across array propagating NaNs.
    isnan : Show which elements are NaN.
    Examples
    --------
    >>> np.nanprod(1)
    1
    >>> np.nanprod([1])
    1
    >>> np.nanprod([1, np.nan])
    1.0
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanprod(a)
    6.0
    >>> np.nanprod(a, axis=0)
    array([ 3., 2.])
    """
    # NaNs are replaced by 1, the multiplicative identity; the NaN mask
    # returned by _replace_nan is not needed here, so discard it.
    a, _ = _replace_nan(a, 1)
    return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nancumsum_dispatcher)
def nancumsum(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative sum of array elements over a given axis treating Not a
    Numbers (NaNs) as zero.  The cumulative sum does not change when NaNs are
    encountered and leading NaNs are replaced by zeros.
    Zeros are returned for slices that are all-NaN or empty.
    .. versionadded:: 1.12.0
    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed. The default
        (None) is to compute the cumsum over the flattened array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed.  If `dtype` is not specified, it defaults
        to the dtype of `a`, unless `a` has an integer dtype with a
        precision less than that of the default platform integer.  In
        that case, the default platform integer is used.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary. See `doc.ufuncs`
        (Section "Output arguments") for more details.
    Returns
    -------
    nancumsum : ndarray.
        A new array holding the result is returned unless `out` is
        specified, in which it is returned. The result has the same
        size as `a`, and the same shape as `a` if `axis` is not None
        or `a` is a 1-d array.
    See Also
    --------
    numpy.cumsum : Cumulative sum across array propagating NaNs.
    isnan : Show which elements are NaN.
    Examples
    --------
    >>> np.nancumsum(1)
    array([1])
    >>> np.nancumsum([1])
    array([1])
    >>> np.nancumsum([1, np.nan])
    array([ 1.,  1.])
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nancumsum(a)
    array([ 1.,  3.,  6.,  6.])
    >>> np.nancumsum(a, axis=0)
    array([[ 1.,  2.],
        [ 4.,  2.]])
    >>> np.nancumsum(a, axis=1)
    array([[ 1.,  3.],
        [ 3.,  3.]])
    """
    # NaNs are replaced by 0, the additive identity; the NaN mask returned
    # by _replace_nan is not needed here, so discard it.
    a, _ = _replace_nan(a, 0)
    return np.cumsum(a, axis=axis, dtype=dtype, out=out)
def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nancumprod_dispatcher)
def nancumprod(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of array elements over a given axis treating Not a
    Numbers (NaNs) as one.  The cumulative product does not change when NaNs are
    encountered and leading NaNs are replaced by ones.
    Ones are returned for slices that are all-NaN or empty.
    .. versionadded:: 1.12.0
    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed.  By default
        the input is flattened.
    dtype : dtype, optional
        Type of the returned array, as well as of the accumulator in which
        the elements are multiplied.  If *dtype* is not specified, it
        defaults to the dtype of `a`, unless `a` has an integer dtype with
        a precision less than that of the default platform integer.  In
        that case, the default platform integer is used instead.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type of the resulting values will be cast if necessary.
    Returns
    -------
    nancumprod : ndarray
        A new array holding the result is returned unless `out` is
        specified, in which case it is returned.
    See Also
    --------
    numpy.cumprod : Cumulative product across array propagating NaNs.
    isnan : Show which elements are NaN.
    Examples
    --------
    >>> np.nancumprod(1)
    array([1])
    >>> np.nancumprod([1])
    array([1])
    >>> np.nancumprod([1, np.nan])
    array([ 1.,  1.])
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nancumprod(a)
    array([ 1.,  2.,  6.,  6.])
    >>> np.nancumprod(a, axis=0)
    array([[ 1.,  2.],
        [ 3.,  2.]])
    >>> np.nancumprod(a, axis=1)
    array([[ 1.,  2.],
        [ 3.,  3.]])
    """
    # NaNs are replaced by 1, the multiplicative identity; the NaN mask
    # returned by _replace_nan is not needed here, so discard it.
    a, _ = _replace_nan(a, 1)
    return np.cumprod(a, axis=axis, dtype=dtype, out=out)
def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nanmean_dispatcher)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Compute the arithmetic mean along the specified axis, ignoring NaNs.
    Returns the average of the array elements.  The average is taken over
    the flattened array by default, otherwise over the specified axis.
    `float64` intermediate and return values are used for integer inputs.
    For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
    .. versionadded:: 1.8.0
    Parameters
    ----------
    a : array_like
        Array containing numbers whose mean is desired. If `a` is not an
        array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the means are computed. The default is to compute
        the mean of the flattened array.
    dtype : data-type, optional
        Type to use in computing the mean.  For integer inputs, the default
        is `float64`; for inexact inputs, it is the same as the input
        dtype.
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.  See
        `doc.ufuncs` for details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        If the value is anything but the default, then
        `keepdims` will be passed through to the `mean` or `sum` methods
        of sub-classes of `ndarray`.  If the sub-classes methods
        does not implement `keepdims` any exceptions will be raised.
    Returns
    -------
    m : ndarray, see dtype parameter above
        If `out=None`, returns a new array containing the mean values,
        otherwise a reference to the output array is returned. Nan is
        returned for slices that contain only NaNs.
    See Also
    --------
    average : Weighted average
    mean : Arithmetic mean taken while not ignoring NaNs
    var, nanvar
    Notes
    -----
    The arithmetic mean is the sum of the non-NaN elements along the axis
    divided by the number of non-NaN elements.
    Note that for floating-point input, the mean is computed using the same
    precision the input has.  Depending on the input data, this can cause
    the results to be inaccurate, especially for `float32`.  Specifying a
    higher-precision accumulator using the `dtype` keyword can alleviate
    this issue.
    Examples
    --------
    >>> a = np.array([[1, np.nan], [3, 4]])
    >>> np.nanmean(a)
    2.6666666666666665
    >>> np.nanmean(a, axis=0)
    array([ 2., 4.])
    >>> np.nanmean(a, axis=1)
    array([ 1., 3.5])
    """
    arr, mask = _replace_nan(a, 0)
    if mask is None:
        # Input cannot hold NaNs (e.g. integer dtype): plain mean suffices.
        return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    if dtype is not None:
        dtype = np.dtype(dtype)
        if not issubclass(dtype.type, np.inexact):
            raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")
    # Count the valid (non-NaN) entries per slice and sum with NaNs zeroed.
    valid_count = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
    total = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    result = _divide_by_count(total, valid_count, out=out)
    if (valid_count == 0).any():
        # 0/0 above already produced NaN for empty slices, so only the
        # warning is needed here.
        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
    return result
def _nanmedian1d(arr1d, overwrite_input=False):
    """
    Private function for rank 1 arrays. Compute the median ignoring NaNs.
    See nanmedian for parameter usage
    """
    # Compact the array by dropping NaNs, then take an ordinary median.
    arr1d, overwrite_input = _remove_nan_1d(
        arr1d, overwrite_input=overwrite_input)
    if arr1d.size:
        return np.median(arr1d, overwrite_input=overwrite_input)
    return np.nan
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanmedian for parameter usage
    """
    if axis is None or a.ndim == 1:
        flat = a.ravel()
        median = _nanmedian1d(flat, overwrite_input)
        if out is None:
            return median
        out[...] = median
        return out
    # For short reduction axes the sort + indexing implementation beats
    # the heavy per-slice overhead of apply_along_axis (benchmarked with
    # shuffled (50, 50, x) arrays containing a few NaNs).
    if a.shape[axis] < 600:
        return _nanmedian_small(a, axis, out, overwrite_input)
    result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
    if out is not None:
        out[...] = result
    return result
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
    """
    sort + indexing median, faster for small medians along multiple
    dimensions due to the high overhead of apply_along_axis
    see nanmedian for parameter usage
    """
    # Mask the NaNs and let the masked-array median ignore them.
    masked = np.ma.masked_array(a, np.isnan(a))
    med = np.ma.median(masked, axis=axis, overwrite_input=overwrite_input)
    # Emit one warning per all-NaN slice, matching the other code paths.
    for _ in range(np.count_nonzero(med.mask.ravel())):
        warnings.warn("All-NaN slice encountered", RuntimeWarning,
                      stacklevel=3)
    if out is not None:
        out[...] = med.filled(np.nan)
        return out
    return med.filled(np.nan)
def _nanmedian_dispatcher(
        a, axis=None, out=None, overwrite_input=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, out
@array_function_dispatch(_nanmedian_dispatcher)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
    """
    Compute the median along the specified axis, while ignoring NaNs.
    Returns the median of the array elements.
    .. versionadded:: 1.9.0
    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed. The default
        is to compute the median along a flattened version of the array.
        A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
       If True, then allow use of memory of input array `a` for
       calculations. The input array will be modified by the call to
       `median`. This will save memory when you do not need to preserve
       the contents of the input array. Treat the input as undefined,
       but it will probably be fully or partially sorted. Default is
       False. If `overwrite_input` is ``True`` and `a` is not already an
       `ndarray`, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        If this is anything but the default value it will be passed
        through (in the special case of an empty array) to the
        `mean` function of the underlying array.  If the array is
        a sub-class and `mean` does not have the kwarg `keepdims` this
        will raise a RuntimeError.
    Returns
    -------
    median : ndarray
        A new array holding the result. If the input contains integers
        or floats smaller than ``float64``, then the output data-type is
        ``np.float64``.  Otherwise, the data-type of the output is the
        same as that of the input. If `out` is specified, that array is
        returned instead.
    See Also
    --------
    mean, median, percentile
    Notes
    -----
    Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
    ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
    middle values of ``V_sorted`` when ``N`` is even.
    Examples
    --------
    >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
    >>> a[0, 1] = np.nan
    >>> a
    array([[ 10., nan, 4.],
        [ 3., 2., 1.]])
    >>> np.median(a)
    nan
    >>> np.nanmedian(a)
    3.0
    >>> np.nanmedian(a, axis=0)
    array([ 6.5, 2., 2.5])
    >>> np.median(a, axis=1)
    array([ 7., 2.])
    >>> b = a.copy()
    >>> np.nanmedian(b, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.nanmedian(b, axis=None, overwrite_input=True)
    3.0
    >>> assert not np.all(a==b)
    """
    a = np.asanyarray(a)
    # apply_along_axis in _nanmedian doesn't handle empty arrays well,
    # so deal with them up front (nanmean produces the expected warning).
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    result, kept_shape = function_base._ureduce(
        a, func=_nanmedian, axis=axis, out=out,
        overwrite_input=overwrite_input)
    if keepdims and keepdims is not np._NoValue:
        return result.reshape(kept_shape)
    return result
def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                              interpolation=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, q, out
@array_function_dispatch(_nanpercentile_dispatcher)
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                  interpolation='linear', keepdims=np._NoValue):
    """
    Compute the qth percentile of the data along the specified axis,
    while ignoring nan values.
    Returns the qth percentile(s) of the array elements.
    .. versionadded:: 1.9.0
    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array, containing
        nan values to be ignored.
    q : array_like of float
        Percentile or sequence of percentiles to compute, which must be between
        0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow the input array `a` to be modified by intermediate
        calculations, to save memory. In this case, the contents of the input
        `a` after this function completes is undefined.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to
        use when the desired percentile lies between two data points
        ``i < j``:
        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
          is the fractional part of the index surrounded by ``i``
          and ``j``.
        * 'lower': ``i``.
        * 'higher': ``j``.
        * 'nearest': ``i`` or ``j``, whichever is nearest.
        * 'midpoint': ``(i + j) / 2``.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.
        If this is anything but the default value it will be passed
        through (in the special case of an empty array) to the
        `mean` function of the underlying array.  If the array is
        a sub-class and `mean` does not have the kwarg `keepdims` this
        will raise a RuntimeError.
    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.
    See Also
    --------
    nanmean
    nanmedian : equivalent to ``nanpercentile(..., 50)``
    percentile, median, mean
    nanquantile : equivalent to nanpercentile, but with q in the range [0, 1].
    Notes
    -----
    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
    the two nearest neighbors as well as the `interpolation` parameter
    will determine the percentile if the normalized ranking does not
    match the location of ``q`` exactly. This function is the same as
    the median if ``q=50``, the same as the minimum if ``q=0`` and the
    same as the maximum if ``q=100``.
    Examples
    --------
    >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
    >>> a[0][1] = np.nan
    >>> a
    array([[ 10., nan, 4.],
        [ 3., 2., 1.]])
    >>> np.percentile(a, 50)
    nan
    >>> np.nanpercentile(a, 50)
    3.5
    >>> np.nanpercentile(a, 50, axis=0)
    array([ 6.5, 2., 2.5])
    >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
    array([[ 7.],
        [ 2.]])
    >>> m = np.nanpercentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.nanpercentile(a, 50, axis=0, out=out)
    array([ 6.5, 2., 2.5])
    >>> m
    array([ 6.5, 2. , 2.5])
    >>> b = a.copy()
    >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a==b)
    """
    arr = np.asanyarray(a)
    # Rescale percentiles to quantiles; true_divide also takes care of
    # converting q to an array for us.
    quantiles = np.true_divide(q, 100.0)
    if not function_base._quantile_is_valid(quantiles):
        raise ValueError("Percentiles must be in the range [0, 100]")
    return _nanquantile_unchecked(
        arr, quantiles, axis, out, overwrite_input, interpolation, keepdims)
def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                            interpolation=None, keepdims=None):
    # Arguments that may carry an __array_function__ implementation.
    return a, q, out
@array_function_dispatch(_nanquantile_dispatcher)
def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
                interpolation='linear', keepdims=np._NoValue):
    """
    Compute the qth quantile of the data along the specified axis,
    while ignoring nan values.

    .. versionadded:: 1.15.0

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array, possibly
        containing nan values to be ignored.
    q : array_like of float
        Quantile or sequence of quantiles, each in the closed
        interval [0, 1].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed. Default
        is over the flattened array.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output.
    overwrite_input : bool, optional
        If True, `a` may be modified by intermediate calculations to
        save memory; its contents are then undefined afterwards.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method used when the desired quantile lies
        between two data points.
    keepdims : bool, optional
        If True, the reduced axes are left in the result with size
        one, so it broadcasts correctly against `a`.

    Returns
    -------
    quantile : scalar or ndarray
        Scalar for a single quantile with ``axis=None``; otherwise the
        first axis of the result indexes the quantiles. If `out` is
        specified, that array is returned instead.

    See Also
    --------
    quantile
    nanmean, nanmedian
    nanmedian : equivalent to ``nanquantile(..., 0.5)``
    nanpercentile : same as nanquantile, but with q in the range [0, 100].
    """
    arr = np.asanyarray(a)
    quantiles = np.asanyarray(q)
    # Validate before delegating to the unchecked implementation.
    if not function_base._quantile_is_valid(quantiles):
        raise ValueError("Quantiles must be in the range [0, 1]")
    return _nanquantile_unchecked(
        arr, quantiles, axis, out, overwrite_input, interpolation, keepdims)
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                           interpolation='linear', keepdims=np._NoValue):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # Empty arrays: apply_along_axis (used downstream) does not handle
    # them well, so deal with them up front via nanmean's semantics.
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    result, kept_shape = function_base._ureduce(
        a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out,
        overwrite_input=overwrite_input, interpolation=interpolation
    )
    # Re-expand the reduced axes only when keepdims was explicitly truthy.
    if keepdims and keepdims is not np._NoValue:
        return result.reshape(q.shape + kept_shape)
    return result
def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
                              interpolation='linear'):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage
    """
    if axis is None or a.ndim == 1:
        # Flattened reduction: a single 1-d pass over all elements.
        part = a.ravel()
        result = _nanquantile_1d(part, q, overwrite_input, interpolation)
    else:
        # Reduce each 1-d slice along `axis` independently.
        result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in collapsed axis with results.
        # Move that axis to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            result = np.moveaxis(result, axis, 0)
    if out is not None:
        # Mirror the result into the caller-provided output array.
        out[...] = result
    return result
def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'):
    """
    Private function for rank 1 arrays. Compute quantile ignoring NaNs.
    See nanpercentile for parameter usage
    """
    # Strip NaNs first; this may flip overwrite_input to True if a copy
    # was made, letting the quantile computation work in place safely.
    arr1d, overwrite_input = _remove_nan_1d(arr1d,
        overwrite_input=overwrite_input)
    if arr1d.size == 0:
        # All-NaN (or empty) slice: every requested quantile is NaN.
        return np.full(q.shape, np.nan)[()]  # convert to scalar
    return function_base._quantile_unchecked(
        arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation)
def _nanvar_dispatcher(
a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
return (a, out)
@array_function_dispatch(_nanvar_dispatcher)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
    """
    Compute the variance along the specified axis, while ignoring NaNs.

    For all-NaN slices or slices with zero degrees of freedom, NaN is
    returned and a `RuntimeWarning` is raised.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array containing numbers whose variance is desired. If `a` is
        not an array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the variance is computed. The default
        is over the flattened array.
    dtype : data-type, optional
        Type to use in computing the variance; must be inexact when `a`
        contains NaNs.
    out : ndarray, optional
        Alternate output array; must have the same shape as the
        expected output, and an inexact dtype when `a` contains NaNs.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used is ``N - ddof``,
        where ``N`` is the number of non-NaN elements. Default is zero.
    keepdims : bool, optional
        If True, the reduced axes are left in the result with size one.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        A new array, or a reference to `out` when it is given. If ddof
        is >= the number of non-NaN elements in a slice, or the slice
        contains only NaNs, the result for that slice is NaN.

    See Also
    --------
    std, mean, var
    nanstd, nanmean
    """
    data, nan_mask = _replace_nan(a, 0)
    if nan_mask is None:
        # No NaNs present: defer entirely to the plain implementation.
        return np.var(data, axis=axis, dtype=dtype, out=out, ddof=ddof,
                      keepdims=keepdims)
    if dtype is not None:
        dtype = np.dtype(dtype)
    if dtype is not None and not issubclass(dtype.type, np.inexact):
        raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")
    # Compute mean. np.matrix must be special-cased for reverse
    # compatibility: it raises on keepdims=True but historically forced
    # keepdims-like behavior anyway, so pass the "unset" sentinel there.
    if type(data) is np.matrix:
        inner_keepdims = np._NoValue
    else:
        inner_keepdims = True
    count = np.sum(~nan_mask, axis=axis, dtype=np.intp,
                   keepdims=inner_keepdims)
    total = np.sum(data, axis=axis, dtype=dtype, keepdims=inner_keepdims)
    mean = _divide_by_count(total, count)
    # Squared deviation from the mean; NaN slots (replaced by 0 above)
    # are zeroed again so they contribute nothing to the sum.
    np.subtract(data, mean, out=data, casting='unsafe')
    data = _copyto(data, 0, nan_mask)
    if issubclass(data.dtype.type, np.complexfloating):
        sq_dev = np.multiply(data, data.conj(), out=data).real
    else:
        sq_dev = np.multiply(data, data, out=data)
    # Compute variance.
    variance = np.sum(sq_dev, axis=axis, dtype=dtype, out=out,
                      keepdims=keepdims)
    if variance.ndim < count.ndim:
        # Subclasses of ndarray may ignore keepdims, so check here.
        count = count.squeeze(axis)
    dof = count - ddof
    variance = _divide_by_count(variance, dof)
    bad_dof = (dof <= 0)
    if np.any(bad_dof):
        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
                      stacklevel=2)
        # NaN, inf, or negative numbers are all possible bad
        # values, so explicitly replace them with NaN.
        variance = _copyto(variance, np.nan, bad_dof)
    return variance
def _nanstd_dispatcher(
a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
return (a, out)
@array_function_dispatch(_nanstd_dispatcher)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
    """
    Compute the standard deviation along the specified axis, while
    ignoring NaNs.

    For all-NaN slices or slices with zero degrees of freedom, NaN is
    returned and a `RuntimeWarning` is raised.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Calculate the standard deviation of the non-NaN values.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the standard deviation is computed.
        The default is over the flattened array.
    dtype : dtype, optional
        Type to use in computing the standard deviation.
    out : ndarray, optional
        Alternative output array; must have the same shape as the
        expected output.
    ddof : int, optional
        Delta Degrees of Freedom: the divisor used is ``N - ddof``,
        where ``N`` is the number of non-NaN elements. Default is zero.
    keepdims : bool, optional
        If True, the reduced axes are left in the result with size one.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        A new array, or a reference to `out` when it is given. If ddof
        is >= the number of non-NaN elements in a slice, or the slice
        contains only NaNs, the result for that slice is NaN.

    See Also
    --------
    var, mean, std
    nanvar, nanmean
    """
    # The heavy lifting (NaN masking, dof handling, warnings) happens
    # in nanvar; this merely takes the square root of its result.
    variance = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                      keepdims=keepdims)
    if isinstance(variance, np.ndarray):
        # Reuse the variance buffer (which aliases `out` when given).
        return np.sqrt(variance, out=variance)
    # 0-d / scalar result: preserve the original dtype after sqrt.
    return variance.dtype.type(np.sqrt(variance))
| {
"content_hash": "60d4b937fb17e8a84528daa6764125a3",
"timestamp": "",
"source": "github",
"line_count": 1633,
"max_line_length": 89,
"avg_line_length": 35.35884874464176,
"alnum_prop": 0.6269721688228469,
"repo_name": "ryfeus/lambda-packs",
"id": "d73d844672da08bc4f50247b21817814d604ceff",
"size": "57741",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Opencv_pil/source36/numpy/lib/nanfunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from just.database import db
from just.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
    """Base factory.

    Shared parent for the project's model factories; binds every
    subclass to the application's SQLAlchemy session.
    """
    class Meta:
        """Factory configuration."""
        # Abstract: builds no model itself, only concrete subclasses do.
        abstract = True
        # Session used by factory_boy to persist created instances.
        sqlalchemy_session = db.session
class UserFactory(BaseFactory):
    """User factory.

    Builds ``User`` instances with unique usernames/emails and the
    known password 'example' for authentication in tests.
    """
    # Unique per instance: user0, user1, ...
    username = Sequence(lambda n: 'user{0}'.format(n))
    # Unique per instance: user0@example.com, user1@example.com, ...
    email = Sequence(lambda n: 'user{0}@example.com'.format(n))
    # Calls User.set_password('example') after the instance is built.
    password = PostGenerationMethodCall('set_password', 'example')
    active = True
    class Meta:
        """Factory configuration."""
        model = User
| {
"content_hash": "b069f2b08d57af53cf884de7c54e2a30",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 24.366666666666667,
"alnum_prop": 0.6812585499316005,
"repo_name": "kenhancoder/just",
"id": "86e4826fd7e3a55a57bc7dbddb38e9f2d8198258",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "8671"
},
{
"name": "JavaScript",
"bytes": "181413"
},
{
"name": "Python",
"bytes": "33779"
}
],
"symlink_target": ""
} |
from .user import UserAdminModel
| {
"content_hash": "0d267fc3c9629c5614fd2140b4a886b2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.8484848484848485,
"repo_name": "yevgnenll/but",
"id": "72934cd731c0551daca0e67d6d3ebf32074457f1",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "but/users/admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1674"
},
{
"name": "HTML",
"bytes": "39940"
},
{
"name": "JavaScript",
"bytes": "9103"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Python",
"bytes": "41212"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Adds the rich-text `body` field to the HomePage model.
    dependencies = [
        ('home', '0002_create_homepage'),
    ]
    operations = [
        migrations.AddField(
            model_name='homepage',
            name='body',
            # blank=True: the field may be left empty in the admin.
            field=wagtail.wagtailcore.fields.RichTextField(blank=True),
        ),
    ]
| {
"content_hash": "6f89167f57820cd3a540cb9e30eb8a0f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 21.94736842105263,
"alnum_prop": 0.6235011990407674,
"repo_name": "registerguard/celebrate",
"id": "cda9dcefaa4ad2e5d39f53f10ca234d0cd15c308",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/migrations/0003_homepage_body.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "211871"
},
{
"name": "HTML",
"bytes": "16612"
},
{
"name": "JavaScript",
"bytes": "897249"
},
{
"name": "Python",
"bytes": "33692"
}
],
"symlink_target": ""
} |
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
PreprintFactory,
AuthUserFactory,
SubjectFactory,
PreprintProviderFactory
)
from osf.models import PreprintService, Node
from website.util import permissions
from api.base.settings.defaults import API_BASE
from api_tests.preprints.filters.test_filters import PreprintsListFilteringMixin
from api_tests.preprints.views.test_preprint_list_mixin import PreprintIsPublishedListMixin, PreprintIsValidListMixin
class TestUserPreprints(ApiTestCase):
    """GET /users/<id>/preprints/ returns the user's preprints (not their
    public or private projects) regardless of who is authenticated."""
    def setUp(self):
        super(TestUserPreprints, self).setUp()
        self.user_one = AuthUserFactory()
        self.user_two = AuthUserFactory()
        self.preprint = PreprintFactory(title="Preprint User One", creator=self.user_one)
        self.public_project = ProjectFactory(title="Public Project User One", is_public=True, creator=self.user_one)
        self.private_project = ProjectFactory(title="Private Project User One", is_public=False, creator=self.user_one)
    def tearDown(self):
        super(TestUserPreprints, self).tearDown()
    def test_authorized_in_gets_200(self):
        # The owner can list their own preprints.
        url = "/{}users/{}/preprints/".format(API_BASE, self.user_one._id)
        res = self.app.get(url, auth=self.user_one.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.content_type, 'application/vnd.api+json')
    def test_anonymous_gets_200(self):
        # The endpoint is readable without authentication.
        url = "/{}users/{}/preprints/".format(API_BASE, self.user_one._id)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.content_type, 'application/vnd.api+json')
    def test_get_preprints_logged_in(self):
        # Only preprints appear; the user's projects are excluded.
        url = "/{}users/{}/preprints/".format(API_BASE, self.user_one._id)
        res = self.app.get(url, auth=self.user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert_in(self.preprint._id, ids)
        assert_not_in(self.public_project._id, ids)
        assert_not_in(self.private_project._id, ids)
    def test_get_projects_not_logged_in(self):
        # Same filtering applies for anonymous requests.
        url = "/{}users/{}/preprints/".format(API_BASE, self.user_one._id)
        res = self.app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert_in(self.preprint._id, ids)
        assert_not_in(self.public_project._id, ids)
        assert_not_in(self.private_project._id, ids)
    def test_get_projects_logged_in_as_different_user(self):
        # Same filtering applies when another user makes the request.
        url = "/{}users/{}/preprints/".format(API_BASE, self.user_one._id)
        res = self.app.get(url, auth=self.user_two.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert_in(self.preprint._id, ids)
        assert_not_in(self.public_project._id, ids)
        assert_not_in(self.private_project._id, ids)
class TestUserPreprintsListFiltering(PreprintsListFilteringMixin, ApiTestCase):
    """Runs the shared preprint-list filtering tests against the
    user-preprints endpoint, plus one provider-filter case."""
    def setUp(self):
        # Fixtures consumed by PreprintsListFilteringMixin.setUp, which
        # must run last (hence the super() call at the end).
        self.user = AuthUserFactory()
        self.provider = PreprintProviderFactory(name='Sockarxiv')
        self.provider_two = PreprintProviderFactory(name='Piratearxiv')
        self.provider_three = self.provider
        self.project = ProjectFactory(creator=self.user)
        self.project_two = ProjectFactory(creator=self.user)
        self.project_three = ProjectFactory(creator=self.user)
        self.url = '/{}users/{}/preprints/?version=2.2&'.format(API_BASE, self.user._id)
        super(TestUserPreprintsListFiltering, self).setUp()
    def test_provider_filter_equals_returns_one(self):
        # Filtering by the second provider should match exactly one
        # preprint (self.preprint_two, presumably set up by the mixin).
        expected = [self.preprint_two._id]
        res = self.app.get('{}{}'.format(self.provider_url, self.provider_two._id), auth=self.user.auth)
        actual = [preprint['id'] for preprint in res.json['data']]
        assert_equal(expected, actual)
class TestUserPreprintIsPublishedList(PreprintIsPublishedListMixin, ApiTestCase):
    """Runs the shared is-published list checks against the
    user-preprints endpoint."""
    def setUp(self):
        # Fixtures consumed by PreprintIsPublishedListMixin.setUp,
        # which must run last (hence the super() call at the end).
        self.admin = AuthUserFactory()
        self.provider_one = PreprintProviderFactory()
        self.provider_two = self.provider_one
        self.published_project = ProjectFactory(creator=self.admin, is_public=True)
        self.public_project = ProjectFactory(creator=self.admin, is_public=True)
        self.url = '/{}users/{}/preprints/?version=2.2&'.format(API_BASE, self.admin._id)
        super(TestUserPreprintIsPublishedList, self).setUp()
class TestUserPreprintIsValidList(PreprintIsValidListMixin, ApiTestCase):
    """Runs the shared validity checks against the user-preprints
    endpoint, with one override for private-node visibility."""
    def setUp(self):
        # Fixtures consumed by PreprintIsValidListMixin.setUp, which
        # must run last (hence the super() call at the end).
        self.admin = AuthUserFactory()
        self.provider = PreprintProviderFactory()
        self.project = ProjectFactory(creator=self.admin, is_public=True)
        self.url = '/{}users/{}/preprints/?version=2.2&'.format(API_BASE, self.admin._id)
        super(TestUserPreprintIsValidList, self).setUp()
    # User nodes/preprints routes do not show private nodes to anyone but the self
    def test_preprint_private_visible_write(self):
        # A write contributor sees the preprint while the project is
        # public, but loses access once it is made private.
        res = self.app.get(self.url, auth=self.write_contrib.auth)
        assert len(res.json['data']) == 1
        self.project.is_public = False
        self.project.save()
        res = self.app.get(self.url, auth=self.write_contrib.auth)
        assert len(res.json['data']) == 0
| {
"content_hash": "d160c93c069c0ce8b54c88b5e81c3cb4",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 119,
"avg_line_length": 43.90833333333333,
"alnum_prop": 0.676029607136079,
"repo_name": "Nesiehr/osf.io",
"id": "facb942ef9c4844249e1ef681f8d6eb2966ce5e2",
"size": "5293",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api_tests/users/views/test_user_preprints_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "215077"
},
{
"name": "JavaScript",
"bytes": "1699002"
},
{
"name": "Mako",
"bytes": "650031"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7928034"
}
],
"symlink_target": ""
} |
import os,json
import shutil
import tempfile
import zipfile
class Template():
    """Loader for the bundled "metadata-templates" template tree.

    Templates are read either from next to this module (unpacked
    install) or, when running from a packed .sublime-package, from a
    copy extracted into the system temp directory.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    template_folder = "metadata-templates"
    config_file = "package.json"

    def __init__(self):
        if is_installed_package(self.dir_path):
            # Running from a zipped package: extract the templates to
            # the temp directory and read them from there.
            self.root_path = os.path.join(tempfile.gettempdir(), "salesforcexytools", "templates")
            packageFile = os.path.dirname(self.dir_path)
            extract_template(packageFile)
        else:
            self.root_path = self.dir_path

    def load_config(self, type):
        """Return the package.json section named `type`."""
        config_path = os.path.join(self.root_path, self.template_folder, self.config_file)
        with open(config_path) as fp:
            templates_config = json.loads(fp.read())
        return templates_config[type]

    def load_config_dict(self, type):
        """Return the `type` config section keyed by each entry's "name"."""
        config = self.load_config(type)
        return {item["name"]: item for item in config}

    def get(self, type, name):
        """Return the raw text of template `name` under category `type`."""
        # BUGFIX: removed stray debug print(template_path) that polluted
        # stdout on every template read.
        template_path = os.path.join(self.root_path, self.template_folder, type, name)
        with open(template_path) as fp:
            return fp.read()

    def get_src(self, type, name, data):
        """Render template `type`/`name`, substituting ``{{ key }}`` markers.

        Only truthy values are substituted; falsy values leave their
        marker in place (original behavior, preserved).
        """
        src = self.get(type, name)
        for key, value in data.items():
            if value:
                src = src.replace("{{ " + key + " }}", value)
        return src
class AntConfig():
    """Generates Ant build setups (migration tools / ant dataloader)
    from the bundled "ant-templates" tree."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    template_folder = "ant-templates"
    MigrationTools_folder = "MigrationTools"
    Deploy_folder = "DeployTools"
    AntDataloader_folder = "AntDataloader"

    def __init__(self):
        if is_installed_package(self.dir_path):
            # Packed install: templates must be extracted to temp first.
            self.root_path = os.path.join(tempfile.gettempdir(), "salesforcexytools", "templates")
            packageFile = os.path.dirname(self.dir_path)
            extract_template(packageFile)
        else:
            self.root_path = self.dir_path
        self.template_ant_dataloader_path = os.path.join(self.root_path, self.template_folder, self.AntDataloader_folder)

    def get_file(self, sub_folder, name):
        """Return the text of `name` inside ant-templates/`sub_folder`."""
        template_path = os.path.join(self.root_path, self.template_folder, sub_folder, name)
        with open(template_path) as fp:
            return fp.read()

    def build_migration_tools(self, save_path, config_data, template_name="MigrationTools"):
        """Copy the migration-tools template into `save_path` and fill
        build.properties / build.xml from `config_data`."""
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        tmp_migration_tools_path = os.path.join(self.root_path, self.template_folder, template_name)
        self._copy_all(tmp_migration_tools_path, save_path)
        build_properties_src = self.get_file(template_name, "build.properties")
        build_properties_src = build_properties_src.format(**config_data)
        self._save_file(os.path.join(save_path, "build.properties"), build_properties_src)
        # FIX: pass the template folder name rather than an absolute path;
        # the original worked only because os.path.join drops earlier
        # components when handed an absolute path. Result is identical.
        build_xml_src = self.get_file(template_name, "build.xml")
        build_xml_src = build_xml_src.replace("{jar_path}", config_data["jar_path"]) \
            .replace("{target_proxy_body}", self._get_ant_proxy_body(config_data)) \
            .replace("{jar_url_path}", config_data["jar_url_path"])
        self._save_file(os.path.join(save_path, "build.xml"), build_xml_src)

    def build_ant_dataloader(self, save_path, config_data):
        """Copy the ant-dataloader template into `save_path` and fill
        config.properties / build.xml from `config_data`."""
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        self._copy_all(self.template_ant_dataloader_path, save_path)
        # Same fix as build_migration_tools: use the folder name.
        build_properties_src = self.get_file(self.AntDataloader_folder, "config.properties")
        build_properties_src = build_properties_src.format(**config_data)
        self._save_file(os.path.join(save_path, "config.properties"), build_properties_src)
        build_xml_src = self.get_file(self.AntDataloader_folder, "build.xml")
        build_xml_src = build_xml_src.replace("{ant_export_xml}", config_data["ant_export_xml"]) \
            .replace("{dataloader_jar_name}", config_data["dataloader_jar_name"])
        self._save_file(os.path.join(save_path, "build.xml"), build_xml_src)

    def _get_ant_proxy_body(self, config_data):
        """Build a <setproxy .../> element from config_data["proxy"].

        Returns "" when the proxy is disabled or no options are set.
        Option order matches the original hand-written sequence.
        """
        proxy_config = config_data["proxy"]
        option_keys = ("nonproxyhosts", "proxyhost", "proxypassword",
                       "proxyport", "proxyuser", "socksproxyhost",
                       "socksproxyport")
        xml_str_list = []
        if proxy_config.get("use_proxy"):
            for key in option_keys:
                if key in proxy_config and proxy_config[key]:
                    xml_str_list.append(key + '="' + proxy_config[key] + '"')
        if len(xml_str_list) > 0:
            return "<setproxy " + " ".join(xml_str_list) + "/>"
        return ""

    def _copy_all(self, org_path, dist_path):
        """Copy every regular file from `org_path` into `dist_path`."""
        for file_name in os.listdir(org_path):
            full_file_name = os.path.join(org_path, file_name)
            if (os.path.isfile(full_file_name)):
                shutil.copy(full_file_name, dist_path)

    def _save_file(self, full_path, content, newline='\n', encoding='utf-8'):
        """Write `content` to `full_path`; log failures without raising.

        BUGFIX: the original closed `fp` in a finally block, which raised
        an UnboundLocalError when open() itself failed. A with-statement
        handles both success and failure correctly.
        """
        try:
            with open(full_path, "w", newline=newline, encoding=encoding) as fp:
                fp.write(content)
        except Exception:
            print('save file error! ' + full_path)
def is_installed_package(fileName):
    """Return True when `fileName` refers to a packed .sublime-package path."""
    marker = ".sublime-package"
    return marker in fileName
def extract_template(package_path):
    """Extract the "templates/" tree from a packed .sublime-package zip
    into <tempdir>/salesforcexytools.

    Skips directories and .py files; prints and returns silently on a
    corrupt archive (original behavior, preserved).

    BUGFIX: the ZipFile was never closed (resource leak); it is now
    managed with a with-statement.
    """
    print("package_path" + package_path)
    root_path = os.path.join(tempfile.gettempdir(), "salesforcexytools")
    try:
        with zipfile.ZipFile(package_path, 'r') as zfile:
            for filename in zfile.namelist():
                if filename.endswith('/'): continue
                if filename.endswith('.py'): continue
                if filename.startswith("templates/"):
                    f = os.path.join(root_path, filename)
                    if not os.path.exists(os.path.dirname(f)):
                        os.makedirs(os.path.dirname(f))
                    with open(f, "wb") as fp:
                        fp.write(zfile.read(filename))
    except zipfile.BadZipFile as ex:
        print(str(ex))
        return
"content_hash": "ca2283a2bd7d2b6d7243d787b538c466",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 121,
"avg_line_length": 46.23076923076923,
"alnum_prop": 0.6019134775374376,
"repo_name": "exiahuang/SalesforceXyTools",
"id": "d4d811ecd72ebf958774b8248550f0bf611d40aa",
"size": "7212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Apex",
"bytes": "9303"
},
{
"name": "Batchfile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "2213820"
}
],
"symlink_target": ""
} |
from typing import Any, Dict, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ErrorFieldContract(msrest.serialization.Model):
    """Error Field contract.

    msrest serialization model for a single property-level validation
    error returned by the service.

    :ivar code: Property level error code.
    :vartype code: str
    :ivar message: Human-readable representation of property-level error.
    :vartype message: str
    :ivar target: Property name.
    :vartype target: str
    """
    # Maps each attribute to its wire name and msrest type; consumed by
    # msrest during (de)serialization and must match the service schema.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        target: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword code: Property level error code.
        :paramtype code: str
        :keyword message: Human-readable representation of property-level error.
        :paramtype message: str
        :keyword target: Property name.
        :paramtype target: str
        """
        super(ErrorFieldContract, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
class WorkbookError(msrest.serialization.Model):
    """Error message body that will indicate why the operation failed.

    :ivar code: Service-defined error code. This code serves as a sub-status for the HTTP error
     code specified in the response.
    :vartype code: str
    :ivar message: Human-readable representation of the error.
    :vartype message: str
    :ivar details: The list of invalid fields send in request, in case of validation error.
    :vartype details:
     list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.ErrorFieldContract]
    """
    # Maps each attribute to its wire name and msrest type; `details`
    # deserializes into a list of ErrorFieldContract models.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorFieldContract]'},
    }
    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        details: Optional[List["ErrorFieldContract"]] = None,
        **kwargs
    ):
        """
        :keyword code: Service-defined error code. This code serves as a sub-status for the HTTP error
         code specified in the response.
        :paramtype code: str
        :keyword message: Human-readable representation of the error.
        :paramtype message: str
        :keyword details: The list of invalid fields send in request, in case of validation error.
        :paramtype details:
         list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.ErrorFieldContract]
        """
        super(WorkbookError, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
class WorkbookTemplateResource(msrest.serialization.Model):
    """An azure resource object.

    ``id``, ``name`` and ``type`` are populated by the server and ignored when
    sending a request; ``location`` is required.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :ivar location: Required. Resource location.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        # Read-only fields start as None and are filled in by the service.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags
class WorkbookTemplate(WorkbookTemplateResource):
    """An Application Insights workbook template definition.

    ``id``, ``name`` and ``type`` are populated by the server and ignored when
    sending a request; ``location`` is required.

    :ivar priority: Priority of the template; determines which template to
     open when a workbook gallery is opened in viewer mode.
    :vartype priority: int
    :ivar author: Information about the author of the workbook template.
    :vartype author: str
    :ivar template_data: Valid JSON object containing the workbook template payload.
    :vartype template_data: any
    :ivar galleries: Workbook galleries supported by the template.
    :vartype galleries: list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateGallery]
    :ivar localized: Localized gallery data keyed by the locale codes supported
     by the Azure portal.
    :vartype localized: dict[str, list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateLocalizedGallery]]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "priority": {"key": "properties.priority", "type": "int"},
        "author": {"key": "properties.author", "type": "str"},
        "template_data": {"key": "properties.templateData", "type": "object"},
        "galleries": {"key": "properties.galleries", "type": "[WorkbookTemplateGallery]"},
        "localized": {"key": "properties.localized", "type": "{[WorkbookTemplateLocalizedGallery]}"},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        priority: Optional[int] = None,
        author: Optional[str] = None,
        template_data: Optional[Any] = None,
        galleries: Optional[List["WorkbookTemplateGallery"]] = None,
        localized: Optional[Dict[str, List["WorkbookTemplateLocalizedGallery"]]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource location.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword priority: Priority of the template; determines which template
         to open when a workbook gallery is opened in viewer mode.
        :paramtype priority: int
        :keyword author: Information about the author of the workbook template.
        :paramtype author: str
        :keyword template_data: Valid JSON object containing the workbook template payload.
        :paramtype template_data: any
        :keyword galleries: Workbook galleries supported by the template.
        :paramtype galleries: list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateGallery]
        :keyword localized: Localized gallery data keyed by portal locale code.
        :paramtype localized: dict[str, list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateLocalizedGallery]]
        """
        super().__init__(location=location, tags=tags, **kwargs)
        self.priority = priority
        self.author = author
        self.template_data = template_data
        self.galleries = galleries
        self.localized = localized
class WorkbookTemplateGallery(msrest.serialization.Model):
    """Gallery information for a workbook template.

    :ivar name: Name of the workbook template in the gallery.
    :vartype name: str
    :ivar category: Category for the gallery.
    :vartype category: str
    :ivar type: Type of workbook supported by the workbook template.
    :vartype type: str
    :ivar order: Order of the template within the gallery.
    :vartype order: int
    :ivar resource_type: Azure resource type supported by the gallery.
    :vartype resource_type: str
    """

    # Maps python attribute name -> JSON key/type used by msrest (de)serialization.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "order": {"key": "order", "type": "int"},
        "resource_type": {"key": "resourceType", "type": "str"},
    }

    def __init__(self, *, name: Optional[str] = None, category: Optional[str] = None,
                 type: Optional[str] = None, order: Optional[int] = None,
                 resource_type: Optional[str] = None, **kwargs):
        """Keyword arguments mirror the class attributes documented above."""
        super().__init__(**kwargs)
        self.name = name
        self.category = category
        self.type = type
        self.order = order
        self.resource_type = resource_type
class WorkbookTemplateLocalizedGallery(msrest.serialization.Model):
    """Localized template data and gallery information.

    :ivar template_data: Valid JSON object containing the workbook template payload.
    :vartype template_data: any
    :ivar galleries: Workbook galleries supported by the template.
    :vartype galleries: list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateGallery]
    """

    # Maps python attribute name -> JSON key/type used by msrest (de)serialization.
    _attribute_map = {
        "template_data": {"key": "templateData", "type": "object"},
        "galleries": {"key": "galleries", "type": "[WorkbookTemplateGallery]"},
    }

    def __init__(self, *, template_data: Optional[Any] = None,
                 galleries: Optional[List["WorkbookTemplateGallery"]] = None, **kwargs):
        """Keyword arguments mirror the class attributes documented above."""
        super().__init__(**kwargs)
        self.template_data = template_data
        self.galleries = galleries
class WorkbookTemplatesListResult(msrest.serialization.Model):
    """Result of a workbook-template list operation.

    :ivar value: An array of workbook templates.
    :vartype value: list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplate]
    """

    # Maps python attribute name -> JSON key/type used by msrest (de)serialization.
    _attribute_map = {
        "value": {"key": "value", "type": "[WorkbookTemplate]"},
    }

    def __init__(self, *, value: Optional[List["WorkbookTemplate"]] = None, **kwargs):
        """
        :keyword value: An array of workbook templates.
        :paramtype value: list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplate]
        """
        super().__init__(**kwargs)
        self.value = value
class WorkbookTemplateUpdateParameters(msrest.serialization.Model):
    """The parameters that can be provided when updating a workbook template.

    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar priority: Priority of the template; determines which template to
     open when a workbook gallery is opened in viewer mode.
    :vartype priority: int
    :ivar author: Information about the author of the workbook template.
    :vartype author: str
    :ivar template_data: Valid JSON object containing the workbook template payload.
    :vartype template_data: any
    :ivar galleries: Workbook galleries supported by the template.
    :vartype galleries: list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateGallery]
    :ivar localized: Localized gallery data keyed by the locale codes supported
     by the Azure portal.
    :vartype localized: dict[str, list[~azure.mgmt.applicationinsights.v2019_10_17_preview.models.WorkbookTemplateLocalizedGallery]]
    """

    # Maps python attribute name -> JSON key/type used by msrest (de)serialization.
    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "priority": {"key": "properties.priority", "type": "int"},
        "author": {"key": "properties.author", "type": "str"},
        "template_data": {"key": "properties.templateData", "type": "object"},
        "galleries": {"key": "properties.galleries", "type": "[WorkbookTemplateGallery]"},
        "localized": {"key": "properties.localized", "type": "{[WorkbookTemplateLocalizedGallery]}"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        priority: Optional[int] = None,
        author: Optional[str] = None,
        template_data: Optional[Any] = None,
        galleries: Optional[List["WorkbookTemplateGallery"]] = None,
        localized: Optional[Dict[str, List["WorkbookTemplateLocalizedGallery"]]] = None,
        **kwargs
    ):
        """Keyword arguments mirror the class attributes documented above."""
        super().__init__(**kwargs)
        self.tags = tags
        self.priority = priority
        self.author = author
        self.template_data = template_data
        self.galleries = galleries
        self.localized = localized
| {
"content_hash": "c46ce0e95c7e53090bfc2c1de7f27d11",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 107,
"avg_line_length": 37.87980769230769,
"alnum_prop": 0.6328848838685113,
"repo_name": "Azure/azure-sdk-for-python",
"id": "fccf4cbe4e44e360afc0266dcee456b565a098c7",
"size": "16226",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2019_10_17_preview/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
def load_names(filename):
    """Read a comma-separated, double-quoted list of names from *filename*."""
    with open(filename) as handle:
        raw = handle.read()
    return raw.replace("\"", "").split(",")
def calc_alphabetical_value(name):
    """Return the alphabetical value of *name*: A=1, B=2, ..., Z=26, summed.

    Fixes the parameter name, which previously shadowed the builtin ``str``,
    and replaces the manual accumulation loop with ``sum()``.
    Assumes *name* contains only ASCII letters (as in Project Euler input).
    """
    return sum(ord(ch) - 64 for ch in name.upper())
def sum_namescores(names):
    """Return the total name score of *names*.

    Each name contributes (1-based position in the list) * (alphabetical
    value of the name), per Project Euler problem 22.  The previous version
    multiplied by the 0-based enumerate index, so the first name always
    contributed zero -- an off-by-one against the problem definition.
    """
    total = 0
    for position, name in enumerate(names, 1):
        total += position * calc_alphabetical_value(name)
    return total
# Project Euler problem 22: the names must be sorted before scoring.
# NOTE: this file targets Python 2 (print statement below).
names = sorted(load_names("022_names.txt"))
print "Solution:", sum_namescores(names)
| {
"content_hash": "80573bc56d4c0a7c177a52c2391d1c9f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 68,
"avg_line_length": 26.045454545454547,
"alnum_prop": 0.5828970331588132,
"repo_name": "fbcom/project-euler",
"id": "ff36cc24214176ba9e22d45bbb82f41667ecb9e9",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "022_names_scores.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10885"
}
],
"symlink_target": ""
} |
import setuptools
# Packaging metadata for the single-module `spefile` distribution; running
# this file invokes setuptools' command-line machinery (side effect at import).
setuptools.setup(
    author='Stuart B. Wilkins',
    description='Read SPE files',
    license='GPL v3',
    name='spefile',
    py_modules=['spefile'],
    version='1.6',
)
| {
"content_hash": "7296fbc94d8cb9f8104acfcf649bdd0c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 33,
"avg_line_length": 19.4,
"alnum_prop": 0.6288659793814433,
"repo_name": "NSLS-II/lightsource2-recipes",
"id": "31e5f78ee6ef23698b839e3fe74e6161d1dbe2e1",
"size": "194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "recipes-tag/spefile/src/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7341"
},
{
"name": "C",
"bytes": "796"
},
{
"name": "C++",
"bytes": "920"
},
{
"name": "Fortran",
"bytes": "2010"
},
{
"name": "Jupyter Notebook",
"bytes": "76785"
},
{
"name": "Python",
"bytes": "128047"
},
{
"name": "Shell",
"bytes": "60725"
}
],
"symlink_target": ""
} |
"""Continually polls and evaluates new checkpoints."""
import time
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from uflow import uflow_data
# pylint:disable=unused-import
from uflow import uflow_flags
from uflow import uflow_main
from uflow import uflow_plotting
FLAGS = flags.FLAGS
def evaluate():
  """Eval happens on GPU or CPU, and evals each checkpoint as it appears.

  Polls FLAGS.checkpoint_dir; every time a new checkpoint shows up it is
  restored into a freshly created UFlow model and evaluated.  Returns once
  the checkpoint's global step reaches FLAGS.num_train_steps.
  """
  tf.compat.v1.enable_eager_execution()
  candidate_checkpoint = None
  uflow = uflow_main.create_uflow()
  evaluate_fn, _ = uflow_data.make_eval_function(
      FLAGS.eval_on,
      FLAGS.height,
      FLAGS.width,
      progress_bar=True,
      plot_dir=FLAGS.plot_dir,
      num_plots=50)
  latest_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  while 1:
    # Wait for a new checkpoint
    while candidate_checkpoint == latest_checkpoint:
      logging.log_every_n(logging.INFO,
                          'Waiting for a new checkpoint, at %s, latest is %s',
                          20, FLAGS.checkpoint_dir, latest_checkpoint)
      time.sleep(0.5)
      candidate_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    # This refetch is NOT redundant: on the very first outer iteration
    # candidate_checkpoint is None, so if a checkpoint already exists the
    # polling loop above is skipped entirely and candidate_checkpoint would
    # otherwise still be None here.
    candidate_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    latest_checkpoint = candidate_checkpoint
    logging.info('New checkpoint found: %s', candidate_checkpoint)
    # This forces the checkpoint manager to reexamine the checkpoint directory
    # and become aware of the new checkpoint.
    uflow.update_checkpoint_dir(FLAGS.checkpoint_dir)
    uflow.restore()
    eval_results = evaluate_fn(uflow)
    uflow_plotting.print_eval(eval_results)
    step = tf.compat.v1.train.get_global_step().numpy()
    if step >= FLAGS.num_train_steps:
      logging.info('Evaluator terminating - completed evaluation of checkpoint '
                   'from step %d', step)
      return
def main(unused_argv):
  """Parses the gin config, prepares output directories, and runs the evaluator."""
  gin.parse_config_files_and_bindings(FLAGS.config_file, FLAGS.gin_bindings)
  # Create the output directories if they are configured but missing.
  checkpoint_dir = FLAGS.checkpoint_dir
  if checkpoint_dir and not tf.io.gfile.exists(checkpoint_dir):
    logging.info('Making new checkpoint directory %s', checkpoint_dir)
    tf.io.gfile.makedirs(checkpoint_dir)
  plot_dir = FLAGS.plot_dir
  if plot_dir and not tf.io.gfile.exists(plot_dir):
    logging.info('Making new plot directory %s', plot_dir)
    tf.io.gfile.makedirs(plot_dir)
  if FLAGS.no_tf_function:
    tf.config.experimental_run_functions_eagerly(True)
    logging.info('TFFUNCTION DISABLED')
  if not FLAGS.eval_on:
    raise ValueError('evaluation needs --eval_on <dataset>.')
  evaluate()
if __name__ == '__main__':
  # app.run parses absl flags before dispatching to main().
  app.run(main)
| {
"content_hash": "8112e4b75901918a7c371ca8e23c218f",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 80,
"avg_line_length": 32.373493975903614,
"alnum_prop": 0.7018980275400074,
"repo_name": "google-research/google-research",
"id": "1b40ab2a20885f0f46ba958399e1293b1abe9a96",
"size": "3295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uflow/uflow_evaluator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class NewAccountSignUp(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each model attribute.
    openapi_types = {
        'email': 'str',
        'password': 'str',
        'company_name': 'str'
    }

    # Python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'email': 'email',
        'password': 'password',
        'company_name': 'companyName'
    }

    def __init__(self, email=None, password=None, company_name=None, local_vars_configuration=None):  # noqa: E501
        """NewAccountSignUp - a model defined in OpenAPI"""  # noqa: E501
        # Configuration decides whether the setters below run client-side validation.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._email = None
        self._password = None
        self._company_name = None
        self.discriminator = None

        # Assign through the property setters so validation is applied.
        self.email = email
        self.password = password
        self.company_name = company_name

    @property
    def email(self):
        """Gets the email of this NewAccountSignUp.  # noqa: E501

        The email address associated with your account.  # noqa: E501

        :return: The email of this NewAccountSignUp.  # noqa: E501
        :rtype: str
        """
        return self._email

    @email.setter
    def email(self, email):
        """Sets the email of this NewAccountSignUp.

        The email address associated with your account.  # noqa: E501

        :param email: The email of this NewAccountSignUp.  # noqa: E501
        :type: str
        """
        # email is required: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and email is None:  # noqa: E501
            raise ValueError("Invalid value for `email`, must not be `None`")  # noqa: E501

        self._email = email

    @property
    def password(self):
        """Gets the password of this NewAccountSignUp.  # noqa: E501

        The password for your account.  # noqa: E501

        :return: The password of this NewAccountSignUp.  # noqa: E501
        :rtype: str
        """
        return self._password

    @password.setter
    def password(self, password):
        """Sets the password of this NewAccountSignUp.

        The password for your account.  # noqa: E501

        :param password: The password of this NewAccountSignUp.  # noqa: E501
        :type: str
        """
        # password is required: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and password is None:  # noqa: E501
            raise ValueError("Invalid value for `password`, must not be `None`")  # noqa: E501

        self._password = password

    @property
    def company_name(self):
        """Gets the company_name of this NewAccountSignUp.  # noqa: E501

        :return: The company_name of this NewAccountSignUp.  # noqa: E501
        :rtype: str
        """
        return self._company_name

    @company_name.setter
    def company_name(self, company_name):
        """Sets the company_name of this NewAccountSignUp.

        :param company_name: The company_name of this NewAccountSignUp.  # noqa: E501
        :type: str
        """
        # company_name is required and, per the spec, must be non-empty (minLength 1).
        if self.local_vars_configuration.client_side_validation and company_name is None:  # noqa: E501
            raise ValueError("Invalid value for `company_name`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                company_name is not None and len(company_name) < 1):
            raise ValueError("Invalid value for `company_name`, length must be greater than or equal to `1`")  # noqa: E501

        self._company_name = company_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts via their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NewAccountSignUp):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NewAccountSignUp):
            return True

        return self.to_dict() != other.to_dict()
| {
"content_hash": "38778c4913bd3dea1a5a2f49b0f30684",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 732,
"avg_line_length": 35.03888888888889,
"alnum_prop": 0.6066275566830506,
"repo_name": "talon-one/talon_one.py",
"id": "e5eab9ebf04206d332f6be7e769a5d0ab7dd1c42",
"size": "6324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talon_one/models/new_account_sign_up.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
} |
"""
If you want to save a message as an .eml file, work with MailMessage.obj - it is an email.message.EmailMessage instance.
python email lib docs:
https://docs.python.org/3/library/email.message.html#email.message.EmailMessage.as_string
https://docs.python.org/3/library/email.message.html#email.message.EmailMessage.as_bytes
"""
| {
"content_hash": "729d31b1b64f56100171569a89a58a5c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 102,
"avg_line_length": 32.5,
"alnum_prop": 0.7569230769230769,
"repo_name": "ikvk/imap_tools",
"id": "9b9a57855081feb1493ff85b73222211c01c3d64",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/email_to_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "508307"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HcopersonsSolveRefCountryFamilyParentCommunityMan(Himesis):
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule copersonsSolveRefCountryFamilyParentCommunityMan.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        # Start from an empty graph.  NOTE(review): the vertex indices 0-12
        # used below are assigned by add_node() call order, and the edge list
        # at the bottom depends on that exact order.
        super(HcopersonsSolveRefCountryFamilyParentCommunityMan, self).__init__(name='HcopersonsSolveRefCountryFamilyParentCommunityMan', num_nodes=0, edges=[])

        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """copersonsSolveRefCountryFamilyParentCommunityMan"""
        # Deterministic GUID derived from the rule name (uuid3 is name-based).
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'copersonsSolveRefCountryFamilyParentCommunityMan')

        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["attr1"] = """copersonsSolveRefCountryFamilyParentCommunityMan"""
        # match class Country() node
        self.add_node()
        self.vs[3]["mm__"] = """Country"""
        self.vs[3]["attr1"] = """+"""
        # match class Family() node
        self.add_node()
        self.vs[4]["mm__"] = """Family"""
        self.vs[4]["attr1"] = """+"""
        # match class Parent() node
        self.add_node()
        self.vs[5]["mm__"] = """Parent"""
        self.vs[5]["attr1"] = """+"""
        # apply class Community() node
        self.add_node()
        self.vs[6]["mm__"] = """Community"""
        self.vs[6]["attr1"] = """1"""
        # apply class Man() node
        self.add_node()
        self.vs[7]["mm__"] = """Man"""
        self.vs[7]["attr1"] = """1"""
        # match association Country--families-->Family node
        self.add_node()
        self.vs[8]["attr1"] = """families"""
        self.vs[8]["mm__"] = """directLink_S"""
        # match association Family--fathers-->Parent node
        self.add_node()
        self.vs[9]["attr1"] = """fathers"""
        self.vs[9]["mm__"] = """directLink_S"""
        # apply association Community--persons-->Man node
        self.add_node()
        self.vs[10]["attr1"] = """persons"""
        self.vs[10]["mm__"] = """directLink_T"""
        # backward association Country---->Community node
        self.add_node()
        self.vs[11]["mm__"] = """backward_link"""
        # backward association Parent---->Man node
        self.add_node()
        self.vs[12]["mm__"] = """backward_link"""

        # Add the edges
        self.add_edges([
            (0,3),  # matchmodel -> match_class Country()
            (0,4),  # matchmodel -> match_class Family()
            (0,5),  # matchmodel -> match_class Parent()
            (1,6),  # applymodel -> -> apply_class Community()
            (1,7),  # applymodel -> -> apply_class Man()
            (3,8),  # match_class Country() -> association families
            (8,4),  # association families -> match_class Family()
            (4,9),  # match_class Family() -> association fathers
            (9,5),  # association fathers -> match_class Parent()
            (6,10),  # apply_class Community() -> association persons
            (10,7),  # association persons -> apply_class Man()
            (6,11),  # apply_class Community() -> backward_association
            (11,3),  # backward_association -> apply_class Country()
            (7,12),  # apply_class Man() -> backward_association
            (12,5),  # backward_association -> apply_class Parent()
            (0,2),  # matchmodel -> pairedwith
            (2,1)  # pairedwith -> applyModel
        ])
        # Add the attribute equations
        self["equations"] = []
"content_hash": "a8f3f94bc63494d1701934365c89dca6",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 160,
"avg_line_length": 35.87068965517241,
"alnum_prop": 0.509973564047104,
"repo_name": "levilucio/SyVOLT",
"id": "3430a3f3154af9fa53198c943be819ed0178a564",
"size": "4161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExFamToPerson/transformation/no_contains/HcopersonsSolveRefCountryFamilyParentCommunityMan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
WSGI config for suqashexamples project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suqashexamples.settings")

# The WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
| {
"content_hash": "967ed073722376c193118a8ddbc2b014",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.3125,
"alnum_prop": 0.7777777777777778,
"repo_name": "riklaunim/django-examples",
"id": "870db208567098b4d8a9adbe9d39bd87ae747464",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "suqashexamples/suqashexamples/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "411"
},
{
"name": "HTML",
"bytes": "9677"
},
{
"name": "JavaScript",
"bytes": "4534"
},
{
"name": "Python",
"bytes": "50140"
}
],
"symlink_target": ""
} |
"""
survey - Assessment Data Analysis Tool
@author: Graeme Foster <graeme at acm dot org>
For more details see the blueprint at:
http://eden.sahanafoundation.org/wiki/BluePrint/SurveyTool/ADAT
"""
"""
@todo: open template from the dataTables into the section tab not update
@todo: in the pages that add a link to a template make the combobox display the label not the numbers
@todo: restrict the deletion of a template to only those with status Pending
"""
# web2py controller globals: controller and function names for this request.
module = request.controller
resourcename = request.function

# Fail fast with 404 if the survey module is disabled for this deployment.
if module not in deployment_settings.modules:
    raise HTTP(404, body="Module disabled: %s" % module)
import sys
sys.path.append("applications/%s/modules/s3" % request.application)
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
import base64
import math
from gluon.contenttype import contenttype
from gluon.languages import read_dict, write_dict
from s3survey import S3AnalysisPriority, \
survey_question_type, \
survey_analysis_type, \
getMatrix, \
DEBUG, \
LayoutBlocks, \
DataMatrix, MatrixElement, \
S3QuestionTypeOptionWidget, \
survey_T
def index():
    """Home page of the survey module."""
    name_nice = deployment_settings.modules[module].name_nice
    response.title = name_nice
    return dict(module_name=name_nice)
def template():
    """
    RESTful CRUD controller for survey templates.

    Handles the template resource plus its "section" and "translate"
    components, and a small AJAX endpoint for inserting new sections.
    """
    # Load Model
    table = s3db.survey_template
    s3 = response.s3
    def prep(r):
        # Pre-processor: adjust field access and action buttons per request
        if r.component and r.component_name == "translate":
            table = db["survey_translate"]
            # list existing translations and allow the addition of a new translation
            if r.component_id == None:
                table.file.readable = False
                table.file.writable = False
            # edit the selected translation
            else:
                table.language.writable = False
                table.code.writable = False
            # remove CRUD generated buttons in the tabs
            s3mgr.configure(table,
                            deletable=False,
                           )
        else:
            # Main template list: only templates with status Pending (1)
            # may be deleted, so restrict the delete action button to them
            s3_action_buttons(r)
            query = (r.table.status == 1) # Status of Pending
            rows = db(query).select(r.table.id)
            try:
                s3.actions[1]["restrict"].extend(str(row.id) for row in rows)
            except KeyError: # the restrict key doesn't exist
                s3.actions[1]["restrict"] = [str(row.id) for row in rows]
            except IndexError: # the delete buttons doesn't exist
                pass
            s3mgr.configure(r.tablename,
                            orderby = "%s.status" % r.tablename,
                            create_next = URL(c="survey", f="template"),
                            update_next = URL(c="survey", f="template"),
                            )
        return True
    # Post-processor
    def postp(r, output):
        if r.component:
            template_id = request.args[0]
            if r.component_name == "section":
                # Add the section select widget to the form
                sectionSelect = s3.survey_section_select_widget(template_id)
                output.update(form = sectionSelect)
                return output
            elif r.component_name == "translate":
                # Add per-row Download/Upload buttons for translations
                s3_action_buttons(r)
                s3.actions.append(
                               dict(label=str(T("Download")),
                                    _class="action-btn",
                                    url=URL(c=module,
                                            f="templateTranslateDownload",
                                            args=["[id]"])
                                   ),
                              )
                s3.actions.append(
                                   dict(label=str(T("Upload")),
                                        _class="action-btn",
                                        url=URL(c=module,
                                                f="template",
                                                args=[template_id,"translate","[id]"])
                                       ),
                                  )
                return output
        # Add a button to show what the questionnaire looks like
#        s3_action_buttons(r)
#        s3.actions = s3.actions + [
#                               dict(label=str(T("Display")),
#                                    _class="action-btn",
#                                    url=URL(c=module,
#                                            f="templateRead",
#                                            args=["[id]"])
#                                   ),
#                              ]
        # Add some highlighting to the rows (by template status)
        query = (r.table.status == 3) # Status of closed
        rows = db(query).select(r.table.id)
        s3.dataTableStyleDisabled = [str(row.id) for row in rows]
        s3.dataTableStyleWarning = [str(row.id) for row in rows]
        query = (r.table.status == 1) # Status of Pending
        rows = db(query).select(r.table.id)
        s3.dataTableStyleAlert = [str(row.id) for row in rows]
        query = (r.table.status == 4) # Status of Master
        rows = db(query).select(r.table.id)
        s3.dataTableStyleWarning.extend(str(row.id) for row in rows)
        return output
    if request.ajax:
        # AJAX endpoint: insert a new section into a template
        post = request.post_vars
        action = post.get("action")
        template_id = post.get("parent_id")
        section_id = post.get("section_id")
        section_text = post.get("section_text")
        if action == "section" and template_id != None:
            id = db.survey_section.insert(name=section_text,
                                          template_id=template_id,
                                          cloned_section_id=section_id)
            if id == None:
                print "Failed to insert record"
            return
    response.s3.prep = prep
    response.s3.postp = postp
    rheader = response.s3.survey_template_rheader
    # remove CRUD generated buttons in the tabs
    s3mgr.configure("survey_template",
                    listadd=False,
                    deletable=False,
                   )
    output = s3_rest_controller(rheader=rheader)
    return output
def templateRead():
    """
    Display a read-only rendering of the questionnaire for a template.

    The template id is taken from ?viewing=<tablename>.<id> if present,
    otherwise from the first URL argument.
    """
    # Load Model
    module = "survey"
    resourcename = "template"
    tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(tablename)
    s3mgr.load("survey_complete")
    s3 = response.s3
    crud_strings = s3.crud_strings[tablename]
    if "vars" in request and len(request.vars) > 0:
        # "viewing" has the form "<tablename>.<record id>"
        dummy, template_id = request.vars.viewing.split(".")
    else:
        template_id = request.args[0]
    def postp(r, output):
        # Replace the standard read view with the rendered questionnaire
        if r.interactive:
            template_id = r.id
            form = s3.survey_buildQuestionnaireFromTemplate(template_id)
            output["items"] = None
            output["form"] = None
            output["item"] = form
            output["title"] = crud_strings.title_question_details
        return output
    # remove CRUD generated buttons in the tabs
    s3mgr.configure(tablename,
                    listadd=False,
                    editable=False,
                    deletable=False,
                   )
    response.s3.postp = postp
    r = s3mgr.parse_request(module, resourcename, args=[template_id])
    output = r(method = "read", rheader=s3.survey_template_rheader)
    return output
def templateSummary():
    """
    Show the analysis summary for a template.

    The template id is taken from ?viewing=<tablename>.<id> if present,
    otherwise from the current record id.
    """
    # Load Model
    module = "survey"
    resourcename = "template"
    tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(tablename)
    s3mgr.load("survey_complete")
    s3 = response.s3
    crud_strings = s3.crud_strings[tablename]
    def postp(r, output):
        # Replace the standard list output with the template summary table
        if r.interactive:
            if "vars" in request and len(request.vars) > 0:
                dummy, template_id = request.vars.viewing.split(".")
            else:
                template_id = r.id
            form = s3.survey_build_template_summary(template_id)
            output["items"] = form
            output["sortby"] = [[0, "asc"]]
            output["title"] = crud_strings.title_analysis_summary
            output["subtitle"] = crud_strings.subtitle_analysis_summary
        return output
    # remove CRUD generated buttons in the tabs
    s3mgr.configure(tablename,
                    listadd=False,
                    deletable=False,
                   )
    response.s3.postp = postp
    output = s3_rest_controller(module,
                                resourcename,
                                method = "list",
                                rheader=s3.survey_template_rheader
                               )
    # No row action buttons on the summary list
    response.s3.actions = None
    return output
def templateTranslateDownload():
    """
    Download a two-column translation spreadsheet for a template.

    Collects every user-visible string of the template (name,
    description, question names, option lists, section names and
    layout headings), pairs each with any existing translation from
    the web2py language file for the translation's code, and returns
    the result as an .xls attachment.
    """
    # Load Model
    module = "survey"
    resourcename = "translate"
    tablename = "%s_%s" % (module, resourcename)
    s3mgr.load("survey_template")
    s3mgr.load("survey_translate")
    s3mgr.load("survey_complete")
    try:
        import xlwt
    except ImportError:
        # Can't build the spreadsheet without xlwt
        redirect(URL(c="survey",
                     f="templateTranslation",
                     args=[],
                     vars = {}))
    s3 = response.s3
    record = s3.survey_getTranslation(request.args[0])
    if record == None:
        redirect(URL(c="survey",
                     f="templateTranslation",
                     args=[],
                     vars = {}))
    code = record.code
    language = record.language
    lang_fileName = "applications/%s/languages/%s.py" % \
                    (request.application, code)
    try:
        # Existing translations, if a language file is already present
        strings = read_dict(lang_fileName)
    except:
        strings = dict()
    template_id = record.template_id
    template = s3.survey_getTemplate(template_id)
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet(language)
    output = StringIO()
    qstnList = s3.survey_getAllQuestionsForTemplate(template_id)
    # Collect the distinct set of translatable strings (dict used as a set)
    original = {}
    original[template["name"]] = True
    if template["description"] != "":
        original[template["description"]] = True
    for qstn in qstnList:
        original[qstn["name"]] = True
        widgetObj = survey_question_type[qstn["type"]](question_id = qstn["qstn_id"])
        if isinstance(widgetObj, S3QuestionTypeOptionWidget):
            # Option-type questions also expose their option labels
            optionList = widgetObj.getList()
            for option in optionList:
                original[option] = True
    sections = s3.survey_getAllSectionsForTemplate(template_id)
    for section in sections:
        original[section["name"]]=True
        section_id = section["section_id"]
        layoutRules = s3.survey_getQstnLayoutRules(template_id, section_id)
        layoutStr = str(layoutRules)
        # Pull out every {"heading": ...} value embedded in the layout rules
        posn = layoutStr.find("heading")
        while posn != -1:
            start = posn + 11
            end = layoutStr.find("}", start)
            original[layoutStr[start:end]] = True
            posn = layoutStr.find("heading", end)
    # Write the header row, then one row per string (sorted)
    row = 0
    sheet.write(row,
                0,
                unicode("Original")
               )
    sheet.write(row,
                1,
                unicode("Translation")
               )
    originalList = original.keys()
    originalList.sort()
    for text in originalList:
        row += 1
        # NOTE: reuses the name "original" for the unicode string
        # (the collection dict is no longer needed at this point)
        original = unicode(text)
        sheet.write(row,
                    0,
                    original
                   )
        if (original in strings):
            sheet.write(row,
                        1,
                        strings[original]
                       )
    book.save(output)
    output.seek(0)
    response.headers["Content-Type"] = contenttype(".xls")
    filename = "%s.xls" % code
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
def series():
    """
    RESTful CRUD controller for survey series.

    Also handles the "complete" component so that an individual
    completed questionnaire can be viewed/updated from within a series.
    """
    # Load Model
    tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(tablename)
    table = db[tablename]
    s3 = response.s3
    s3.survey_answerlist_dataTable_pre()
    def prep(r):
        if r.interactive:
            if r.method == "create":
                # A series needs a template; bounce to template creation if none
                allTemplates = response.s3.survey_getAllTemplates()
                if len(allTemplates) == 0:
                    session.warning = T("You need to create a template before you can create a series")
                    redirect(URL(c="survey",
                             f="template",
                             args=[],
                             vars = {}))
            if r.id and (r.method == "update"):
                # The template may not be changed once the series exists
                table.template_id.writable = False
        return True
    def postp(r, output):
        if request.ajax == True and r.method == "read":
            return output["item"]
        s3 = response.s3
        if r.component_name == None:
            s3.survey_serieslist_dataTable_post(r)
        elif r.component_name == "complete":
            if r.method == "update":
                if r.http == "GET":
                    # Render the questionnaire pre-filled with this answer set
                    form = s3.survey_buildQuestionnaireFromSeries(r.id,
                                                                  r.component_id)
                    output["form"] = form
                elif r.http == "POST":
                    if "post_vars" in request and len(request.post_vars) > 0:
                        id = s3.survey_save_answers_for_series(r.id,
                                                               r.component_id, # Update
                                                               request.post_vars)
                        response.flash = s3.crud_strings["survey_complete"].msg_record_modified
            else:
                s3.survey_answerlist_dataTable_post(r)
        return output
    # Remove CRUD generated buttons in the tabs
    s3mgr.configure("survey_series",
                    deletable = False,)
    s3mgr.configure("survey_complete",
                    listadd=False,
                    deletable=False)
    s3.prep = prep
    s3.postp = postp
    output = s3_rest_controller(module,
                                resourcename,
                                rheader=s3.survey_series_rheader)
    return output
def export_all_responses():
    """
    Export every response of a series as an .xls attachment.

    Questions become columns (code + full name header rows); each
    completed answer set becomes one row. If the series has more than
    256 questions (the xls column limit) the questions are split
    across one sheet per section.
    """
    s3mgr.load("survey_series")
    s3mgr.load("survey_section")
    s3mgr.load("survey_complete")
    # turn off lazy translation
    # otherwise xlwt will crash if it comes across a T string
    T.lazy = False
    s3 = response.s3
    try:
        import xlwt
    except ImportError:
        output = s3_rest_controller("survey",
                                    "series",
                                    rheader=s3.survey_series_rheader)
        return output
    series_id = request.args[0]
    seriesName = response.s3.survey_getSeriesName(series_id)
    sectionBreak = False
    filename = "%s_All_responses.xls" % seriesName
    contentType = ".xls"
    output = StringIO()
    book = xlwt.Workbook(encoding="utf-8")
    # get all questions and write out as a heading
    col = 0
    completeRow = {}   # complete_id -> row number, so answers line up per response
    nextRow = 2        # first data row (rows 0/1 are the two header rows)
    qstnList = response.s3.survey_getAllQuestionsForSeries(series_id)
    if len(qstnList) > 256:
        # Too many columns for a single xls sheet: one sheet per section
        sectionList = s3.survey_getAllSectionsForSeries(series_id)
        sectionBreak = True
    if sectionBreak:
        sheets = {}
        cols = {}
        for section in sectionList:
            # Sheet name is the first word of the section name
            sheetName = section["name"].split(" ")[0]
            if sheetName not in sheets:
                sheets[sheetName] = book.add_sheet(sheetName)
                cols[sheetName] = 0
    else:
        sheet = book.add_sheet(T("Responses"))
    for qstn in qstnList:
        if sectionBreak:
            sheetName = qstn["section"].split(" ")[0]
            sheet = sheets[sheetName]
            col = cols[sheetName]
        row = 0
        sheet.write(row,col,qstn["code"])
        row += 1
        widgetObj = s3.survey_getWidgetFromQuestion(qstn["qstn_id"])
        sheet.write(row,col,widgetObj.fullName())
        # for each question get the response
        allResponses = s3.survey_getAllAnswersForQuestionInSeries(qstn["qstn_id"],
                                                                  series_id)
        for answer in allResponses:
            value = answer["value"]
            complete_id = answer["complete_id"]
            if complete_id in completeRow:
                row = completeRow[complete_id]
            else:
                completeRow[complete_id] = nextRow
                row = nextRow
                nextRow += 1
            sheet.write(row,col,value)
        col += 1
        if sectionBreak:
            cols[sheetName] += 1
    # Freeze the two header rows (applies to the last sheet written to)
    sheet.panes_frozen = True
    sheet.horz_split_pos = 2
    book.save(output)
    T.lazy = True
    # turn lazy translation back on
    output.seek(0)
    response.headers["Content-Type"] = contenttype(contentType)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
def series_export_formatted():
    """
    Export an empty, formatted questionnaire for a series.

    Dispatches on the submitted button: "Export_Spreadsheet" produces
    an .xls workbook, "Export_Word" an .rtf document. An optional
    translation language and the series logo are applied when available.
    """
    s3mgr.load("survey_series")
    s3mgr.load("survey_complete")
    # Check that the series_id has been passed in
    if len(request.args) != 1:
        output = s3_rest_controller(module,
                                    resourcename,
                                    rheader=response.s3.survey_series_rheader)
        return output
    series_id = request.args[0]
    vars = current.request.post_vars
    seriesName = response.s3.survey_getSeriesName(series_id)
    series = response.s3.survey_getSeries(series_id)
    if not series.logo:
        logo = None
    else:
        # xlwt can only embed bmp; the rtf export uses png
        if "Export_Spreadsheet" in vars:
            ext = "bmp"
        else:
            ext = "png"
        logo = os.path.join(request.folder,
                            "uploads",
                            "survey",
                            "logo",
                            "%s.%s" %(series.logo,ext)
                            )
        if not os.path.exists(logo) or not os.path.isfile(logo):
            logo = None
    # Get the translation dictionary
    langDict = dict()
    if "translationLanguage" in request.post_vars:
        lang = request.post_vars.translationLanguage
        if lang == "Default":
            langDict = dict()
        else:
            try:
                lang_fileName = "applications/%s/uploads/survey/translations/%s.py" % (request.application, lang)
                langDict = read_dict(lang_fileName)
            except:
                # Missing/broken translation file: fall back to untranslated
                langDict = dict()
    if "Export_Spreadsheet" in vars:
        (matrix, matrixAnswers) = series_prepare_matrix(series_id,
                                                        series,
                                                        logo,
                                                        langDict,
                                                        justified = True
                                                        )
        output = series_export_spreadsheet(matrix,
                                           matrixAnswers,
                                           logo,
                                          )
        filename = "%s.xls" % seriesName
        contentType = ".xls"
    elif "Export_Word" in vars:
        template = response.s3.survey_getTemplateFromSeries(series_id)
        template_id = template.id
        title = "%s (%s)" % (series.name, template.name)
        title = survey_T(title, langDict)
        widgetList = response.s3.survey_getAllWidgetsForTemplate(template_id)
        output = series_export_word(widgetList, langDict, title, logo)
        filename = "%s.rtf" % seriesName
        contentType = ".rtf"
    else:
        # Neither export button was pressed: show the normal series page
        output = s3_rest_controller(module,
                                    resourcename,
                                    rheader=response.s3.survey_series_rheader)
        return output
    output.seek(0)
    response.headers["Content-Type"] = contenttype(contentType)
    response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
    return output.read()
def series_prepare_matrix(series_id, series, logo, langDict, justified = False):
    """
    Build the cell matrix for a printable questionnaire of a series.

    @param series_id: id of the series (re-read from request.args below)
    @param series: the series record
    @param logo: path of the series logo image, or None
    @param langDict: translation dictionary used via survey_T
    @param justified: when True, a second layout pass aligns rows so the
                      document is fully justified and a pair
                      (question matrix, answer matrix) is returned;
                      when False the preliminary matrix is returned as-is
    """
    module = "survey"
    resourcename = "series"
    tablename = "%s_%s" % (module, resourcename)
    s3mgr.load("survey_series")
    crud_strings = response.s3.crud_strings[tablename]
    ######################################################################
    #
    # Get the data
    # ============
    # * The sections within the template
    # * The layout rules for each question
    ######################################################################
    # Check that the series_id has been passed in
    if len(request.args) != 1:
        output = s3_rest_controller(module,
                                    resourcename,
                                    rheader=response.s3.survey_series_rheader)
        return output
    series_id = request.args[0]
    template = response.s3.survey_getTemplateFromSeries(series_id)
    template_id = template.id
    sectionList = response.s3.survey_getAllSectionsForSeries(series_id)
    title = "%s (%s)" % (series.name, template.name)
    title = survey_T(title, langDict)
    layout = []
    for section in sectionList:
        sectionName = survey_T(section["name"], langDict)
        rules = response.s3.survey_getQstnLayoutRules(template_id,
                                                      section["section_id"]
                                                     )
        layoutRules = [sectionName, rules]
        layout.append(layoutRules)
    widgetList = response.s3.survey_getAllWidgetsForTemplate(template_id)
    layoutBlocks = LayoutBlocks()
    ######################################################################
    #
    # Store the questions into a matrix based on the layout and the space
    # required for each question - for example an option question might
    # need one row for each possible option, and if this is in a layout
    # then the position needs to be recorded carefully...
    #
    ######################################################################
    preliminaryMatrix = getMatrix(title,
                                  logo,
                                  series,
                                  layout,
                                  widgetList,
                                  False,
                                  langDict,
                                  showSectionLabels = False,
                                  layoutBlocks = layoutBlocks
                                  )
#    print >> sys.stdout, "preliminaryMatrix layoutBlocks"
#    print >> sys.stdout, layoutBlocks
    if not justified:
        return preliminaryMatrix
    ######################################################################
    # Align the questions so that each row takes up the same space.
    # This is done by storing resize and margin instructions with
    # each widget that is being printed
    ######################################################################
    layoutBlocks.align()
#    print >> sys.stdout, "Aligned layoutBlocks"
#    print >> sys.stdout, layoutBlocks
    ######################################################################
    # Now rebuild the matrix with the spacing for each widget set up so
    # that the document will be fully justified
    ######################################################################
    layoutBlocks = LayoutBlocks()
    (matrix1, matrix2) = getMatrix(title,
                                   logo,
                                   series,
                                   layout,
                                   widgetList,
                                   True,
                                   langDict,
                                   showSectionLabels = False,
                                   )
#    f = open("/home/graeme/web2py/applications/eden/uploads/debug.txt","w+")
#    print >> f, matrix1
    return (matrix1, matrix2)
def series_export_word(widgetList, langDict, title, logo):
    """
    Render the questionnaire as an RTF document using PyRTF.

    @param widgetList: dict of question widgets for the template
    @param langDict: translation dictionary passed to each widget
    @param title: (already translated) document title
    @param logo: path of a logo image to place before the title, or None
    @return: a StringIO holding the rendered RTF document
    """
    try:
        from PyRTF import Document, \
                          Languages, \
                          Section, \
                          Image, \
                          Paragraph, \
                          ShadingPropertySet, \
                          ParagraphPropertySet, \
                          StandardColours, \
                          Colour, \
                          Table, \
                          Cell, \
                          Renderer
    except ImportError:
        # PyRTF not installed: fall back to the normal series page
        output = s3_rest_controller(module,
                                    resourcename,
                                    rheader=response.s3.survey_series_rheader)
        return output
    output = StringIO()
    doc = Document(default_language=Languages.EnglishUK)
    section = Section()
    ss = doc.StyleSheet
    # Two extra paragraph styles used by the question widgets
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalGrey")
    ps.SetShadingPropertySet(ShadingPropertySet(pattern=1,
                                                background=Colour('grey light', 224, 224, 224)))
    ss.ParagraphStyles.append(ps)
    ps = ss.ParagraphStyles.Normal.Copy()
    ps.SetName("NormalCentre")
    ps.SetParagraphPropertySet(ParagraphPropertySet(alignment=3))
    ss.ParagraphStyles.append(ps)
    doc.Sections.append(section)
    heading = Paragraph(ss.ParagraphStyles.Heading1)
    if logo:
        image = Image(logo)
        heading.append(image)
    heading.append(title)
    section.append(heading)
    # Two-column layout: question label | answer area (widths in twips)
    col = [2800, 6500]
    table = Table(*col)
    # Render the widgets in question position order
    sortedwidgetList = sorted(widgetList.values(), key= lambda widget: widget.question.posn)
    for widget in sortedwidgetList:
        line = widget.writeToRTF(ss, langDict)
        try:
            table.AddRow(*line)
        except:
            # A widget that can't be rendered is skipped (re-raised in DEBUG)
            if DEBUG:
                raise
            pass
    section.append(table)
    renderer = Renderer()
    renderer.Write(doc, output)
    return output
def series_export_spreadsheet(matrix, matrixAnswers, logo):
    """
    Render the questionnaire matrix as an xlwt workbook.

    Builds two sheets: "Assessment" holds the formatted questionnaire
    cells from *matrix*; a hidden-ish "Metadata" sheet holds the answer
    bookkeeping cells from *matrixAnswers* (used when a completed
    spreadsheet is re-imported).

    @param matrix: DataMatrix of questionnaire cells
    @param matrixAnswers: DataMatrix of answer-metadata cells
    @param logo: path of a bmp logo to embed, or None
    @return: a StringIO holding the saved workbook
    """
    ######################################################################
    #
    # Now take the matrix data type and generate a spreadsheet from it
    #
    ######################################################################
    import math
    try:
        import xlwt
    except ImportError:
        output = s3_rest_controller(module,
                                resourcename,
                                rheader=response.s3.survey_series_rheader)
        return output
    def wrapText(sheet, cell, style):
        # Write a wrapping cell and grow the row height / column width
        # so the full text remains visible
        row = cell.row
        col = cell.col
        try:
            text = unicode(cell.text)
        except:
            text = cell.text
        width = 16
        # Wrap text and calculate the row width and height
        characters_in_cell = float(width-2)
        twips_per_row = 255 #default row height for 10 point font
        if cell.merged():
            try:
                sheet.write_merge(cell.row,
                                  cell.row + cell.mergeV,
                                  cell.col,
                                  cell.col + cell.mergeH,
                                  text,
                                  style
                                 )
            except Exception as msg:
                print >> sys.stderr, msg
                print >> sys.stderr, "row: %s + vert: %s, col: %s + horiz %s" % (cell.row, cell.mergeV, cell.col, cell.mergeH)
                posn = "%s,%s"%(cell.row, cell.col)
                if matrix.matrix[posn]:
                    print >> sys.stderr, matrix.matrix[posn]
            rows = math.ceil((len(text) / characters_in_cell) / (1 + cell.mergeH))
        else:
            sheet.write(cell.row,
                        cell.col,
                        text,
                        style
                       )
            rows = math.ceil(len(text) / characters_in_cell)
        new_row_height = int(rows * twips_per_row)
        new_col_width = width * COL_WIDTH_MULTIPLIER
        if sheet.row(row).height < new_row_height:
            sheet.row(row).height = new_row_height
        if sheet.col(col).width < new_col_width:
            sheet.col(col).width = new_col_width
    def mergeStyles(listTemplate, styleList):
        """
        Take a list of styles and return a single style object with
        all the differences from a newly created object added to the
        resultant style.
        """
        if len(styleList) == 0:
            finalStyle = xlwt.XFStyle()
        elif len(styleList) == 1:
            finalStyle = listTemplate[styleList[0]]
        else:
            zeroStyle = xlwt.XFStyle()
            finalStyle = xlwt.XFStyle()
            for i in range(0,len(styleList)):
                finalStyle = mergeObjectDiff(finalStyle,
                                             listTemplate[styleList[i]],
                                             zeroStyle)
        return finalStyle
    def mergeObjectDiff(baseObj, newObj, zeroObj):
        """
        function to copy all the elements in newObj that are different from
        the zeroObj and place them in the baseObj
        """
        elementList = newObj.__dict__
        for (element, value) in elementList.items():
            try:
                # Recurse into nested style objects (font, borders, ...)
                baseObj.__dict__[element] = mergeObjectDiff(baseObj.__dict__[element],
                                                            value,
                                                            zeroObj.__dict__[element])
            except:
                # Leaf value: copy it only if it differs from the default
                if zeroObj.__dict__[element] != value:
                    baseObj.__dict__[element] = value
        return baseObj
    COL_WIDTH_MULTIPLIER = 240
    book = xlwt.Workbook(encoding="utf-8")
    output = StringIO()
    # ------------------------------------------------------------------
    # Build the named styles the matrix cells may reference
    # ------------------------------------------------------------------
    protection = xlwt.Protection()
    protection.cell_locked = 1
    noProtection = xlwt.Protection()
    noProtection.cell_locked = 0
    borders = xlwt.Borders()
    borders.left = xlwt.Borders.DOTTED
    borders.right = xlwt.Borders.DOTTED
    borders.top = xlwt.Borders.DOTTED
    borders.bottom = xlwt.Borders.DOTTED
    borderT1 = xlwt.Borders()
    borderT1.top = xlwt.Borders.THIN
    borderT2 = xlwt.Borders()
    borderT2.top = xlwt.Borders.MEDIUM
    borderL1 = xlwt.Borders()
    borderL1.left = xlwt.Borders.THIN
    borderL2 = xlwt.Borders()
    borderL2.left = xlwt.Borders.MEDIUM
    borderR1 = xlwt.Borders()
    borderR1.right = xlwt.Borders.THIN
    borderR2 = xlwt.Borders()
    borderR2.right = xlwt.Borders.MEDIUM
    borderB1 = xlwt.Borders()
    borderB1.bottom = xlwt.Borders.THIN
    borderB2 = xlwt.Borders()
    borderB2.bottom = xlwt.Borders.MEDIUM
    alignBase = xlwt.Alignment()
    alignBase.horz = xlwt.Alignment.HORZ_LEFT
    alignBase.vert = xlwt.Alignment.VERT_TOP
    alignWrap = xlwt.Alignment()
    alignWrap.horz = xlwt.Alignment.HORZ_LEFT
    alignWrap.vert = xlwt.Alignment.VERT_TOP
    alignWrap.wrap = xlwt.Alignment.WRAP_AT_RIGHT
    shadedFill = xlwt.Pattern()
    shadedFill.pattern = xlwt.Pattern.SOLID_PATTERN
    shadedFill.pattern_fore_colour = 0x16 # 25% Grey
    shadedFill.pattern_back_colour = 0x08 # Black
    headingFill = xlwt.Pattern()
    headingFill.pattern = xlwt.Pattern.SOLID_PATTERN
    headingFill.pattern_fore_colour = 0x1F # ice_blue
    headingFill.pattern_back_colour = 0x08 # Black
    styleTitle = xlwt.XFStyle()
    styleTitle.font.height = 0x0140 # 320 twips, 16 points
    styleTitle.font.bold = True
    styleTitle.alignment = alignBase
    styleHeader = xlwt.XFStyle()
    styleHeader.font.height = 0x00F0 # 240 twips, 12 points
    styleHeader.font.bold = True
    styleHeader.alignment = alignBase
    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    styleSubHeader.alignment = alignWrap
    styleSectionHeading = xlwt.XFStyle()
    styleSectionHeading.font.bold = True
    styleSectionHeading.alignment = alignWrap
    styleSectionHeading.pattern = headingFill
    styleHint = xlwt.XFStyle()
    styleHint.protection = protection
    styleHint.font.height = 160 # 160 twips, 8 points
    styleHint.font.italic = True
    styleHint.alignment = alignWrap
    styleText = xlwt.XFStyle()
    styleText.protection = protection
    styleText.alignment = alignWrap
    styleInstructions = xlwt.XFStyle()
    styleInstructions.font.height = 0x00B4 # 180 twips, 9 points
    styleInstructions.font.italic = True
    styleInstructions.protection = protection
    styleInstructions.alignment = alignWrap
    styleBox = xlwt.XFStyle()
    styleBox.borders = borders
    styleBox.protection = noProtection
    styleInput = xlwt.XFStyle()
    styleInput.borders = borders
    styleInput.protection = noProtection
    styleInput.pattern = shadedFill
    boxL1 = xlwt.XFStyle()
    boxL1.borders = borderL1
    boxL2 = xlwt.XFStyle()
    boxL2.borders = borderL2
    boxT1 = xlwt.XFStyle()
    boxT1.borders = borderT1
    boxT2 = xlwt.XFStyle()
    boxT2.borders = borderT2
    boxR1 = xlwt.XFStyle()
    boxR1.borders = borderR1
    boxR2 = xlwt.XFStyle()
    boxR2.borders = borderR2
    boxB1 = xlwt.XFStyle()
    boxB1.borders = borderB1
    boxB2 = xlwt.XFStyle()
    boxB2.borders = borderB2
    # Lookup table: style name (as stored in the cell) -> style object
    styleList = {}
    styleList["styleTitle"] = styleTitle
    styleList["styleHeader"] = styleHeader
    styleList["styleSubHeader"] = styleSubHeader
    styleList["styleSectionHeading"] = styleSectionHeading
    styleList["styleHint"] = styleHint
    styleList["styleText"] = styleText
    styleList["styleInstructions"] = styleInstructions
    styleList["styleInput"] = styleInput
    styleList["boxL1"] = boxL1
    styleList["boxL2"] = boxL2
    styleList["boxT1"] = boxT1
    styleList["boxT2"] = boxT2
    styleList["boxR1"] = boxR1
    styleList["boxR2"] = boxR2
    styleList["boxB1"] = boxB1
    styleList["boxB2"] = boxB2
    sheet1 = book.add_sheet(T("Assessment"))
    sheetA = book.add_sheet(T("Metadata"))
    # ------------------------------------------------------------------
    # Write the questionnaire cells
    # ------------------------------------------------------------------
    maxCol = 0
    for cell in matrix.matrix.values():
        if cell.col + cell.mergeH > 255:
            # Beyond the xls column limit: cell cannot be written
            print >> sys.stderr, "Cell (%s,%s) - (%s,%s) ignored" % (cell.col, cell.row, cell.col + cell.mergeH, cell.row + cell.mergeV)
            continue
        if cell.col + cell.mergeH > maxCol:
            maxCol = cell.col + cell.mergeH
        if cell.joined():
            # Part of a merge started by another cell
            continue
        style = mergeStyles(styleList, cell.styleList)
        if (style.alignment.wrap == style.alignment.WRAP_AT_RIGHT):
            # get all the styles from the joined cells
            # and merge these styles in.
            joinedStyles = matrix.joinedElementStyles(cell)
            joinedStyle = mergeStyles(styleList, joinedStyles)
            try:
                wrapText(sheet1, cell, joinedStyle)
            except:
                pass
        else:
            if cell.merged():
                # get all the styles from the joined cells
                # and merge these styles in.
                joinedStyles = matrix.joinedElementStyles(cell)
                joinedStyle = mergeStyles(styleList, joinedStyles)
                try:
                    sheet1.write_merge(cell.row,
                                       cell.row + cell.mergeV,
                                       cell.col,
                                       cell.col + cell.mergeH,
                                       unicode(cell.text),
                                       joinedStyle
                                      )
                except Exception as msg:
                    print >> sys.stderr, msg
                    print >> sys.stderr, "row: %s + vert: %s, col: %s + horiz %s" % (cell.row, cell.mergeV, cell.col, cell.mergeH)
                    posn = "%s,%s"%(cell.row, cell.col)
                    if matrix.matrix[posn]:
                        print >> sys.stderr, matrix.matrix[posn]
            else:
                sheet1.write(cell.row,
                             cell.col,
                             unicode(cell.text),
                             style
                            )
    cellWidth = 480 # approximately 2 characters
    if maxCol > 255:
        maxCol = 255
    for col in range(maxCol+1):
        sheet1.col(col).width = cellWidth
    # ------------------------------------------------------------------
    # Write the metadata sheet used by the spreadsheet import
    # ------------------------------------------------------------------
    sheetA.write(0, 0, "Question Code")
    sheetA.write(0, 1, "Response Count")
    sheetA.write(0, 2, "Values")
    sheetA.write(0, 3, "Cell Address")
    for cell in matrixAnswers.matrix.values():
        style = mergeStyles(styleList, cell.styleList)
        sheetA.write(cell.row,
                     cell.col,
                     unicode(cell.text),
                     style
                    )
    if logo != None:
        sheet1.insert_bitmap(logo, 0, 0)
    sheet1.protect = True
    sheetA.protect = True
    # Hide the metadata columns and leave a visible warning message
    for i in range(26):
        sheetA.col(i).width = 0
    sheetA.write(0,
                 26,
                 unicode(T("Please do not remove this sheet")),
                 styleHeader
                )
    sheetA.col(26).width = 12000
    book.save(output)
    return output
def completed_chart():
    """ RESTful CRUD controller

        Allows the user to display all the data from the selected question
        in a simple chart. If the data is numeric then a histogram will be
        drawn if it is an option type then a pie chart, although the type of
        chart drawn is managed by the analysis widget.

        Expects request.vars: series_id, question_id and type (the
        question type used to select the analysis tool). Returns the
        chart image (png) or an error string if a parameter is missing.
    """
    # Load Model
    s3mgr.load("survey_series")
    s3mgr.load("survey_question")
    if "series_id" in request.vars:
        seriesID = request.vars.series_id
    else:
        return "Programming Error: Series ID missing"
    if "question_id" in request.vars:
        qstnID = request.vars.question_id
    else:
        return "Programming Error: Question ID missing"
    if "type" in request.vars:
        # NOTE: renamed from "type" to avoid shadowing the builtin
        qstnType = request.vars.type
    else:
        return "Programming Error: Question Type missing"
    getAnswers = response.s3.survey_getAllAnswersForQuestionInSeries
    answers = getAnswers(qstnID, seriesID)
    # The analysis tool for this question type decides which chart to draw
    analysisTool = survey_analysis_type[qstnType](qstnID, answers)
    image = analysisTool.drawChart(seriesID, output="png")
    return image
def section():
    """ RESTful CRUD controller for survey sections """
    # Load Model
    section_tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(section_tablename)
    section_table = db[section_tablename]
    def prep(r):
        # Sections may not be deleted and are listed in position order
        s3mgr.configure(r.tablename,
                        deletable = False,
                        orderby = "%s.posn" % r.tablename,
                        )
        return True
    # Post-processor
    def postp(r, output):
        """ Add the section select widget to the form """
        try:
            template_id = int(request.args[0])
        except:
            template_id = None
        output["sectionSelect"] = \
            response.s3.survey_section_select_widget(template_id)
        return output
    response.s3.prep = prep
    response.s3.postp = postp
    return s3_rest_controller(module, resourcename,
                              rheader=response.s3.survey_section_rheader)
def question():
    """ RESTful CRUD controller for survey questions """
    # Load Model
    question_tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(question_tablename)
    question_table = db[question_tablename]
    def prep(r):
        # Present the questions in position order
        s3mgr.configure(r.tablename,
                        orderby = "%s.posn" % r.tablename,
                        )
        return True
    # Post-processor (no changes needed to the standard output)
    def postp(r, output):
        return output
    response.s3.prep = prep
    response.s3.postp = postp
    return s3_rest_controller(module, resourcename,
                              rheader=response.s3.survey_section_rheader)
def question_list():
    """ RESTful CRUD controller for the question list resource """
    # Load the models this resource depends on
    list_tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(list_tablename)
    s3mgr.load("survey_complete")
    list_table = db[list_tablename]
    return s3_rest_controller(module, resourcename)
def formatter():
    """ RESTful CRUD controller for the formatter resource """
    # Load Model
    fmt_tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(fmt_tablename)
    fmt_table = db[fmt_tablename]
    return s3_rest_controller(module, resourcename)
def question_metadata():
    """ RESTful CRUD controller for question metadata """
    # Load Model
    meta_tablename = "%s_%s" % (module, resourcename)
    s3mgr.load(meta_tablename)
    meta_table = db[meta_tablename]
    return s3_rest_controller(module, resourcename)
def newAssessment():
    """
    RESTful CRUD controller for entering a new completed assessment.

    The series is identified via ?viewing=<table>.<id>, ?series=<id>,
    or the record id. On POST the answers are saved; on GET the empty
    questionnaire form plus an Excel-import button are rendered.
    """
    # Load Model
    module = "survey"
    resourcename = "complete"
    tablename = "%s_%s" % (module, resourcename)
    s3mgr.load("survey_complete")
    s3mgr.load("survey_series")
    table = db[tablename]
    s3 = response.s3
    def prep(r):
        if r.interactive:
            # Work out which series this assessment belongs to
            if "viewing" in request.vars:
                dummy, series_id = request.vars.viewing.split(".")
            elif "series" in request.vars:
                series_id = request.vars.series
            else:
                series_id = r.id
            if series_id == None:
                # The URL is bad, without a series id we're lost so list all series
                redirect(URL(c="survey",
                             f="series",
                             args=[],
                             vars = {}))
            if "post_vars" in request and len(request.post_vars) > 0:
                # Save the submitted answers as a new record
                id = s3.survey_save_answers_for_series(series_id,
                                                       None, # Insert
                                                       request.post_vars)
                response.confirmation = \
                    s3.crud_strings["survey_complete"].msg_record_created
        return True
    def postp(r, output):
        if r.interactive:
            # Same series resolution as in prep
            if "viewing" in request.vars:
                dummy, series_id = request.vars.viewing.split(".")
            elif "series" in request.vars:
                series_id = request.vars.series
            else:
                series_id = r.id
            if output["form"] == None:
                # The user is not authorised to create so switch to read
                redirect(URL(c="survey",
                             f="series",
                             args=[series_id,"read"],
                             vars = {}))
            s3.survey_answerlist_dataTable_post(r)
            # Replace the generic form with the empty questionnaire
            form = s3.survey_buildQuestionnaireFromSeries(series_id, None)
            urlimport = URL(c=module,
                            f="complete",
                            args=["import"],
                            vars = {"viewing":"%s.%s" % ("survey_series", series_id)
                                    ,"single_pass":True}
                            )
            buttons = DIV (A(T("Upload Completed Assessment Form"),
                             _href=urlimport,
                             _id="Excel-import",
                             _class="action-btn"
                             ),
                          )
            output["subtitle"] = buttons
            output["form"] = form
        return output
    response.s3.prep = prep
    response.s3.postp = postp
    output = s3_rest_controller(module,
                                resourcename,
                                method = "create",
                                rheader=s3.survey_series_rheader
                               )
    return output
def complete():
    """
    RESTful CRUD controller for completed assessments.

    Also registers import_xls as the xls_parser, which converts an
    uploaded assessment workbook (as produced by the formatted export)
    into the CSV form expected by the standard importer.
    """
    # Load Model
    s3mgr.load("survey_complete")
    s3mgr.load("survey_series")
    table = db["survey_complete"]
    s3 = response.s3
    s3.survey_answerlist_dataTable_pre()
    def postp(r, output):
        if r.method == "import":
            pass # don't want the import dataTable to be modified
        else:
            s3.survey_answerlist_dataTable_post(r)
        return output
    def import_xls(uploadFile):
        """
        Parse the uploaded workbook into a two-line CSV (header + body).

        The "Metadata" sheet maps each question code to the cell
        address(es) of its answer on the "Assessment" sheet.
        Returns a StringIO of CSV data, or None if xlrd/xlwt are missing.
        """
        if series_id == None:
            response.error = T("Series details missing")
            return
        openFile = StringIO()
        from datetime import date
        try:
            import xlrd
            from xlwt.Utils import cell_to_rowcol2
        except ImportError:
            print >> sys.stderr, "ERROR: xlrd & xlwt modules are needed for importing spreadsheets"
            return None
        workbook = xlrd.open_workbook(file_contents=uploadFile)
        try:
            sheetR = workbook.sheet_by_name("Assessment")
            sheetM = workbook.sheet_by_name("Metadata")
        except:
            # Not the workbook produced by the formatted export
            session.error = T("You need to use the spreadsheet which you can download from this page")
            redirect(URL(c="survey",
                         f="newAssessment",
                         args=[],
                         vars = {"viewing":"survey_series.%s" % series_id}))
        header = ''
        body = ''
        # One metadata row per question: code, answer count, option
        # values and the cell address(es) of the answer
        for row in xrange(1, sheetM.nrows):
            header += ',"%s"' % sheetM.cell_value(row, 0)
            code = sheetM.cell_value(row, 0)
            qstn = s3.survey_getQuestionFromCode(code, series_id)
            type = qstn["type"]
            count = sheetM.cell_value(row, 1)
            if count != "":
                count = int(count)
                optionList = sheetM.cell_value(row, 2).split("|#|")
            else:
                count = 1
                optionList = None
            # The accumulator shape depends on the question type
            if type == "Location" and optionList != None:
                answerList = {}
            elif type == "MultiOption":
                answerList = []
            else:
                answerList = ''
            for col in range(count):
                cell = sheetM.cell_value(row, 3+col)
                (rowR, colR) = cell_to_rowcol2(cell)
                try:
                    cellValue = sheetR.cell_value(rowR, colR)
                except IndexError:
                    cellValue = ""
                """
                    BUG: The option list needs to work in different ways
                    depending on the question type. The question type should
                    be added to the spreadsheet to save extra db calls:
                    * Location save all the data as a hierarchy
                    * MultiOption save all selections
                    * Option save the last selection
                """
                if cellValue != "":
                    if optionList != None:
                        if type == "Location":
                            answerList[optionList[col]]=cellValue
                        elif type == "MultiOption":
                            answerList.append(optionList[col])
                        else:
                            answerList = optionList[col]
                    else:
                        if type == "Date":
                            # Convert the Excel serial date to ISO format
                            try:
                                (dtYear, dtMonth, dtDay, dtHour, dtMinute, dtSecond) = \
                                         xlrd.xldate_as_tuple(cellValue,
                                                              workbook.datemode)
                                dtValue = date(dtYear, dtMonth, dtDay)
                                cellValue = dtValue.isoformat()
                            except:
                                pass
                        elif type == "Time":
                            # Convert the Excel day-fraction to HH:MM
                            try:
                                time = cellValue
                                hour = int(time*24)
                                minute = int((time*24-hour)*60)
                                cellValue = "%s:%s" % (hour, minute)
                            except:
                                pass
                        answerList += "%s" % cellValue
            body += ',"%s"' % answerList
        openFile.write(header)
        openFile.write("\n")
        openFile.write(body)
        openFile.seek(0)
        return openFile
    series_id = None
    try:
        if "viewing" in request.vars:
            dummy, series_id = request.vars.viewing.split(".")
            series_name = response.s3.survey_getSeriesName(series_id)
        # NOTE: if "viewing" is absent, series_name is undefined here and
        # the NameError is caught below, leaving csv_extra_fields empty
        if series_name != "":
            csv_extra_fields = [dict(label="Series", value=series_name)]
        else:
            csv_extra_fields = []
    except:
        csv_extra_fields = []
    s3mgr.configure("survey_complete",
                    listadd=False,
                    deletable=False)
    response.s3.postp = postp
    response.s3.xls_parser = import_xls
    output = s3_rest_controller(module, resourcename,
                                csv_extra_fields=csv_extra_fields)
    return output
def answer():
    """ RESTful CRUD controller for survey answers.

        Loads the answer/question models so their tables are defined,
        then delegates all CRUD handling to the generic S3 REST controller.
    """
    # Load Model
    s3mgr.load("survey_answer")
    s3mgr.load("survey_question")
    # NOTE(review): `table` is unused below; presumably the db[...] access is
    # kept so the table is instantiated before the REST controller runs --
    # confirm before removing.
    table = db["survey_answer"]
    output = s3_rest_controller(module, resourcename)
    return output
def analysis():
    """ Bespoke controller

        Configures the survey_complete resource (no list-add, no delete)
        and hands rendering to the generic S3 REST controller.
    """
    # Load Model
    # tablename = "%s_%s" % (module, resourcename)
    # s3mgr.load(tablename)
    # table = db[tablename]
    try:
        # NOTE(review): template_id is never used below, and the bare except
        # silently swallows a missing request argument -- presumably a
        # leftover; confirm before removing.
        template_id = request.args[0]
    except:
        pass
    s3mgr.configure("survey_complete",
                    listadd=False,
                    deletable=False)
    output = s3_rest_controller(module, "complete")
    return output
| {
"content_hash": "070223deeb24e7ab8c8b8b5e1b0dc0bc",
"timestamp": "",
"source": "github",
"line_count": 1338,
"max_line_length": 137,
"avg_line_length": 36.89611360239163,
"alnum_prop": 0.5185852897684688,
"repo_name": "flavour/iscram",
"id": "3ebef66d511278b8dae1c961a19973508525b28e",
"size": "49392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/survey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10046797"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "21691465"
}
],
"symlink_target": ""
} |
import logging
import eventlet
from oslo_messaging._drivers.zmq_driver import zmq_poller
# Module-level logger shared by the poller and executor classes below.
LOG = logging.getLogger(__name__)
class GreenPoller(zmq_poller.ZmqPoller):
    """Poller backed by eventlet: each registered socket gets its own
    green thread, and every received message lands on one shared queue."""

    def __init__(self):
        self.incoming_queue = eventlet.queue.LightQueue()
        self.thread_by_socket = {}

    def register(self, socket, recv_method=None):
        # Only the first registration of a socket spawns a receive loop.
        if socket in self.thread_by_socket:
            return
        LOG.debug("Registering socket %s", socket.handle.identity)
        self.thread_by_socket[socket] = eventlet.spawn(
            self._socket_receive, socket, recv_method)

    def unregister(self, socket):
        receiver = self.thread_by_socket.pop(socket, None)
        if receiver:
            LOG.debug("Unregistering socket %s", socket.handle.identity)
            receiver.kill()

    def _socket_receive(self, socket, recv_method=None):
        # Endless cooperative loop; eventlet.sleep() yields to the hub so
        # other green threads can run between messages.
        while True:
            incoming = recv_method(socket) if recv_method \
                else socket.recv_multipart()
            self.incoming_queue.put((incoming, socket))
            eventlet.sleep()

    def poll(self, timeout=None):
        # (None, None) signals that nothing arrived within the timeout.
        try:
            item = self.incoming_queue.get(timeout=timeout)
        except eventlet.queue.Empty:
            return None, None
        return item

    def close(self):
        for receiver in list(self.thread_by_socket.values()):
            receiver.kill()
        self.thread_by_socket = {}
class GreenExecutor(zmq_poller.Executor):
    """Executor that repeatedly invokes a callable in a single green thread."""

    def __init__(self, method):
        self._method = method
        super(GreenExecutor, self).__init__(None)

    def _loop(self):
        # Yield to the eventlet hub after every call so we cooperate.
        while True:
            self._method()
            eventlet.sleep()

    def execute(self):
        # Idempotent: at most one runner thread exists at a time.
        if self.thread is None:
            self.thread = eventlet.spawn(self._loop)

    def stop(self):
        # Detach the thread from the instance before killing it.
        runner, self.thread = self.thread, None
        if runner is not None:
            runner.kill()
| {
"content_hash": "151124382fd361ea2c5762c3e93c1d00",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 72,
"avg_line_length": 27.955882352941178,
"alnum_prop": 0.5860073645449764,
"repo_name": "ozamiatin/oslo.messaging",
"id": "fdf9b442d95a6cb8cd2c321ec719f066b528ab1e",
"size": "2511",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oslo_messaging/_drivers/zmq_driver/poller/green_poller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1221796"
},
{
"name": "Shell",
"bytes": "8290"
}
],
"symlink_target": ""
} |
import multiprocessing
import optparse
import os
from os.path import join
import shlex
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
ARCH_GUESS = utils.DefaultArch()
# Suites that run when no suite is named on the command line.
DEFAULT_TESTS = ["mjsunit", "cctest", "message", "preparser"]
# Base per-test timeout in seconds; scaled by TIMEOUT_SCALEFACTOR per mode.
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
                       "release" : 1 }

# Use this to run several variants of the tests.
VARIANT_FLAGS = [[],
                 ["--stress-opt", "--always-opt"],
                 ["--nocrankshaft"]]
# Extra V8 flags always passed for each build mode.
MODE_FLAGS = {
  "debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
             "--nofold-constants", "--enable-slow-asserts",
             "--debug-code", "--verify-heap"],
  "release" : ["--nobreak-on-abort", "--nodead-code-elimination",
               "--nofold-constants"]}

# Architectures this runner accepts for --arch.
SUPPORTED_ARCHS = ["android_arm",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_ia32",
              "arm",
              "mipsel",
              "nacl_ia32",
              "nacl_x64"]
def BuildOptions():
  """Build and return the optparse parser holding every command-line flag."""
  result = optparse.OptionParser()
  result.add_option("--arch", default="ia32,x64,arm",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"))
  result.add_option("--arch-and-mode", default=None,
                    help="Architecture and mode in the format 'arch.mode'")
  result.add_option("--buildbot", default=False, action="store_true",
                    help="Adapt to path structure used on buildbots")
  result.add_option("--cat", default=False, action="store_true",
                    help="Print the source of the tests")
  result.add_option("--command-prefix", default="",
                    help="Prepended to each shell command used to run a test")
  result.add_option("--download-data", default=False, action="store_true",
                    help="Download missing test suite data")
  result.add_option("--extra-flags", default="",
                    help="Additional flags to pass to each test command")
  result.add_option("--isolates", default=False, action="store_true",
                    help="Whether to test isolates")
  result.add_option("-j", default=0, type="int",
                    help="The number of parallel tasks to run")
  result.add_option("-m", "--mode", default="release,debug",
                    help="The test modes in which to run (comma-separated)")
  result.add_option("--no-network", "--nonetwork",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true",
                    help="Don't distribute tests on the network")
  result.add_option("--no-presubmit", "--nopresubmit",
                    default=False, dest="no_presubmit", action="store_true",
                    help='Skip presubmit checks')
  result.add_option("--no-stress", "--nostress",
                    default=False, dest="no_stress", action="store_true",
                    help="Don't run crankshaft --always-opt --stress-op test")
  result.add_option("--outdir", default="out",
                    help="Base directory with compile output")
  result.add_option("-p", "--progress",
                    choices=progress.PROGRESS_INDICATORS.keys(),
                    default="mono",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"))
  result.add_option("--report", default=False, action="store_true",
                    help="Print a summary of the tests to be run")
  result.add_option("--shard-count", default=1, type="int",
                    help="Split testsuites into this number of shards")
  result.add_option("--shard-run", default=1, type="int",
                    help="Run this shard from the split up tests.")
  result.add_option("--shell", default="",
                    help="DEPRECATED! use --shell-dir")
  result.add_option("--shell-dir", default="",
                    help="Directory containing executables")
  result.add_option("--stress-only", default=False, action="store_true",
                    help="Only run tests with --always-opt --stress-opt")
  result.add_option("--time", default=False, action="store_true",
                    help="Print timing information after running")
  result.add_option("-t", "--timeout", default= -1, type="int",
                    help="Timeout in seconds")
  result.add_option("-v", "--verbose", default=False, action="store_true",
                    help="Verbose output")
  result.add_option("--valgrind", default=False, action="store_true",
                    help="Run tests through valgrind")
  result.add_option("--warn-unused", default=False, action="store_true",
                    help="Report unused rules")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite", default="v8tests",
                    help="The testsuite name in the JUnit output file")
  return result
def ProcessOptions(options):
  """Normalize and validate parsed options in place; False on bad input."""
  global VARIANT_FLAGS

  # Architecture and mode related stuff.
  if options.arch_and_mode:
    tokens = options.arch_and_mode.split(".")
    options.arch, options.mode = tokens[0], tokens[1]
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if mode.lower() not in ["debug", "release"]:
      print("Unknown mode %s" % mode)
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if arch not in SUPPORTED_ARCHS:
      print("Unknown architecture %s" % arch)
      return False

  # Special processing of other options, sorted alphabetically.
  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)
  if options.j == 0:
    options.j = multiprocessing.cpu_count()
  if options.no_stress:
    VARIANT_FLAGS = [[], ["--nocrankshaft"]]
  if not options.shell_dir and options.shell:
    print("Warning: --shell is deprecated, use --shell-dir instead.")
    options.shell_dir = os.path.dirname(options.shell)
  if options.stress_only:
    VARIANT_FLAGS = [["--stress-opt", "--always-opt"]]
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)
  return True
def ShardTests(tests, shard_count, shard_run):
  """Return the shard_run-th round-robin slice (1-based) of tests."""
  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print("shard-run not a valid number, should be in [1:shard-count]")
    print("defaulting back to running all tests")
    return tests
  # Round-robin assignment: test at position i belongs to shard
  # (i % shard_count) + 1.
  return [test for position, test in enumerate(tests)
          if position % shard_count == shard_run - 1]
def Main():
  """Parse options, select suites, and run tests for every arch/mode pair."""
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  exit_code = 0
  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
  # Presubmit failure is reflected in the exit code but does not stop the run.
  # NOTE: this file uses Python 2 `print` statements.
  if not options.no_presubmit:
    print ">>> running presubmit tests"
    code = subprocess.call(
      [sys.executable, join(workspace, "tools", "presubmit.py")])
    exit_code = code
  suite_paths = utils.GetSuitePaths(join(workspace, "test"))
  # No positional args: run the default suites; otherwise keep only the
  # suites named by the args (first path component of each argument).
  if len(args) == 0:
    suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
  else:
    args_suites = set()
    for arg in args:
      suite = arg.split(os.path.sep)[0]
      if not suite in args_suites:
        args_suites.add(suite)
    suite_paths = [ s for s in suite_paths if s in args_suites ]
  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(workspace, "test", root))
    if suite:
      suites.append(suite)
  if options.download_data:
    for s in suites:
      s.DownloadData()
  # Run every (mode, arch) combination; `or` keeps the first non-zero code.
  for mode in options.mode:
    for arch in options.arch:
      code = Execute(arch, mode, args, options, suites, workspace)
      exit_code = exit_code or code
  return exit_code
def Execute(arch, mode, args, options, suites, workspace):
  """Run all selected suites for one (arch, mode) pair; return exit code."""
  print(">>> Running tests for %s.%s" % (arch, mode))
  # Locate the directory holding the test executables.
  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)
  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT;
    else:
      timeout = TIMEOUT_DEFAULT;
  timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags)
  # Find available test suites and read test cases from them.
  variables = {
    "mode": mode,
    "arch": arch,
    "system": utils.GuessOS(),
    "isolates": options.isolates
  }
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    # Expand each test into one copy per variant flag set, then shard.
    variant_flags = s.VariantFlags() or VARIANT_FLAGS
    s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1
  if options.cat:
    return 0  # We're done here.
  if options.report:
    verbose.PrintReport(all_tests)
  if num_tests == 0:
    print "No tests to run."
    return 0
  # Run the tests, either locally or distributed on the network.
  try:
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
      progress_indicator = progress.JUnitTestProgressIndicator(
          progress_indicator, options.junitout, options.junittestsuite)
    # Fall back to local execution unless network distribution is possible
    # and worthwhile (Linux, >1 peer, >100 tests).
    run_networked = not options.no_network
    if not run_networked:
      print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
      print("Network distribution is only supported on Linux, sorry!")
      run_networked = False
    peers = []
    if run_networked:
      peers = network_execution.GetPeers()
      if not peers:
        print("No connection to distribution server; running tests locally.")
        run_networked = False
      elif len(peers) == 1:
        print("No other peers on the network; running tests locally.")
        run_networked = False
      elif num_tests <= 100:
        print("Less than 100 tests, running them locally.")
        run_networked = False
    if run_networked:
      runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                 ctx, peers, workspace)
    else:
      runner = execution.Runner(suites, progress_indicator, ctx)
    exit_code = runner.Run(options.j)
    if runner.terminate:
      return exit_code
    overall_duration = time.time() - start_time
  except KeyboardInterrupt:
    return 1
  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code
# Script entry point: propagate the overall result as the process exit code.
if __name__ == "__main__":
  sys.exit(Main())
| {
"content_hash": "580e24c6247ee36ea0de4c1c44c3fd84",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 80,
"avg_line_length": 35.84788732394366,
"alnum_prop": 0.6064749332076065,
"repo_name": "espadrine/opera",
"id": "959fe48579ec2e0eb55e19358a15f64590167b9a",
"size": "14323",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chromium/src/v8/tools/run-tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Exercise one model combination on the ozone dataset; the four lists are
# presumably (transformation, trend, cycle, autoregression) choices --
# confirm against build_model's signature.
testmod.build_model( ['None'] , ['MovingMedian'] , ['NoCycle'] , ['SVR'] );
"content_hash": "9e26adb0b10e6971e3d6ed8fbb73b688",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 37,
"alnum_prop": 0.6959459459459459,
"repo_name": "antoinecarme/pyaf",
"id": "be8a95f7ae82176c895f699a8768e09c1e91d816",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingMedian_NoCycle_SVR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from recognize_albums.models import Album, Image
class ImageInline(admin.TabularInline):
    """Tabular inline editor for Image rows attached to an Album."""
    model = Image
    # One extra blank row for adding a new image.
    extra = 1
    fields = ['thumbnail','image',]
    # thumbnail is shown read-only (presumably a rendered preview -- confirm
    # against the Image model).
    readonly_fields = ['thumbnail',]
class AlbumAdmin(admin.ModelAdmin):
    """Admin page for Album; its Images are edited inline."""
    inlines = [ImageInline]
# Expose Album (with inline Images) in the Django admin site.
admin.site.register(Album, AlbumAdmin)
| {
"content_hash": "868675e32d8f98d0983b0cc676329af8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 48,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.7245508982035929,
"repo_name": "NCSUWebClass/fall14-recognize4",
"id": "ffe25db0325620139bb9f4da3133872a40bf2aa0",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/recognize_albums/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42429"
},
{
"name": "JavaScript",
"bytes": "4707"
},
{
"name": "Python",
"bytes": "5227"
},
{
"name": "Shell",
"bytes": "1024"
}
],
"symlink_target": ""
} |
import copy
import numpy
import wendy
import hom2m
########################## SELF-GRAVITATING DISK TOOLS ########################
def sample_sech2(sigma,totmass,n=1):
    """Draw n equal-mass particles from an isothermal sech^2 disk.

    sigma is the velocity dispersion, totmass the total disk mass
    (twopiG = 1. in our units). Returns (x, v, m) arrays with the
    velocities recentered to zero mean.
    """
    # Scale height follows from sigma and the total mass (twopiG = 1.)
    scale_height= sigma**2./totmass
    # Invert the sech^2 cumulative distribution with uniform draws
    uniform_draws= numpy.random.uniform(size=n)
    x= numpy.arctanh(2.*uniform_draws-1)*scale_height*2.
    # Gaussian velocities, recentered so the bulk does not drift (stabilize)
    v= numpy.random.normal(size=n)*sigma
    v= v-numpy.mean(v)
    # Equal-mass particles summing to totmass
    m= numpy.full_like(x,totmass/n)
    return (x,v,m)
############################### M2M FORCE-OF-CHANGE ###########################
# All defined here as the straight d constraint / d parameter (i.e., does *not*
# include things like eps, weight)
def force_of_change_weights(w_m2m,zsun_m2m,z_m2m,vz_m2m,
                            data_dicts,
                            prior,mu,w_prior,
                            h_m2m=0.02,
                            kernel=hom2m.epanechnikov_kernel,
                            delta_m2m=None):
    """Compute the force of change for all of the weights.

    Accumulates each observational constraint in data_dicts onto the
    populations it involves, then adds the prior term. Returns
    (fcw, delta_m2m_new) with fcw shaped like w_m2m.
    """
    fcw= numpy.zeros_like(w_m2m)
    out_deltas= []
    if delta_m2m is None:
        delta_m2m= [None for d in data_dicts]
    for ii,data_dict in enumerate(data_dicts):
        obs_type= data_dict['type'].lower()
        # Sum the weights of all populations entering this observation
        w_sum= numpy.sum(w_m2m[:,data_dict['pops']],axis=1)
        if obs_type == 'dens':
            this_fcw, this_deltas=\
                hom2m.force_of_change_density_weights(\
                    w_sum,zsun_m2m,z_m2m,vz_m2m,
                    data_dict['zobs'],data_dict['obs'],data_dict['unc'],
                    h_m2m=h_m2m,kernel=kernel,delta_m2m=delta_m2m[ii])
        elif obs_type == 'v2':
            this_fcw, this_deltas=\
                hom2m.force_of_change_v2_weights(\
                    w_sum,zsun_m2m,z_m2m,vz_m2m,
                    data_dict['zobs'],data_dict['obs'],data_dict['unc'],
                    h_m2m=h_m2m,kernel=kernel,deltav2_m2m=delta_m2m[ii])
        else:
            raise ValueError("'type' of measurement in data_dict not understood")
        # Broadcast this constraint's force onto its populations
        fcw[:,data_dict['pops']]+= numpy.atleast_2d(this_fcw).T
        out_deltas.extend(this_deltas)
    # Add prior
    fcw+= hom2m.force_of_change_prior_weights(w_m2m,mu,w_prior,prior)
    return (fcw,out_deltas)
################################ M2M OPTIMIZATION #############################
def parse_data_dict(data_dicts):
    """
    NAME:
       parse_data_dict
    PURPOSE:
       parse the data_dict input to M2M routines, normalizing each
       entry's 'pops' to a list (a bare integer becomes a 1-element list)
    INPUT:
       data_dicts - list of data_dicts
    OUTPUT:
       the same list of data_dicts, cleaned up in place
    HISTORY:
       2017-07-20 - Written - Bovy (UofT)
    """
    for data_dict in data_dicts:
        # Also accept numpy integer scalars, not just builtin int
        if isinstance(data_dict['pops'],(int,numpy.integer)):
            data_dict['pops']= [data_dict['pops']]
    # BUG FIX: previously returned only the *last* data_dict; the docstring
    # (and the caller in fit_m2m) expect the cleaned-up list
    return data_dicts
def fit_m2m(w_init,z_init,vz_init,
            omega_m2m,zsun_m2m,
            data_dicts,
            step=0.001,nstep=1000,
            eps=0.1,mu=1.,prior='entropy',w_prior=None,
            kernel=hom2m.epanechnikov_kernel,
            kernel_deriv=hom2m.epanechnikov_kernel_deriv,
            h_m2m=0.02,
            npop=1,
            smooth=None,st96smooth=False,
            output_wevolution=False,
            fit_zsun=False,fit_omega=False,
            skipomega=10,delta_omega=0.3):
    """
    NAME:
       fit_m2m
    PURPOSE:
       Run M2M optimization for wendy M2M
    INPUT:
       w_init - initial weights [N] or [N,npop]
       z_init - initial z [N]
       vz_init - initial vz (rad) [N]
       omega_m2m - potential parameter omega
       zsun_m2m - Sun's height above the plane [N]
       data_dicts - list of dictionaries that hold the data, these are described in more detail below
       step= stepsize of orbit integration
       nstep= number of steps to integrate the orbits for
       eps= M2M epsilon parameter (can be array when fitting zsun, omega; in that case eps[0] = eps_weights, eps[1] = eps_zsun, eps[1 or 2 based on fit_zsun] = eps_omega)
       mu= M2M entropy parameter mu
       prior= ('entropy' or 'gamma')
       w_prior= (None) prior weights (if None, equal to w_init)
       fit_zsun= (False) if True, also optimize zsun
       fit_omega= (False) if True, also optimize omega
       skipomega= only update omega every skipomega steps
       delta_omega= (0.3) difference in omega to use to compute derivative of objective function wrt omega
       kernel= a smoothing kernel
       kernel_deriv= the derivative of the smoothing kernel
       h_m2m= kernel size parameter for computing the observables
       npop= (1) number of theoretical populations
       smooth= smoothing parameter alpha (None for no smoothing)
       st96smooth= (False) if True, smooth the constraints (Syer & Tremaine 1996), if False, smooth the objective function and its derivative (Dehnen 2000)
       output_wevolution= if set to an integer, return the time evolution of this many randomly selected weights
    DATA DICTIONARIES:
       The data dictionaries have the following form:
          'type': type of measurement: 'dens', 'v2'
          'pops': the theoretical populations included in this measurement;
                  single number or list
          'zobs': vertical height of the observation
          'zrange': width of vertical bin relative to some fiducial value (used to scale h_m2m, which should therefore be appropriate for the fiducial value)
          'obs': the actual observation
          'unc': the uncertainty in the observation
       of these, zobs, obs, and unc can be arrays for mulitple measurements
    OUTPUT:
       (w_out,[zsun_out, [omega_out]],z_m2m,vz_m2m,Q_out,[wevol,rndindx]) -
              (output weights [N],
              [Solar offset [nstep] optional],
              [omega [nstep] optional when fit_omega],
              z_m2m [N] final z,
              vz_m2m [N] final vz,
              objective function as a function of time [nstep],
              [weight evolution for randomly selected weights,index of random weights])
    HISTORY:
       2017-07-20 - Started from hom2m.fit_m2m - Bovy (UofT)
    """
    # Expand a 1D initial-weight vector into [N,npop]
    if len(w_init.shape) == 1:
        w_out= numpy.empty((len(w_init),npop))
        w_out[:,:]= numpy.tile(copy.deepcopy(w_init),(npop,1)).T
    else:
        w_out= copy.deepcopy(w_init)
    zsun_out= numpy.empty(nstep)
    omega_out= numpy.empty(nstep)
    if w_prior is None:
        w_prior= w_out
    # Parse data_dict (normalizes 'pops' in place)
    data_dict= parse_data_dict(data_dicts)
    # Parse eps: a scalar is replicated for each fitted hyper-parameter
    if isinstance(eps,float):
        eps= [eps]
        if fit_zsun: eps.append(eps[0])
        if fit_omega: eps.append(eps[0])
    Q_out= []
    if output_wevolution:
        rndindx= numpy.random.permutation(len(w_out))[:output_wevolution]
        wevol= numpy.zeros((output_wevolution,npop,nstep))
    # Compute force of change for first iteration
    fcw, delta_m2m_new= \
        force_of_change_weights(w_out,zsun_m2m,z_init,vz_init,
                                data_dicts,prior,mu,w_prior,
                                h_m2m=h_m2m,kernel=kernel)
    fcw*= w_out
    fcz= 0.
    if fit_zsun:
        # NOTE(review): this branch references z_obs, dens_obs_noise,
        # deltav2_m2m_new, use_v2 which are not defined in this scope and
        # would raise NameError -- presumably left over from hom2m.fit_m2m;
        # confirm intended inputs before using fit_zsun=True.
        fcz= force_of_change_zsun(w_init,zsun_m2m,z_init,vz_init,
                                  z_obs,dens_obs_noise,delta_m2m_new,
                                  densv2_obs_noise,deltav2_m2m_new,
                                  kernel=kernel,kernel_deriv=kernel_deriv,
                                  h_m2m=h_m2m,use_v2=use_v2)
    if smooth is not None:
        delta_m2m= delta_m2m_new
    else:
        delta_m2m= [None for d in data_dicts]
    if smooth is not None and not st96smooth:
        # BUG FIX: was `[d**2 for d in delta_m2m**2.]`, which raises a
        # TypeError (** applied to a list); square the entries, matching
        # the Q_out/Q_new computations below
        Q= [d**2. for d in delta_m2m]
    # setup skipomega omega counter and prev. (z,vz) for F(omega)
    #ocounter= skipomega-1 # Causes F(omega) to be computed in the 1st step
    #z_prev, vz_prev= Aphi_to_zvz(A_init,phi_init-skipomega*step*omega_m2m,
    #                             omega_m2m) #Rewind for first step
    z_m2m, vz_m2m= z_init, vz_init
    for ii in range(nstep):
        # Update weights first
        w_out+= eps[0]*step*fcw
        # Floor the weights to keep them strictly positive
        w_out[w_out < 10.**-16.]= 10.**-16.
        # then zsun
        if fit_zsun:
            zsun_m2m+= eps[1]*step*fcz
            zsun_out[ii]= zsun_m2m
        # then omega (skipped in the first step, so undeclared vars okay)
        # NOTE(review): ocounter/fco are only assigned further down; with
        # the initialization above commented out, the first-step behavior
        # relies on fit_omega being False or on short-circuiting -- confirm.
        if fit_omega and ocounter == skipomega:
            domega= eps[1+fit_zsun]*step*skipomega*fco
            # Limit the omega step to delta_omega/30 per update
            max_domega= delta_omega/30.
            if numpy.fabs(domega) > max_domega:
                domega= max_domega*numpy.sign(domega)
            omega_m2m+= domega
            # Keep (z,vz) the same in new potential
            A_now, phi_now= zvz_to_Aphi(z_m2m,vz_m2m,omega_m2m)
            ocounter= 0
        # (Store objective function)
        if smooth is not None and st96smooth:
            Q_out.append([d**2. for d in delta_m2m])
        elif smooth is not None:
            Q_out.append(copy.deepcopy(Q))
        else:
            Q_out.append([d**2. for d in delta_m2m_new])
        # Then update the dynamics
        mass= numpy.sum(w_out,axis=1)
        # (temporary?) way to deal with small masses: only integrate
        # particles well above the median mass floor
        relevant_particles_index= mass > (numpy.median(mass[mass > 10.**-9.])*10.**-6.)
        if numpy.any(mass[relevant_particles_index] < (10.**-8.*numpy.median(mass[relevant_particles_index]))):
            print(numpy.sum(mass[relevant_particles_index] < (10.**-8.*numpy.median(mass[relevant_particles_index]))))
        g= wendy.nbody(z_m2m[relevant_particles_index],
                       vz_m2m[relevant_particles_index],
                       mass[relevant_particles_index],
                       step,maxcoll=10000000)
        tz_m2m, tvz_m2m= next(g)
        z_m2m[relevant_particles_index]= tz_m2m
        vz_m2m[relevant_particles_index]= tvz_m2m
        # Remove center-of-mass drift
        z_m2m-= numpy.sum(mass*z_m2m)/numpy.sum(mass)
        vz_m2m-= numpy.sum(mass*vz_m2m)/numpy.sum(mass)
        # Compute force of change
        if smooth is None or not st96smooth:
            # Turn these off
            tdelta_m2m= None
        else:
            tdelta_m2m= delta_m2m
        fcw_new, delta_m2m_new= \
            force_of_change_weights(w_out,zsun_m2m,z_m2m,vz_m2m,
                                    data_dicts,prior,mu,w_prior,
                                    h_m2m=h_m2m,kernel=kernel,
                                    delta_m2m=tdelta_m2m)
        fcw_new*= w_out
        if fit_zsun:
            if smooth is None or not st96smooth:
                tdelta_m2m= delta_m2m_new
            # NOTE(review): same undefined names as the first fcz call above.
            fcz_new= force_of_change_zsun(w_out,zsun_m2m,z_m2m,vz_m2m,
                                          z_obs,dens_obs_noise,tdelta_m2m,
                                          densv2_obs_noise,tdeltav2_m2m,
                                          kernel=kernel,
                                          kernel_deriv=kernel_deriv,
                                          h_m2m=h_m2m,use_v2=use_v2)
        if fit_omega:
            omega_out[ii]= omega_m2m
            # Update omega in this step?
            ocounter+= 1
            if ocounter == skipomega:
                if not fit_zsun and (smooth is None or not st96smooth):
                    tdelta_m2m= delta_m2m_new
                    tdeltav2_m2m= deltav2_m2m_new
                # NOTE(review): force_of_change_omega and several of its
                # arguments (z_obs, dens_obs, ...) are not defined here.
                fco_new= force_of_change_omega(w_out,zsun_m2m,omega_m2m,
                                               z_m2m,vz_m2m,z_prev,vz_prev,
                                               step*skipomega,
                                               z_obs,dens_obs,dens_obs_noise,
                                               tdelta_m2m,
                                               densv2_obs,densv2_obs_noise,
                                               tdeltav2_m2m,
                                               h_m2m=h_m2m,kernel=kernel,
                                               delta_omega=delta_omega,
                                               use_v2=use_v2)
                z_prev= copy.copy(z_m2m)
                vz_prev= copy.copy(vz_m2m)
        # Increment smoothing
        if smooth is not None and st96smooth:
            delta_m2m= [d+step*smooth*(dn-d)
                        for d,dn in zip(delta_m2m,delta_m2m_new)]
            fcw= fcw_new
            if fit_zsun: fcz= fcz_new
            if fit_omega and ocounter == skipomega: fco= fco_new
        elif smooth is not None:
            Q_new= [d**2. for d in delta_m2m_new]
            Q= [q+step*smooth*(qn-q) for q,qn in zip(Q,Q_new)]
            fcw+= step*smooth*(fcw_new-fcw)
            if fit_zsun: fcz+= step*smooth*(fcz_new-fcz)
            if fit_omega and ocounter == skipomega:
                fco+= step*skipomega*smooth*(fco_new-fco)
        else:
            fcw= fcw_new
            if fit_zsun: fcz= fcz_new
            if fit_omega and ocounter == skipomega: fco= fco_new
        # Record random weights if requested
        if output_wevolution:
            wevol[:,:,ii]= w_out[rndindx]
    # Assemble the output tuple in the documented order
    out= (w_out,)
    if fit_zsun: out= out+(zsun_out,)
    if fit_omega:
        out= out+(omega_out,)
    out= out+(z_m2m,vz_m2m,)
    out= out+(numpy.array(Q_out),)
    if output_wevolution:
        out= out+(wevol,rndindx,)
    return out
| {
"content_hash": "c9c9d85a358d839d004b79433a1461a5",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 170,
"avg_line_length": 44.94897959183673,
"alnum_prop": 0.5394627317442301,
"repo_name": "jobovy/simple-m2m",
"id": "dc95f90b77127cafb6f99cfbff0b0e1dbdb7274e",
"size": "13263",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/wendym2m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7104650"
},
{
"name": "Python",
"bytes": "67387"
}
],
"symlink_target": ""
} |
"""Tests for `tf.data.Dataset.random()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import test
class RandomTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Checks seeding behavior of the `Dataset.random()` source."""

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(global_seed=[None, 10], local_seed=[None, 20])))
  def testDeterminism(self, global_seed, local_seed):
    # Setting either the global or the local seed should make the
    # random stream reproducible across graph round-trips.
    expect_determinism = global_seed is not None or local_seed is not None
    random_seed.set_random_seed(global_seed)
    dataset = dataset_ops.Dataset.random(seed=local_seed).take(10)
    first_output = self.getDatasetOutput(dataset)
    dataset = self.graphRoundTrip(dataset)
    second_output = self.getDatasetOutput(dataset)
    if expect_determinism:
      self.assertEqual(first_output, second_output)
    else:
      # Technically not guaranteed since the two randomly-chosen int64 seeds
      # could match, but that is sufficiently unlikely (1/2^128 with perfect
      # random number generation).
      self.assertNotEqual(first_output, second_output)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "ba16f66c67673796fc1a1419ac0e7085",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 34.65853658536585,
"alnum_prop": 0.7283603096410978,
"repo_name": "frreiss/tensorflow-fred",
"id": "b1b06cf439fafd4f81b6a4cd8aa80212ac015050",
"size": "2110",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/random_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import mraa
class Romeo():
    """ Class for controlling the DFRobot Romeo for Edison

        To drive motors, use ADC, PWM, etc., interface with AtMega8 board
        byte array format:
        ( 0x55, 0xaa, 0x[cmd][x], 0x[arg] )

        The trailing byte appended by sendI2C is a simple additive
        checksum (sum of the preceding bytes, modulo 256).
    """
    # encoder counters
    lCount = 0
    rCount = 0

    def __init__(self):
        # set up I2C link to the AtMega8 motor controller
        self.i2c = mraa.I2c(1)
        self.i2c.frequency(100000)
        self.i2c.address(0x04)

        # cached wheel state: last commanded PWM speed and direction
        self.leftWheelSpeed = 0
        self.rightWheelSpeed = 0
        self.leftWheelDirection = 0
        self.rightWheelDirection = 0
        self.stopped = False

        # Motor corrections in range (0, 1)
        self.L_CORRECTION = 1
        self.R_CORRECTION = .888

    ## Encoder Counter Controls
    def leftReset(self):
        """Zero the left encoder counter."""
        self.lCount = 0

    def rightReset(self):
        """Zero the right encoder counter."""
        self.rCount = 0

    # signs change to correspond to encoder orientation
    def leftIncr(self, pos):
        """Step the left counter; `pos` selects the decrement direction."""
        if pos:
            self.lCount -= 1
        else:
            self.lCount += 1

    def rightIncr(self, pos):
        """Step the right counter; `pos` selects the increment direction."""
        if pos:
            self.rCount += 1
        else:
            self.rCount -= 1

    # retrieve encoder counts
    def getLCount(self):
        """Return the current left encoder count."""
        return self.lCount

    def getRCount(self):
        """Return the current right encoder count."""
        return self.rCount

    ## Wheel Controls ##
    # set the wheel directions
    def setLeftWheel(self, direction):
        "0-->CW; 1-->CCW"
        direction = direction & 0xFF
        self.sendI2C(bytearray([0x55, 0xaa, 0xB1, direction]))
        self.leftWheelDirection = direction

    def setRightWheel(self, direction):
        "0-->CW; 1-->CCW"
        direction = direction & 0xFF
        self.sendI2C(bytearray([0x55, 0xaa, 0xB2, direction]))
        self.rightWheelDirection = direction

    # drive the wheels with a given PWM speed
    def driveLeftWheel(self, speed):
        "speed is in [0, 255]"
        # Flip direction first if the sign of the request disagrees with
        # the cached direction.
        if (speed > 0) != (self.leftWheelDirection > 0):
            self.setLeftWheel(not self.leftWheelDirection)
        speed = abs(int(round(self.L_CORRECTION * speed))) & 0xFF
        self.sendI2C(bytearray([0x55, 0xaa, 0xC1, speed]))
        self.leftWheelSpeed = speed

    def driveRightWheel(self, speed):
        "speed is in [0, 255]"
        if (speed > 0) != (self.rightWheelDirection > 0):
            self.setRightWheel(not self.rightWheelDirection)
        # BUG FIX: R_CORRECTION is an instance attribute; the bare name
        # raised NameError before (cf. driveLeftWheel, which correctly
        # uses self.L_CORRECTION).
        speed = abs(int(round(self.R_CORRECTION * speed))) & 0xFF
        self.sendI2C(bytearray([0x55, 0xaa, 0xC2, speed]))
        self.rightWheelSpeed = speed

    # stop the robot
    def stop(self):
        """Command zero PWM on both wheels and mark the robot stopped."""
        self.sendI2C(bytearray([0x55, 0xaa, 0xC1, 0]))
        self.sendI2C(bytearray([0x55, 0xaa, 0xC2, 0]))
        self.stopped = True

    # start the robot
    def start(self):
        """Clear the stopped flag (does not command the motors)."""
        self.stopped = False

    ## I2C Send ##
    # necessary for controlling motor driver
    def sendI2C(self, cmd):
        """Append the additive checksum byte and write `cmd` over I2C."""
        cmd.append(sum(cmd)&0xFF)
        self.i2c.write(cmd)
| {
"content_hash": "0ecfe68d32b7efa8262528b1140d0c69",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 69,
"avg_line_length": 31.604395604395606,
"alnum_prop": 0.5914464534075105,
"repo_name": "SSG-DRD-IOT/commercial-iot-security-system",
"id": "f63db324379305f8a6e467829edfacb42e9edc96",
"size": "3322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "romeo/robot/romeo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "341"
},
{
"name": "Python",
"bytes": "278625"
}
],
"symlink_target": ""
} |
""" ifaddrs for Android
"""
from os.path import join, exists
import sh
from pythonforandroid.logger import info, shprint
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from pythonforandroid.toolchain import current_directory
class IFAddrRecipe(CompiledComponentsPythonRecipe):
    """python-for-android recipe that compiles android-ifaddrs into
    libifaddrs.so for the target architecture."""
    version = '8f9a87c'  # upstream commit hash used as the release tag
    url = 'https://github.com/morristech/android-ifaddrs/archive/{version}.zip'
    depends = ['hostpython3']
    call_hostpython_via_targetpython = False
    site_packages_name = 'ifaddrs'
    generated_libraries = ['libifaddrs.so']
    def prebuild_arch(self, arch):
        """Make the build and target directories"""
        path = self.get_build_dir(arch.arch)
        if not exists(path):
            info("creating {}".format(path))
            shprint(sh.mkdir, '-p', path)
    def build_arch(self, arch):
        """simple shared compile"""
        env = self.get_recipe_env(arch, with_flags_in_cc=False)
        # Ensure the build dir plus the python Lib/Include dirs exist before
        # compiling.
        for path in (
                self.get_build_dir(arch.arch),
                join(self.ctx.python_recipe.get_build_dir(arch.arch), 'Lib'),
                join(self.ctx.python_recipe.get_build_dir(arch.arch), 'Include')):
            if not exists(path):
                info("creating {}".format(path))
                shprint(sh.mkdir, '-p', path)
        cli = env['CC'].split()[0]
        # makes sure first CC command is the compiler rather than ccache, refs:
        # https://github.com/kivy/python-for-android/issues/1398
        if 'ccache' in cli:
            cli = env['CC'].split()[1]
        cc = sh.Command(cli)
        with current_directory(self.get_build_dir(arch.arch)):
            # Compile ifaddrs.c to an object file.
            # NOTE(review): '-l.' looks like a typo (linker flag during a -c
            # compile) and '-I.' is duplicated -- matches upstream, but worth
            # confirming before cleaning up.
            cflags = env['CFLAGS'].split()
            cflags.extend(['-I.', '-c', '-l.', 'ifaddrs.c', '-I.'])
            shprint(cc, *cflags, _env=env)
            # Link the object file into a shared library and install it.
            cflags = env['CFLAGS'].split()
            cflags.extend(['-shared', '-I.', 'ifaddrs.o', '-o', 'libifaddrs.so'])
            cflags.extend(env['LDFLAGS'].split())
            shprint(cc, *cflags, _env=env)
            shprint(sh.cp, 'libifaddrs.so', self.ctx.get_libs_dir(arch.arch))
# Module-level recipe instance picked up by the p4a recipe loader.
recipe = IFAddrRecipe()
| {
"content_hash": "20dcce49b25840833dbf2851e747ef66",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 82,
"avg_line_length": 39.148148148148145,
"alnum_prop": 0.5998107852412489,
"repo_name": "PKRoma/python-for-android",
"id": "7d44f9cd72a621a5f8923ff403497c9a2a32498b",
"size": "2114",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pythonforandroid/recipes/ifaddrs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "65170"
},
{
"name": "CMake",
"bytes": "250"
},
{
"name": "CSS",
"bytes": "3107"
},
{
"name": "Cython",
"bytes": "15033"
},
{
"name": "Dockerfile",
"bytes": "3040"
},
{
"name": "HTML",
"bytes": "4330"
},
{
"name": "Java",
"bytes": "134825"
},
{
"name": "Makefile",
"bytes": "10159"
},
{
"name": "Python",
"bytes": "784620"
},
{
"name": "Shell",
"bytes": "1499"
},
{
"name": "kvlang",
"bytes": "17453"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class Operation(Model):
    """A long-running management operation (msrest serialization model).

    :param id: Operation ID.
    :type id: str
    :param name: Operation name.
    :type name: str
    :param status: The current status of the operation. Possible values
     include: 'InProgress', 'Failed', 'Succeeded', 'TimedOut', 'Created'
    :type status: str or :class:`OperationStatus
     <azure.mgmt.web.models.OperationStatus>`
    :param errors: Any errors associate with the operation.
    :type errors: list of :class:`ErrorEntity
     <azure.mgmt.web.models.ErrorEntity>`
    :param created_time: Time when operation has started.
    :type created_time: datetime
    :param modified_time: Time when operation has been updated.
    :type modified_time: datetime
    :param expiration_time: Time when operation will expire.
    :type expiration_time: datetime
    :param geo_master_operation_id: Applicable only for stamp operation ids.
    :type geo_master_operation_id: str
    """
    # Maps constructor arguments onto wire-format keys for msrest
    # (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'OperationStatus'},
        'errors': {'key': 'errors', 'type': '[ErrorEntity]'},
        'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
        'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
        'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
        'geo_master_operation_id': {'key': 'geoMasterOperationId', 'type': 'str'},
    }
    def __init__(self, id=None, name=None, status=None, errors=None,
                 created_time=None, modified_time=None, expiration_time=None,
                 geo_master_operation_id=None):
        # Plain attribute assignment; msrest reads these via _attribute_map.
        self.geo_master_operation_id = geo_master_operation_id
        self.expiration_time = expiration_time
        self.modified_time = modified_time
        self.created_time = created_time
        self.errors = errors
        self.status = status
        self.name = name
        self.id = id
| {
"content_hash": "0709c62ca679d49a7fdd22c908375ef9",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 160,
"avg_line_length": 42.234042553191486,
"alnum_prop": 0.6387909319899244,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "faa4405e95312f7c3c94c0dcb8e9a1ccb94c7b90",
"size": "2459",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/models/operation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
from wagtail.wagtailcore.models import Page
from wagtail.tests.utils import WagtailPageTests
from wagtail_box.pages.models import StaticPage
class PagesModelTest(WagtailPageTests):
    # Page-hierarchy rules for the StaticPage model.
    def test_static_page_child_of_anything(self):
        """
        Ensures that a Static page can be child of any kind of Page
        """
        # assertCanCreateAt(parent, child) checks the parent_page_types /
        # subpage_types configuration, not actual page creation.
        self.assertCanCreateAt(Page, StaticPage)
| {
"content_hash": "b3c022032a52f4bf5a60e7c027d157d8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 31.25,
"alnum_prop": 0.7333333333333333,
"repo_name": "palazzem/wagtail-nesting-box",
"id": "6e6e2c3a24c346cf81b9146c9467d305036ceb7f",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pages_models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "5480"
},
{
"name": "Python",
"bytes": "33049"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
# Generated smoke test: exercise the SVC model with a polynomial kernel on
# the BreastCancer dataset, generating SQL for a PostgreSQL backend.
class_gen.test_model("SVC_poly" , "BreastCancer" , "postgresql")
| {
"content_hash": "f481a4b3f9e5b1927026259c2a3f2318",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35,
"alnum_prop": 0.7785714285714286,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "943c153350bf7e6fb7d1616225f302d13b26133a",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BreastCancer/ws_BreastCancer_SVC_poly_postgresql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
from webvita import db, models
from passlib.apps import custom_app_context as pwd_context
def db_setup_dev(name, realname, pw, email):
    """Wipe the schema, recreate it, and seed a single (admin) user.

    The password is hashed with passlib's custom_app_context before storage.
    """
    db.drop_all()
    db.create_all()
    hashed_pw = pwd_context.encrypt(pw)
    account = models.User(name, realname, hashed_pw, email)
    db.session.add(account)
    db.session.commit()
def db_reset():
    """Drop and recreate all tables, leaving the database empty."""
    db.drop_all()
    db.create_all()
#db_setup_dev('dummy', 'Mr. Dummy', 'dummy', 'dummy@mail.com')
#db_reset()
| {
"content_hash": "f56ff2435ede7650afe8b6277f4c58cc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 24.473684210526315,
"alnum_prop": 0.6129032258064516,
"repo_name": "niijv/webvita-heroku",
"id": "5703139576e2948c324ec76f66a5522e3cd43ca3",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12079"
},
{
"name": "Python",
"bytes": "17729"
}
],
"symlink_target": ""
} |
"""
Django settings for Lab3 project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l0ab0&9%g^-%(aic2)5rrkq0=x0+wry59ohnh#p5@5f=p-^dm6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Lab3'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Lab3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Lab3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'lab3',
'USER': 'root'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/lab3/static/'
STATICFILES_DIRS = [ os.path.join(BASE_DIR, "lab3/static") ]'
| {
"content_hash": "6310b4434a512be6999148ba265a33ea",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 91,
"avg_line_length": 26.264,
"alnum_prop": 0.6886993603411514,
"repo_name": "sanchaez/python_labs",
"id": "9e21f152dd137250e6b006c7346da2e411999986",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lab3/Lab3/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "24445"
},
{
"name": "JavaScript",
"bytes": "18616"
},
{
"name": "Python",
"bytes": "53410"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Action.external_resource_type, a blank-able
    # CharField with a fixed choice set (link / phone / datetime).
    dependencies = [
        ('goals', '0144_auto_20160517_2104'),
    ]
    operations = [
        migrations.AddField(
            model_name='action',
            name='external_resource_type',
            field=models.CharField(help_text='An internally-used field that makes it easier for client apps to determine how to handle the external_resource data.', max_length=32, choices=[('link', 'Link'), ('phone', 'Phone Number'), ('datetime', 'Date/Time')], blank=True),
        ),
    ]
| {
"content_hash": "846cdcbde4c13d1ba68bc0236506cce4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 274,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.6400651465798045,
"repo_name": "izzyalonso/tndata_backend",
"id": "6d2c5eaee66adf0dd4327119e7d7ace411a866bc",
"size": "638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/goals/migrations/0145_action_external_resource_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# Project-level URL routing; media files are served by appending the static()
# helper patterns (effective only when DEBUG is True).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("atlas.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Skills application
    url(r'^skills/', include("atlas.skills.urls", namespace="skills")),
    # Rest Framework
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string view references are a pre-Django-1.10 idiom --
    # confirm the Django version before upgrading.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| {
"content_hash": "958f82cafe7c100b09c825a6ef8b5c38",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 83,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.6824644549763034,
"repo_name": "Kayra/atlas",
"id": "26c8aed360a437c52c5f09b30f40b158bc61913c",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1869"
},
{
"name": "HTML",
"bytes": "29436"
},
{
"name": "JavaScript",
"bytes": "26040"
},
{
"name": "Python",
"bytes": "48086"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
import mock
from mox3 import mox
from oslo.config import cfg
from oslo.utils import timeutils
from nova.compute import claims
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.tests.unit.compute import test_compute
from nova.tests.unit.image import fake as fake_image
CONF = cfg.CONF
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
def _fake_resources():
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
return resources
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
    """Compute-manager level tests for shelve / shelve-offload / unshelve."""

    def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
        """Run shelve_instance and verify state transitions via instance.save()."""
        CONF.set_override('shelved_offload_time', shelved_offload_time)
        instance = self._create_fake_instance_obj()
        image_id = 'fake_image_id'
        host = 'fake-mini'
        cur_time = timeutils.utcnow()
        timeutils.set_time_override(cur_time)
        instance.task_state = task_states.SHELVING
        instance.save()
        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.compute._notify_about_instance_usage(self.context, instance,
                'shelve.start')
        if clean_shutdown:
            self.compute.driver.power_off(instance,
                                          CONF.shutdown_timeout,
                                          self.compute.SHUTDOWN_RETRY_INTERVAL)
        else:
            self.compute.driver.power_off(instance, 0, 0)
        self.compute._get_power_state(self.context,
                instance).AndReturn(123)
        self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
                mox.IgnoreArg())
        tracking = {'last_state': instance.vm_state}

        def check_save(expected_task_state=None):
            # Each save() during shelve must arrive in a known state.
            self.assertEqual(123, instance.power_state)
            if tracking['last_state'] == vm_states.ACTIVE:
                if CONF.shelved_offload_time == 0:
                    self.assertEqual(task_states.SHELVING_OFFLOADING,
                                     instance.task_state)
                else:
                    self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED, instance.vm_state)
                self.assertEqual([task_states.SHELVING,
                                  task_states.SHELVING_IMAGE_UPLOADING],
                                 expected_task_state)
                self.assertIn('shelved_at', instance.system_metadata)
                self.assertEqual(image_id,
                                 instance.system_metadata['shelved_image_id'])
                self.assertEqual(host,
                                 instance.system_metadata['shelved_host'])
                tracking['last_state'] = instance.vm_state
            elif (tracking['last_state'] == vm_states.SHELVED and
                  CONF.shelved_offload_time == 0):
                self.assertIsNone(instance.host)
                self.assertIsNone(instance.node)
                self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED_OFFLOADED,
                                 instance.vm_state)
                self.assertEqual([task_states.SHELVING,
                                  task_states.SHELVING_OFFLOADING],
                                 expected_task_state)
                tracking['last_state'] = instance.vm_state
            else:
                self.fail('Unexpected save!')

        self.compute._notify_about_instance_usage(self.context,
                instance, 'shelve.end')
        if CONF.shelved_offload_time == 0:
            self.compute._notify_about_instance_usage(self.context, instance,
                'shelve_offload.start')
            self.compute.driver.power_off(instance, 0, 0)
            self.compute._get_power_state(self.context,
                    instance).AndReturn(123)
            self.compute._notify_about_instance_usage(self.context, instance,
                    'shelve_offload.end')
        self.mox.ReplayAll()
        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = check_save
            self.compute.shelve_instance(self.context, instance,
                    image_id=image_id, clean_shutdown=clean_shutdown)

    def test_shelve(self):
        self._shelve_instance(-1)

    def test_shelve_forced_shutdown(self):
        self._shelve_instance(-1, clean_shutdown=False)

    def test_shelve_and_offload(self):
        self._shelve_instance(0)

    def _shelve_offload(self, clean_shutdown=True):
        """Run shelve_offload_instance and verify the final offloaded state."""
        instance = self._create_fake_instance_obj()
        instance.task_state = task_states.SHELVING
        instance.save()
        cur_time = timeutils.utcnow()
        timeutils.set_time_override(cur_time)
        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.compute._notify_about_instance_usage(self.context, instance,
                'shelve_offload.start')
        if clean_shutdown:
            self.compute.driver.power_off(instance,
                                          CONF.shutdown_timeout,
                                          self.compute.SHUTDOWN_RETRY_INTERVAL)
        else:
            self.compute.driver.power_off(instance, 0, 0)
        self.compute._get_power_state(self.context,
                instance).AndReturn(123)
        self.compute._notify_about_instance_usage(self.context, instance,
                'shelve_offload.end')
        self.mox.ReplayAll()
        with mock.patch.object(instance, 'save'):
            self.compute.shelve_offload_instance(self.context, instance,
                    clean_shutdown=clean_shutdown)
        self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
        self.assertIsNone(instance.task_state)

    def test_shelve_offload(self):
        self._shelve_offload()

    def test_shelve_offload_forced_shutdown(self):
        self._shelve_offload(clean_shutdown=False)

    def test_unshelve(self):
        instance = self._create_fake_instance_obj()
        instance.task_state = task_states.UNSHELVING
        instance.save()
        image = {'id': 'fake_id'}
        node = test_compute.NODENAME
        limits = {}
        filter_properties = {'limits': limits}
        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.rt, 'instance_claim')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'migrate_instance_finish')
        self.deleted_image_id = None

        def fake_delete(self2, ctxt, image_id):
            self.deleted_image_id = image_id

        def fake_claim(context, instance, limits):
            instance.host = self.compute.host
            return claims.Claim(context, instance,
                                self.rt, _fake_resources())

        tracking = {
            'last_state': instance.task_state,
            'spawned': False,
        }

        def check_save(expected_task_state=None):
            if tracking['last_state'] == task_states.UNSHELVING:
                if tracking['spawned']:
                    self.assertIsNone(instance.task_state)
                else:
                    self.assertEqual(task_states.SPAWNING, instance.task_state)
                    tracking['spawned'] = True
                # BUG FIX: was '==' (a no-op comparison statement); assignment
                # is intended, matching test_unshelve_volume_backed below.
                tracking['last_state'] = instance.task_state
            elif tracking['last_state'] == task_states.SPAWNING:
                self.assertEqual(vm_states.ACTIVE, instance.vm_state)
                # BUG FIX: was '==' here as well.
                tracking['last_state'] = instance.task_state
            else:
                self.fail('Unexpected save!')

        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
        self.compute._notify_about_instance_usage(self.context, instance,
                'unshelve.start')
        self.compute._prep_block_device(self.context, instance,
                mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
        self.compute.network_api.migrate_instance_finish(
                self.context, instance, {'source_compute': '',
                                         'dest_compute': self.compute.host})
        self.compute.driver.spawn(self.context, instance, image,
                injected_files=[], admin_password=None,
                network_info=[],
                block_device_info='fake_bdm',
                flavor=None)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        self.compute._notify_about_instance_usage(self.context, instance,
                'unshelve.end')
        self.mox.ReplayAll()
        with mock.patch.object(self.rt, 'instance_claim',
                               side_effect=fake_claim), \
                mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = check_save
            self.compute.unshelve_instance(
                    self.context, instance, image=image,
                    filter_properties=filter_properties,
                    node=node)
        self.assertEqual(image['id'], self.deleted_image_id)
        self.assertEqual(instance.host, self.compute.host)
        self.assertEqual(123, instance.power_state)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertIsNone(instance.task_state)
        self.assertIsNone(instance.key_data)
        self.assertEqual(self.compute.host, instance.host)
        self.assertFalse(instance.auto_disk_config)

    def test_unshelve_volume_backed(self):
        instance = self._create_fake_instance_obj()
        node = test_compute.NODENAME
        limits = {}
        filter_properties = {'limits': limits}
        instance.task_state = task_states.UNSHELVING
        instance.save()
        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.rt, 'instance_claim')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'migrate_instance_finish')
        tracking = {'last_state': instance.task_state}

        def check_save(expected_task_state=None):
            if tracking['last_state'] == task_states.UNSHELVING:
                self.assertEqual(task_states.SPAWNING, instance.task_state)
                tracking['last_state'] = instance.task_state
            elif tracking['last_state'] == task_states.SPAWNING:
                self.assertEqual(123, instance.power_state)
                self.assertEqual(vm_states.ACTIVE, instance.vm_state)
                self.assertIsNone(instance.task_state)
                self.assertIsNone(instance.key_data)
                self.assertFalse(instance.auto_disk_config)
                self.assertIsNone(instance.task_state)
                tracking['last_state'] = instance.task_state
            else:
                self.fail('Unexpected save!')

        self.compute._notify_about_instance_usage(self.context, instance,
                'unshelve.start')
        self.compute._prep_block_device(self.context, instance,
                mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
        self.compute.network_api.migrate_instance_finish(
                self.context, instance, {'source_compute': '',
                                         'dest_compute': self.compute.host})
        self.rt.instance_claim(self.context, instance, limits).AndReturn(
                claims.Claim(self.context, instance, self.rt,
                             _fake_resources()))
        self.compute.driver.spawn(self.context, instance, None,
                injected_files=[], admin_password=None,
                network_info=[],
                block_device_info='fake_bdm',
                flavor=None)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        self.compute._notify_about_instance_usage(self.context, instance,
                'unshelve.end')
        self.mox.ReplayAll()
        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = check_save
            self.compute.unshelve_instance(self.context, instance, image=None,
                    filter_properties=filter_properties, node=node)

    def test_shelved_poll_none_exist(self):
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.StubOutWithMock(timeutils, 'is_older_than')
        self.mox.ReplayAll()
        self.compute._poll_shelved_instances(self.context)

    def test_shelved_poll_not_timedout(self):
        instance = self._create_fake_instance_obj()
        sys_meta = instance.system_metadata
        shelved_time = timeutils.utcnow()
        timeutils.set_time_override(shelved_time)
        timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
        sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
        db.instance_update_and_get_original(self.context, instance['uuid'],
                {'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.ReplayAll()
        self.compute._poll_shelved_instances(self.context)

    def test_shelved_poll_timedout(self):
        instance = self._create_fake_instance_obj()
        sys_meta = instance.system_metadata
        shelved_time = timeutils.utcnow()
        timeutils.set_time_override(shelved_time)
        timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
        sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
        (old, instance) = db.instance_update_and_get_original(self.context,
                instance['uuid'], {'vm_state': vm_states.SHELVED,
                                   'system_metadata': sys_meta})

        def fake_destroy(inst, nw_info, bdm):
            # NOTE(alaski) There are too many differences between an instance
            # as returned by instance_update_and_get_original and
            # instance_get_all_by_filters so just compare the uuid.
            self.assertEqual(instance['uuid'], inst['uuid'])

        self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
        self.compute._poll_shelved_instances(self.context)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
    # API-level tests: verify compute_api.shelve/unshelve set the expected
    # task states on the instance.
    def test_shelve(self):
        # Ensure instance can be shelved.
        fake_instance = self._create_fake_instance_obj(
            {'display_name': 'vm01'})
        instance = fake_instance
        self.assertIsNone(instance['task_state'])
        def fake_init(self2):
            # In original _FakeImageService.__init__(), some fake images are
            # created. To verify the snapshot name of this test only, here
            # sets a fake method.
            self2.images = {}
        def fake_create(self2, ctxt, metadata, data=None):
            # The shelve snapshot image is named '<display_name>-shelved'.
            self.assertEqual(metadata['name'], 'vm01-shelved')
            metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
            return metadata
        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
        self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
        self.compute_api.shelve(self.context, instance)
        self.assertEqual(instance.task_state, task_states.SHELVING)
        db.instance_destroy(self.context, instance['uuid'])
    def test_unshelve(self):
        # Ensure instance can be unshelved.
        instance = self._create_fake_instance_obj()
        self.assertIsNone(instance['task_state'])
        self.compute_api.shelve(self.context, instance)
        # Simulate the manager having completed the shelve before unshelving.
        instance.task_state = None
        instance.vm_state = vm_states.SHELVED
        instance.save()
        self.compute_api.unshelve(self.context, instance)
        self.assertEqual(instance.task_state, task_states.UNSHELVING)
        db.instance_destroy(self.context, instance['uuid'])
| {
"content_hash": "d4b66e1de1c857a94453b9a97e239442",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 79,
"avg_line_length": 43.77777777777778,
"alnum_prop": 0.5982764726714673,
"repo_name": "shakamunyi/nova",
"id": "f5985974dc8b6b898962f6317e3d1be224e759c6",
"size": "17515",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/tests/unit/compute/test_shelve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15322211"
},
{
"name": "Shell",
"bytes": "17730"
},
{
"name": "Smarty",
"bytes": "489682"
}
],
"symlink_target": ""
} |
"""
This script is to test Google Cloud Vision OCR (text detect) API
Example CLI:
> python GoogleCloudOCR.py ./ch4_test/
Notice: need to create folders out_file, out_file3, out_file4 before run this.
"""
# for CLI parameter
import argparse
# for seraching folder and file IO
import base64
import os
import re
import sys
# for accessing Google Cloud API
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
is_verbose = False  # when True, print each detected word to stdout
do_show = False     # when True, display a sample image via matplotlib at exit
if do_show:
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
# [START detect_text]
# Discovery URL template used to build the Vision API client.
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'  # noqa
# Number of image files sent to the Vision API per batched request.
BATCH_SIZE = 10
class VisionApi:
    """Construct and use the Google Vision API service."""
    def __init__(self, api_discovery_file='vision_api.json'):
        # NOTE(review): api_discovery_file is unused; the service is built
        # from DISCOVERY_URL instead -- confirm before removing the parameter.
        self.credentials = GoogleCredentials.get_application_default()
        self.service = discovery.build(
            'vision', 'v1', credentials=self.credentials,
            discoveryServiceUrl=DISCOVERY_URL)
    def detect_text(self, input_filenames, num_retries=3, max_results=6):
        """Uses the Vision API to detect text in the given file.

        Returns a dict mapping filename -> list of textAnnotations (empty
        list when nothing was detected), or {} / None on API errors.
        """
        # Read every image into memory, keyed by filename.
        images = {}
        for filename in input_filenames:
            with open(filename, 'rb') as image_file:
                images[filename] = image_file.read()
        # Build one batched annotate request with base64-encoded payloads.
        batch_request = []
        for filename in images:
            batch_request.append({
                'image': {
                    'content': base64.b64encode(
                        images[filename]).decode('UTF-8')
                },
                'features': [{
                    'type': 'TEXT_DETECTION',
                    'maxResults': max_results,
                }]
            })
        request = self.service.images().annotate(
            body={'requests': batch_request})
        try:
            responses = request.execute(num_retries=num_retries)
            if 'responses' not in responses:
                return {}
            text_response = {}
            # Responses come back in request order, so zip against the same
            # dict iteration used to build the batch.
            for filename, response in zip(images, responses['responses']):
                if 'error' in response:
                    print("API Error for %s: %s" % (
                        filename,
                        response['error']['message']
                        if 'message' in response['error']
                        else ''))
                    continue
                if 'textAnnotations' in response:
                    text_response[filename] = response['textAnnotations']
                else:
                    text_response[filename] = []
            return text_response
        except errors.HttpError as e:
            print("Http Error for %s: %s" % (filename, e))
        except KeyError as e2:
            print("Key error: %s" % e2)
# [END detect_text]
# [START get_text]
def get_text_from_files(vision, input_filenames, input_dir):
    """Call the Vision API on a file. """
    # NOTE: this module is Python 2 (print statements, 'except E, e' syntax).
    # For each image, writes three result files per detected word:
    #   outfile_1: bounding-box vertices only
    #   outfile_3: "<basename>,<word>"
    #   outfile_4: vertices followed by the word
    texts = vision.detect_text(input_filenames)
    len_dir = len(input_dir)
    for filename, text in texts.items():
        # create result file for each image
        import codecs
        # filename[len_dir:-3] strips the input dir prefix and the 3-char
        # extension, which is replaced with 'txt'.
        out_file = './outfile_1/' + 'res_' + filename[len_dir : -3] + 'txt'
        csvfile = codecs.open(out_file, 'wb', 'UTF-8')
        out_file3 = './outfile_3/' + 'res_' + filename[len_dir : -3] + 'txt'
        csvfile3 = codecs.open(out_file3, 'wb', 'UTF-8')
        out_file4 = './outfile_4/' + 'res_' + filename[len_dir : -3] + 'txt'
        csvfile4 = codecs.open(out_file4, 'wb', 'UTF-8')
        # The first annotation is the full-text summary; fewer than two
        # entries means no individual words were found.
        if len(text) <= 1:
            print "===================="
            print "file name: %s\n" % (filename)
            print "detect nothing!"
            continue
        # Skip the full-text annotation at index 0; iterate per-word entries.
        for e in text[1:]:
            try:
                vertices = []
                for bound in e['boundingPoly']['vertices']:
                    x, y = bound['x'], bound['y']
                    # Clamp coordinates into [1, 1279] (images are assumed
                    # 1280px wide/tall here -- TODO confirm).
                    if x <= 0:
                        print('file name: {}: x = {} < 0'.format(filename, x))
                        x = 1
                    elif x >= 1280:
                        print('file name: {}: x = {} > 1280'.format(filename, x))
                        x = 1279
                    if y <= 0:
                        print('file name: {}: y = {} < 0'.format(filename, y))
                        y = 1
                    elif y >= 1280:
                        print('file name: {}: y = {} > 1280'.format(filename, y))
                        y = 1279
                    vertices.append(str(x) + ',' + str(y))
            except KeyError, er:
                # Vision may omit 'x'/'y' keys on image-edge vertices.
                print "===================="
                print "file name: %s\n" % (filename)
                print('Key Error: {}'.format(er))
                print e['boundingPoly']['vertices']
                continue
            out_str = [e['description']]
            csvfile.write(','.join(vertices)+'\r\n')
            csvfile3.write(filename[len_dir+1:] + ',' + e['description'] + '\r\n')
            #out_str.extend(vertices)
            vertices.extend(out_str)
            csvfile4.write(','.join(vertices)+'\r\n')
            if is_verbose:
                print('{}'.format(','.join(out_str)))
        csvfile.close()
        csvfile3.close()
        csvfile4.close()
def batch(iterable, batch_size=BATCH_SIZE):
    """Yield successive tuples of at most ``batch_size`` items.

    >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2))
    ((1, 2), (3, 4), (5))
    """
    pending = []
    for item in iterable:
        pending.append(item)
        if len(pending) == batch_size:
            yield tuple(pending)
            pending = []
    # Emit the final, possibly shorter, batch.
    if pending:
        yield tuple(pending)
def main(input_dir):
    """Walk through all the image files in the given directory, run the
    Vision API on them in batches, and write the detected text to the
    per-image result files.
    """
    # Create a client object for the Vision API
    vision = VisionApi()

    # Recursively collect every file under the input directory.
    # (The original code built this list and then copied it element by
    # element into a second identical list; the copy was redundant.)
    fileslist = []
    for folder, subs, files in os.walk(input_dir):
        for filename in files:
            fileslist.append(os.path.join(folder, filename))

    # The Vision API accepts several images per request, so process in
    # batches.
    for filenames in batch(fileslist):
        get_text_from_files(vision, filenames, input_dir)
# [END get_text]
if __name__ == '__main__':
    # Parse the single positional argument and kick off processing.
    arg_parser = argparse.ArgumentParser(
        description='Detects text in the images in the given directory.')
    arg_parser.add_argument(
        'input_directory',
        help='the image directory you\'d like to detect text in.')
    cli_args = arg_parser.parse_args()
    main(cli_args.input_directory)
# NOTE(review): this block sits at module level *outside* the __main__
# guard, so it also runs when the file is imported -- confirm that is
# intended. `do_show`, `mpimg` and `plt` are presumably defined/imported
# earlier in the file.
if do_show:
    image = mpimg.imread('./test_img/IMG_0531.JPG')
    plt.imshow(image,cmap='gray')
    plt.show()
| {
"content_hash": "9a3687cfed4eefd06989de86b586fec7",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 91,
"avg_line_length": 33.69607843137255,
"alnum_prop": 0.5245853942391621,
"repo_name": "RobinCPC/CE264-Computer_Vision",
"id": "2cab9a9366e4c4db6ca8a65586eb0aa187c8e8c1",
"size": "6897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocr_script/GoogleCloudOCR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54066"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.db import models
class Oauth20Service(models.Model):
    """Abstract base model for an OAuth 2.0 service account linked to a user.

    Concrete subclasses (one per provider) inherit these fields; the
    reverse relation on ``User`` is named after the subclass via the
    ``%(class)ss`` related_name pattern.
    """

    # Provider-issued OAuth 2.0 access token.
    access_token = models.CharField(max_length=255, unique=True)
    # Provider-side unique identifier for the account.
    unique_id = models.CharField(max_length=255, unique=True)
    # Owning Django user; related_name is derived from the subclass name.
    user = models.ForeignKey(User, related_name="%(class)ss")

    class Meta:
        abstract = True
| {
"content_hash": "ce64445f0aca7a85db09cc422137dd49",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 31,
"alnum_prop": 0.7243401759530792,
"repo_name": "dgouldin/django-socialite",
"id": "5d2618e18f87cda6c6ffccdd88faf765564a8344",
"size": "341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socialite/apps/base/oauth20/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55114"
}
],
"symlink_target": ""
} |
from _socket import error as soc_error
import logging
import struct
import traceback
import gevent
import gevent.server
import gevent.socket
from gevent import Timeout
from recall.codec import serialize_message, parse_message, parse_meta
from recall.controller import RpcController
from recall.proto import rpc_meta_pb2
from recall.util import Pool
class RpcServerStat(object):
    """Per-service and per-method call counters for an RpcServer.

    Tracks both an all-time total and a resettable per-interval counter
    at service and method granularity.
    """

    class MethodStat(object):
        """Counters for a single RPC method."""

        def __init__(self):
            self.call_num_per_min = 0
            self.total_call_num = 0

        def reset_stat_per_min(self):
            self.call_num_per_min = 0

    class ServiceStat(object):
        """Counters for one service plus its per-method breakdown."""

        def __init__(self):
            self.call_num_per_min = 0
            self.total_call_num = 0
            self.method_stats = {}

        def add_method_stat(self, method, count):
            if method not in self.method_stats:
                self.method_stats[method] = RpcServerStat.MethodStat()
            stat = self.method_stats[method]
            stat.call_num_per_min += count
            stat.total_call_num += count

        def reset_stat_per_min(self):
            self.call_num_per_min = 0
            for stat in self.method_stats.values():
                stat.reset_stat_per_min()

    def __init__(self):
        self.service_stats = {}

    def add_method_stat(self, service, method, count):
        """Record `count` calls of service.method in all counters."""
        if service not in self.service_stats:
            self.service_stats[service] = RpcServerStat.ServiceStat()
        stat = self.service_stats[service]
        stat.call_num_per_min += count
        stat.total_call_num += count
        stat.add_method_stat(method, count)

    def print_stat(self):
        """Dump a small text table of totals and per-interval counts."""
        print('name\t| total_count\t| count/60s')
        for svc_name, svc_stat in self.service_stats.items():
            print('%s\t| %d\t| %d' % (svc_name, svc_stat.total_call_num,
                                      svc_stat.call_num_per_min))
            for mth_name, mth_stat in svc_stat.method_stats.items():
                print('  %s\t| %d\t| %d' % (mth_name, mth_stat.total_call_num,
                                            mth_stat.call_num_per_min))
        print('')

    def reset_stat_per_min(self):
        for stat in self.service_stats.values():
            stat.reset_stat_per_min()
class BuiltinServiceImpl(rpc_meta_pb2.BuiltinService):
    """Server-side built-in service; currently only the heartbeat."""

    def HeartBeat(self, rpc_controller, request, done):
        # Return code 0 (success) only for the expected magic number.
        rsp = rpc_meta_pb2.HeartBeatResponse()
        rsp.return_code = 0 if request.magic_num == 4321 else 1
        return rsp
class RpcServer(object):
def __init__(self, addr, service_timeout=10, spawn=1000):
if isinstance(addr, str):
self._addr = addr.split(':') # addr string like '127.0.0.1:30006'
self._addr[1] = int(self._addr[1])
else:
self._addr = addr
if isinstance(spawn, (int, long)):
self._pool = Pool(spawn)
self._spawn = self._pool.spawn
else:
self._pool = None
self._spawn = spawn
self._services = {}
self._service_timeout = service_timeout
self._stat = RpcServerStat()
self._stream_server = gevent.server.StreamServer(self._addr,
self._handle_connection)
self._register_builtin_services()
def _handle_connection(self, socket, addr):
socket.setsockopt(gevent.socket.SOL_TCP, gevent.socket.TCP_NODELAY, 1)
socket.setsockopt(gevent.socket.IPPROTO_TCP, gevent.socket.TCP_NODELAY, 1)
rsp_queue = gevent.queue.Queue()
is_connection_closed = [False]
def call_service(req_info):
meta_info, service, method, req = req_info
self._stat.add_method_stat(meta_info.service_name,
meta_info.method_name, 1)
controller = RpcController()
try:
with Timeout(self._service_timeout):
rsp = service.CallMethod(method, controller, req, None)
except Timeout:
meta_info.has_error = True
rsp = rpc_meta_pb2.ErrorResponse(err_code=rpc_meta_pb2.SERVER_SERVICE_TIMEOUT,
err_msg='service timeout')
except:
meta_info.has_error = True
err_msg = 'Error calling service: ' + traceback.format_exc()
rsp = rpc_meta_pb2.ErrorResponse(err_code=rpc_meta_pb2.SERVER_SERVICE_ERROR,
err_msg=err_msg)
rsp_queue.put_nowait((meta_info, rsp))
def recv_req():
content = ""
while True:
try:
recv_buf = socket.recv(1024)
if len(recv_buf) == 0:
break
except Exception, e:
logging.warning('recv_req error: ' + str(e))
break
content += recv_buf
mem_content = memoryview(content)
cur_index = 0
while cur_index < len(content):
if len(mem_content[cur_index:]) < 6:
break
elif mem_content[cur_index:cur_index + 2] != 'PB':
cur_index += 2 # skip the first 2 bytes
break
(buf_size,) = struct.unpack('!I',
mem_content[cur_index + 2: cur_index + 6].tobytes())
if len(mem_content[cur_index + 6:]) < buf_size:
break
pb_buf = mem_content[cur_index + 6: cur_index + 6 + buf_size].tobytes()
cur_index += buf_size + 6
result = self.parse_message(pb_buf)
if result is None:
logging.warning('pb decode error, skip this message')
break
self._spawn(call_service, result)
if cur_index > 0:
content = content[cur_index:]
logging.info(str(addr) + 'has disconnected')
is_connection_closed[0] = True
def send_rsp():
while not is_connection_closed[0]:
try:
meta_info, rsp = rsp_queue.get(timeout=1)
except gevent.queue.Empty:
continue
serialized_rsp = serialize_message(meta_info, rsp)
sent_bytes = 0
try:
while sent_bytes < len(serialized_rsp):
sent_bytes += socket.send(serialized_rsp[sent_bytes:])
except soc_error as e:
logging.warning('socket send error: ' + str(e))
break
workers = [gevent.spawn(recv_req), gevent.spawn(send_rsp)]
gevent.joinall(workers)
def parse_message(self, buf):
result = parse_meta(buf)
if result is None:
return None
meta_len, pb_msg_len, meta_info = result
# try to find the service
try:
service = self._services[meta_info.service_name]
except KeyError:
logging.warning('cannot find the service: ' + meta_info.service_name)
return None
method = service.GetDescriptor().FindMethodByName(meta_info.method_name)
if method is None:
logging.warning('cannot find the method: ' + meta_info.method_name)
return None
msg = parse_message(buf[8 + meta_len:8 + meta_len + pb_msg_len],
service.GetRequestClass(method))
if msg is None:
return None
else:
return meta_info, service, method, msg
def register_service(self, service):
self._services[service.GetDescriptor().full_name] = service
def _register_builtin_services(self):
self.register_service(BuiltinServiceImpl())
def print_stat(self, interval):
while True:
gevent.sleep(interval)
self._stat.print_stat()
self._stat.reset_stat_per_min()
def run(self, print_stat_interval=None):
if print_stat_interval is not None and print_stat_interval > 0:
stat_worker = gevent.spawn(self.print_stat, print_stat_interval)
try:
self._stream_server.serve_forever()
finally:
stat_worker.kill()
if self._pool is not None:
self._pool.join() | {
"content_hash": "961385b4fa4888e8bcbf678d1f072353",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 100,
"avg_line_length": 35.96734693877551,
"alnum_prop": 0.5306400363141172,
"repo_name": "airekans/recall",
"id": "5551597b014949fe69c2149835921c6d39cf95ca",
"size": "8812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recall/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84364"
}
],
"symlink_target": ""
} |
import numpy as np
from hmis.general import calc_age
from datetime import datetime
################################################################################
# Gets IDs within a certain age range.
################################################################################
def select_by_age(master_dictionary, lo=0, hi=1e9, date_to_calc_age=None):
    """Return the dictionaries of the individuals within an age range.

    Args:
        master_dictionary (list): Full list of the individual dictionaries.
        lo (int): Lower bound (inclusive) of the targeted age range.
            Defaults to 0.
        hi (int): Upper bound (inclusive) of the targeted age range.
            Defaults to 1e9.
        date_to_calc_age (str or datetime.datetime or None): Date at which
            the ages are evaluated; a string is parsed first. Defaults to
            "now".

    Returns:
        list: Dictionaries of the individuals whose age is within [lo, hi].
    """
    # Put the date_to_calc_age into a datetime.datetime object.
    if date_to_calc_age is not None:
        if type(date_to_calc_age) == str:
            # NOTE(review): get_date_from_string is not among this module's
            # visible imports -- confirm it is available at runtime.
            date_to_calc_age = get_date_from_string(date_to_calc_age)
    else:
        date_to_calc_age = datetime.now()

    # Collect the personal IDs whose age falls inside [lo, hi].
    personal_IDs = []
    for ind in master_dictionary:
        age = calc_age(ind['DOB'], date_to_calc_age)
        age = age.days / 365.0  # convert the timedelta to years as a float
        if lo <= age <= hi:
            personal_IDs.append(ind['Personal ID'])

    # np.unique both de-duplicates and sorts, so the old extra sort() call
    # was redundant.
    personal_IDs = np.unique(personal_IDs)
    print("%d people have been selected." % (len(personal_IDs)))
    dictionary_subset = subset_from_dictionary(personal_IDs, master_dictionary)
    return dictionary_subset
################################################################################
# Gets information from the selected personal IDs passed through
################################################################################
def subset_from_dictionary(personal_IDs, full_dictionary, matching_key='Personal ID'):
    """Return the subset of dictionaries matching the given personal IDs.

    For each ID the *first* dictionary in ``full_dictionary`` whose
    ``matching_key`` equals that ID is returned; IDs without a match are
    silently skipped. The output order follows ``personal_IDs``.

    Args:
        personal_IDs (iterable): The personal IDs to look up.
        full_dictionary (list): The full list of dictionaries.
        matching_key (string): The key used for cross-referencing.
            Defaults to 'Personal ID'.

    Returns:
        list: The matching dictionaries.
    """
    # Build a one-pass index (first occurrence wins) instead of the old
    # O(len(personal_IDs) * len(full_dictionary)) nested rescan.
    first_match = {}
    for client in full_dictionary:
        first_match.setdefault(client[matching_key], client)
    return [first_match[pid] for pid in personal_IDs if pid in first_match]
# ASK CARES FOLKS IF THIS SHOULD BE NAMED BY PROGRAMS OR PROJECTS
def select_by_number_of_programs(master_dictionary, num_of_programs):
    """Return the individuals enrolled in at least ``num_of_programs`` programs.

    Args:
        master_dictionary (list): Full list of the dictionaries.
        num_of_programs (int): Minimum number of programs an individual
            must have to be selected.

    Returns:
        list: Dictionaries of the qualifying individuals.
    """
    personal_IDs = []
    for ind in master_dictionary:
        # `>= num_of_programs` replaces the old `> (num_of_programs - 1)`.
        if len(ind['Programs']) >= num_of_programs:
            personal_IDs.append(ind['Personal ID'])
    personal_IDs = np.unique(personal_IDs)  # de-duplicates and sorts
    print(len(personal_IDs))
    dictionary_subset = subset_from_dictionary(personal_IDs, master_dictionary)
    return dictionary_subset
def select_by_program_type(master_dictionary, prog_type):
    """Return the individuals that have stayed at the given program type.

    Args:
        master_dictionary (list): Full list of the dictionaries.
        prog_type (str): The program type an individual must have stayed
            at (compared against each program's 'Project type' entry).

    Returns:
        list: Dictionaries of the qualifying individuals.
    """
    personal_IDs = []
    for ind in master_dictionary:
        for p in ind['Programs']:
            if p['Project type'] == prog_type:
                personal_IDs.append(ind['Personal ID'])
                # One matching program is enough; the old code appended the
                # same ID once per matching program and relied on np.unique
                # to filter the duplicates out again.
                break
    personal_IDs = np.unique(personal_IDs)  # de-duplicates and sorts
    print(len(personal_IDs))
    dictionary_subset = subset_from_dictionary(personal_IDs, master_dictionary)
    return dictionary_subset
################################################################################
# Get information from the original data
################################################################################
def get_additional_info(IDs, idtype='Personal', org_data=None, info=None):
    """Get additional information on an individual, project, or an
    individual's entry into a project, keyed by PersonalID, ProjectID,
    or ProjectEntryID respectively.

    Args:
        IDs (list or string): The list of IDs as strings or a single ID.
        idtype (string): 'Personal', 'Project' or 'ProjectEntry'; selects
            which original .csv DataFrames the information is read from.
        org_data (dict of pandas.DataFrame): Output of read_in_data().
        info (list or string): Header(s) of the columns whose values are
            to be returned.

    Returns:
        dict or None: {ID: {header: value}} for each requested ID, or
        None (with a message printed) when the arguments are invalid.
    """
    # ---- Argument validation -------------------------------------------
    if idtype != 'Personal' and idtype != 'Project' and idtype != 'ProjectEntry':
        print("type must be \'Personal\' or \'Project\' or \'ProjectEntry\'!!!")
        print("Instead, idtype is %s" % (idtype))
        print("Returning from get_additional_info() without doing anything")
        return None
    if org_data is None:
        print("org_data must be passed in!")
        print("Instead, org_data is %s" % (org_data))
        print("This is the original data as returned by the read_in_data() function")
        print("Returning from get_additional_info() without doing anything")
        return None
    if info is None:
        print("info must be passed in!")
        print("Instead, info is %s" % (info))
        print("This should be a header or headers (as a list) for the original files.")
        print("Returning from get_additional_info() without doing anything")
        return None

    # Get the list of original .csv files from which we'll look for this info.
    # We can add to this list later, if there is interest.
    list_of_files = []
    idkey = "%sID" % (idtype)
    if idtype == 'Personal':
        list_of_files.append('Client')
    elif idtype == 'ProjectEntry':
        list_of_files.append('Enrollment')
        list_of_files.append('Exit')
    elif idtype == 'Project':
        list_of_files.append('Site')
        list_of_files.append('Project')

    # Normalize single values into lists.
    if type(IDs) == str:
        IDs = [IDs]
    if type(info) == str:
        info = [info]

    # Check that the info keys are actually in the headers, including the
    # idkey, which will be PersonalID or ProjectID or ProjectEntryID.
    for header in info + [idkey]:
        found_header = False
        for name in list_of_files:
            # List of headers from dataframe
            headers = list(org_data[name].columns.values)
            if header in headers:
                found_header = True
                break
        if found_header == False:
            print("%s not found in any of the headers in the files!" % (header))
            print("Returning from get_additional_info() without doing anything")
            return None

    values = {}
    for ID in IDs:
        # For the person or project
        values[ID] = {}
        for header in info:
            # Loop over the different files in which to look.
            for name in list_of_files:
                filedata = org_data[name]
                # We are going to assume that the ID only appears once!
                index = filedata[filedata[idkey] == ID].index.tolist()
                if len(index) == 1:
                    index = index[0]
                elif len(index) == 0:
                    break
                else:
                    # BUGFIX: this message used to interpolate `header`;
                    # the duplicated value is the ID.
                    print("%s appears more than once in the %s file!!!" % (ID, name))
                    print("Using only the first appearence, but this might not be right!")
                    index = index[0]

                if header in list(filedata.columns.values):
                    value = filedata.iloc[index][header]
                    # NaN != NaN, so this marks missing cells as "EMPTY".
                    if value != value:
                        value = "EMPTY"
                    values[ID][header] = value
    return values
| {
"content_hash": "44fbd541e937b07e3041829489ae43e0",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 133,
"avg_line_length": 33.07719298245614,
"alnum_prop": 0.5732470563275698,
"repo_name": "mattbellis/hmis",
"id": "a5733fb4283c04ee748eae129d63810b862b7493",
"size": "9427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hmis/selection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10937"
},
{
"name": "Python",
"bytes": "54237"
},
{
"name": "TeX",
"bytes": "669"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the TicketBooking table.

    Auto-generated by Django's makemigrations; edit with care.
    """

    initial = True

    dependencies = [
        # bus.BusDetails (the FK target below) must exist first.
        ('bus', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TicketBooking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=70)),
                ('email', models.EmailField(max_length=254)),
                ('phone_number', models.CharField(max_length=25)),
                ('booking_status', models.CharField(choices=[('P', 'Pending'), ('C', 'Confirmed')], default='P', max_length=2)),
                ('number_of_seats', models.IntegerField(default=1)),
                ('booking_date', models.DateTimeField(verbose_name='Date of travel')),
                ('time_created', models.DateTimeField(auto_now_add=True)),
                ('time_update', models.DateTimeField(auto_now=True)),
                ('bus_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bus.BusDetails')),
            ],
            options={
                'verbose_name_plural': 'Booked Tickets',
            },
        ),
    ]
| {
"content_hash": "59c8493a844a8721fbfc5c3a44018a1f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 128,
"avg_line_length": 38.911764705882355,
"alnum_prop": 0.5691609977324263,
"repo_name": "warlock57/bus_reservation",
"id": "ad50eded6325dacbae4fcd2d15494a3f437b048b",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bus_ticket/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "588350"
},
{
"name": "HTML",
"bytes": "109044"
},
{
"name": "JavaScript",
"bytes": "411869"
},
{
"name": "Python",
"bytes": "78652"
}
],
"symlink_target": ""
} |
import os
import re
import shutil
import tempfile
from xml.dom import minidom
from oslo.config import cfg
from nova.image import glance
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt import images
from pcsnovadriver.pcs import prlsdkapi_proxy
from pcsnovadriver.pcs import utils as pcsutils
# Shorthand for the prlsdkapi constants module used throughout this file.
pc = prlsdkapi_proxy.consts

# Module-level logger and oslo config handle.
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
def get_template(driver, context, instance, image_meta):
    """Pick the template implementation matching the image's disk format."""
    if image_meta['disk_format'] == 'ez-template':
        return EzTemplate(driver, context, instance, image_meta)
    return DiskTemplate(driver, context, instance, image_meta)
class PCSTemplate(object):
    """Abstract base for PCS template sources (EZ templates, disk images)."""

    def __init__(self, driver, context, instance, image_meta):
        # Lazy %-style logging args: formatting is skipped when INFO is off.
        LOG.info("%s.__init__", self.__class__.__name__)

    def create_instance(self, instance):
        """Create and register a VE from this template; subclasses implement."""
        raise NotImplementedError()
class EzTemplate(PCSTemplate):
    """Container template distributed as an EZ-template RPM in glance.

    On construction the glance template version is compared with the
    locally installed RPM and the newer one is installed or updated.
    """

    def __init__(self, driver, context, instance, image_meta):
        PCSTemplate.__init__(self, driver, context, instance, image_meta)
        self.driver = driver
        self.rpm_path = None
        self.instance = instance

        # Compare the glance template with the locally installed RPM.
        name, version, release = self._get_remote_info(context, image_meta)
        lname, lversion, lrelease = self._get_rpm_info(pkg=name)
        LOG.info("Glance template: %s-%s-%s, local rpm: %s-%s-%s" %
                 (name, version, release, lname, lversion, lrelease))
        self.name = name[:-3]  # drops a 3-char suffix; presumably '-ez' -- TODO confirm
        if not lname:
            self._download_rpm(context, image_meta)
            LOG.info("installing rpm for template %s" % name)
            utils.execute('rpm', '-i', self.rpm_path, run_as_root=True)
        else:
            x = self._cmp_version_release(version, release, lversion, lrelease)
            if x == 0:
                return
            elif x < 0:
                self._download_rpm(context, image_meta)
                LOG.info("updating rpm for template %s" % name)
                # BUGFIX: was `utils.execute('rpm', '-U', file, ...)`,
                # passing the `file` builtin instead of the downloaded RPM.
                utils.execute('rpm', '-U', self.rpm_path, run_as_root=True)
            else:
                LOG.warn("local rpm is newer than remote one!")

    def _download_rpm(self, context, image_meta):
        """Fetch the template RPM from glance into a temp file (idempotent)."""
        LOG.info("_download_rpm")
        if self.rpm_path:
            return

        if image_meta['name']:
            name = image_meta['name']
        else:
            name = image_meta['id']

        if CONF.tempdir:
            tempdir = CONF.tempdir
        else:
            tempdir = tempfile.gettempdir()
        rpm_path = os.path.join(tempdir, name)

        images.fetch(context, self.instance['image_ref'], rpm_path,
                     self.instance['user_id'], self.instance['project_id'])
        self.rpm_path = rpm_path

    def _get_remote_info(self, context, image_meta):
        """Return (name, version, release) of the glance template.

        Falls back to downloading the RPM and reading its headers when
        the pcs_* glance properties are missing.
        """
        LOG.info("_get_remote_info")
        for prop in 'pcs_name', 'pcs_version', 'pcs_release':
            if prop not in image_meta['properties']:
                self._download_rpm(context, image_meta)
                name, ver, rel = self._get_rpm_info(file=self.rpm_path)
                if not name:
                    raise Exception("Invalid rpm file: %s" % self.rpm_path)
                # BUGFIX: return the values read from the RPM here; the
                # glance properties are missing, so looking them up below
                # would raise KeyError.
                return name, ver, rel
        return (image_meta['properties']['pcs_name'],
                image_meta['properties']['pcs_version'],
                image_meta['properties']['pcs_release'])

    def _get_rpm_info(self, file=None, pkg=None):
        """Return (name, version, release) of an RPM file or installed
        package, or (None, None, None) when `rpm -q` fails."""
        LOG.info("_get_rpm_info")
        cmd = ['rpm', '-q', '--qf', '%{NAME},%{VERSION},%{RELEASE}']
        if file:
            cmd += ['-p', file]
        else:
            cmd.append(pkg)

        try:
            out, err = utils.execute(*cmd)
        except processutils.ProcessExecutionError:
            return None, None, None
        LOG.info("out: %r" % out)
        return tuple(out.split(','))

    def _cmp_version(self, ver1, ver2):
        """Compare dotted version strings component-wise.

        Returns a negative value when ver1 is greater (or longer),
        positive when ver2 is, and 0 when equal.
        """
        ver1_list = ver1.split('.')
        ver2_list = ver2.split('.')
        if len(ver1_list) > len(ver2_list):
            return -1
        elif len(ver1_list) < len(ver2_list):
            return 1
        for i in range(len(ver1_list)):
            if int(ver1_list[i]) > int(ver2_list[i]):
                return -1
            elif int(ver1_list[i]) < int(ver2_list[i]):
                return 1
        return 0

    def _cmp_version_release(self, ver1, rel1, ver2, rel2):
        # Version is the primary key; release breaks ties.
        x = self._cmp_version(ver1, ver2)
        if x:
            return x
        else:
            return self._cmp_version(rel1, rel2)

    def create_instance(self):
        """Create and register a new container based on this EZ template."""
        sdk_ve = self.driver.psrv.get_default_vm_config(pc.PVT_CT,
                                                        'vswap.1024MB', 0, 0).wait()[0]
        sdk_ve.set_uuid(self.instance['uuid'])
        sdk_ve.set_name(self.instance['name'])
        sdk_ve.set_vm_type(pc.PVT_CT)
        sdk_ve.set_os_template(self.name)
        sdk_ve.reg('', True).wait()
        return sdk_ve
class DiskTemplate(PCSTemplate):
    """This class is for templates, based on disk images,
    stored in glance. Creates either a VM ('hvm' mode, the default)
    or a container ('exe' mode).
    """

    def __init__(self, driver, context, instance, image_meta):
        PCSTemplate.__init__(self, driver, context, instance, image_meta)
        self.driver = driver
        self.context = context
        self.instance = instance
        self.image_meta = image_meta

    def _create_ct(self):
        """Create a container whose root.hdd comes from the cached image."""
        sdk_ve = self.driver.psrv.get_default_vm_config(pc.PVT_CT,
                                                        'vswap.1024MB', 0, 0).wait()[0]
        sdk_ve.set_uuid(self.instance['uuid'])
        sdk_ve.set_name(self.instance['name'])
        sdk_ve.set_vm_type(pc.PVT_CT)
        sdk_ve.set_os_template(self.image_meta['properties']['pcs_ostemplate'])

        LOG.info("Creating container from eztemplate ...")
        sdk_ve.reg('', True).wait()

        disk_path = sdk_ve.get_home_path()
        disk_path = os.path.join(disk_path, 'root.hdd')

        # Replace the disk created from the OS template with the image.
        LOG.info("Removing original disk ...")
        utils.execute('rm', '-rf', disk_path, run_as_root=True)

        self.driver.image_cache.put_image(self.context,
                                          self.instance['image_ref'],
                                          self.image_meta, disk_path)
        LOG.info("Done")
        return sdk_ve

    def _create_vm(self):
        """Create a blank VM and attach the cached image as its boot disk."""
        sdk_ve = self.driver._create_blank_vm(self.instance)

        # copy hard disk to VM directory
        ve_path = os.path.dirname(sdk_ve.get_home_path())
        disk_path = os.path.join(ve_path, "harddisk.hdd")
        self.driver.image_cache.put_image(self.context,
                                          self.instance['image_ref'],
                                          self.image_meta, disk_path)

        # add hard disk to VM config and set is as boot device
        srv_cfg = self.driver.psrv.get_srv_config().wait().get_param()
        sdk_ve.begin_edit().wait()

        hdd = sdk_ve.add_default_device_ex(srv_cfg, pc.PDE_HARD_DISK)
        hdd.set_image_path(disk_path)

        b = sdk_ve.create_boot_dev()
        b.set_type(pc.PDE_HARD_DISK)
        b.set_index(hdd.get_index())
        b.set_sequence_index(0)
        b.set_in_use(1)

        sdk_ve.commit().wait()
        return sdk_ve

    def create_instance(self):
        """Dispatch to VM or container creation based on vm_mode."""
        props = self.image_meta['properties']
        if 'vm_mode' in props and props['vm_mode'] not in ['hvm', 'exe']:
            raise Exception("Unsupported VM mode '%s'" % props['vm_mode'])
        # Idiom fix: `'vm_mode' not in props` instead of `not 'vm_mode' in props`.
        if 'vm_mode' not in props or props['vm_mode'] == 'hvm':
            return self._create_vm()
        elif props['vm_mode'] == 'exe':
            return self._create_ct()
class ImageCache(object):
    """Base class for image cache handlers. There is only one
    operation: put image to the specified destination. If image
    is not in cache - it should be downloaded.
    """

    def put_image(self, context, image_ref, image_meta, dst):
        # Subclasses must fetch (and cache) the image, then unpack it to dst.
        raise NotImplementedError()
class LZRWImageCache(ImageCache):
    """Class for retrieving from cache of LZRW images.
    There are 3 actions on cached image:
    1. cache
    2. unpack
    3. remove (will be done in ImageCacheManager later)
    So we need to synchronize these places. All these actions
    can be executed from separate threads.
    The idea is that since cache images are regular files, we
    can open file and unpack using that fd. So removing cached
    image while unpacking it will not fail.
    Several unpacks can work simultaneously, because they just
    read file contents.
    To forbid unpacking image while caching it it's cached to the
    temporary file and then first opened and then renamed. So
    if cached file exists - it can be unpacked. If doesn't exists
    it has to be cached.
    Several caching operation protected by lock. So if we got lock
    then ether file is not cached and nobody will try to cache it
    until we release lock or someone already cached file before us.
    In this case we just open a file and release lock.
    Several remove operations can be a problem. We need to check if
    manage_image_cache can be called from several threads
    simultaneously.
    """

    def __init__(self):
        # Lay out the cache directory structure (images/, locks/, tmp/)
        # under CONF.pcs_template_dir, creating the root as nova:nova
        # when it does not exist yet.
        if not os.path.exists(CONF.pcs_template_dir):
            utils.execute('mkdir', '-p', CONF.pcs_template_dir,
                          run_as_root=True)
            utils.execute('chown', 'nova:nova', CONF.pcs_template_dir,
                          run_as_root=True)
        self.images_dir = os.path.join(CONF.pcs_template_dir, 'images')
        self.locks_dir = os.path.join(CONF.pcs_template_dir, 'locks')
        self.tmp_dir = os.path.join(CONF.pcs_template_dir, 'tmp')
        for d in self.images_dir, self.locks_dir, self.tmp_dir:
            if not os.path.exists(d):
                os.mkdir(d)
        # Cached images are stored as <image_id>.tar.lzrw.
        self.name_suffix = '.tar.lzrw'

    def _get_cached_file(self, image_id):
        # Full path of the cached archive for the given glance image id.
        return os.path.join(self.images_dir, image_id + self.name_suffix)

    def _cache_image(self, context, image_ref, image_meta, dst):
        # Download and convert the glance image into a tar.lzrw at dst.
        downloader = get_downloader(image_meta['disk_format'])
        LOG.info('Downloading image %s (%s) from glance' %
                 (image_meta['name'], image_ref))
        downloader.fetch_to_lzrw(context, image_ref, image_meta, dst)

    def _open(self, path):
        # Open for reading; return None when the file does not exist,
        # re-raise any other IOError.
        try:
            f = open(path)
            return f
        except IOError as e:
            if e.errno != os.errno.ENOENT:
                raise
            return None

    def _open_cached_file(self, context, image_ref, image_meta, dst):
        # Return an open fd for the cached archive, downloading it under a
        # per-image lock when missing (open-before-rename; see the class
        # docstring for the synchronization protocol).
        # NOTE(review): the `dst` argument is unused here -- confirm.
        image_id = image_meta['id']
        fpath = self._get_cached_file(image_id)
        f = self._open(fpath)
        if f:
            return f
        with lockutils.lock(image_id, external=True, lock_path=self.locks_dir):
            # Re-check: another thread may have cached it while we waited.
            f = self._open(fpath)
            if f:
                return f
            tmp = tempfile.mktemp(dir=self.tmp_dir)
            self._cache_image(context, image_ref, image_meta, tmp)
            f = open(tmp)
            os.rename(tmp, fpath)
            return f

    def put_image(self, context, image_ref, image_meta, dst):
        # Unpack the (possibly freshly downloaded) cached archive into dst.
        utils.execute('mkdir', dst, run_as_root=True)
        f = self._open_cached_file(context, image_ref, image_meta, dst)
        try:
            LOG.info("Unpacking image %s to %s" %
                     (self._get_cached_file(image_meta['id']), dst))
            pcsutils.uncompress_ploop(None, dst, src_file=f,
                                      root_helper=utils._get_root_helper())
        finally:
            f.close()

    def list_images(self):
        # Image ids currently present in the cache directory.
        files = os.listdir(self.images_dir)
        images = map(lambda x: x[:-len(self.name_suffix)], files)
        return images

    def delete_image(self, image_id):
        # Remove a cached archive; in-flight unpacks keep their open fd.
        os.unlink(self._get_cached_file(image_id))
class ImageDownloader(object):
    """Subclasses of this class download images from glance
    to local image cache with all needed conversions.
    """

    def fetch_to_lzrw(self, context, image_ref, image_meta, dst):
        # Subclasses must write the image as a tar.lzrw archive at dst.
        raise NotImplementedError()
class BasePloopDownloader(ImageDownloader):
    """Common logic for downloaders that lay the glance image out as a
    ploop and then pack it into a tar.lzrw archive."""

    def _download_ploop(self, context, image_ref,
                        image_meta, image_service, dst):
        # Subclasses fetch the image and materialize it as a ploop in dst.
        raise NotImplementedError()

    def fetch_to_lzrw(self, context, image_ref, image_meta, dst):
        """Download the image, convert it to ploop and compress into dst."""
        # Work in a per-image scratch directory, recreated from scratch.
        work_dir = os.path.join(CONF.pcs_template_dir,
                                'tmp', image_meta['id'])
        if os.path.exists(work_dir):
            shutil.rmtree(work_dir)
        os.mkdir(work_dir)

        image_service = glance.get_remote_image_service(context, image_ref)[0]
        self._download_ploop(context, image_ref, image_meta,
                             image_service, work_dir)

        LOG.info("Packing image to %s" % dst)
        pcsutils.compress_ploop(work_dir, dst)
        shutil.rmtree(work_dir)
class PloopDownloader(BasePloopDownloader):
    """Download images already stored in ploop format."""

    def _get_image_name(self, disk_descriptor):
        """Extract the single image file name from a DiskDescriptor.xml.

        Raises:
            Exception: when the descriptor is malformed or contains
                snapshots (more than one image).
        """
        doc = minidom.parseString(disk_descriptor)
        disk_image = doc.firstChild

        items = disk_image.getElementsByTagName('StorageData')
        if len(items) != 1:
            raise Exception('Invalid DiskDescriptor.xml')
        storage_data = items[0]

        items = storage_data.getElementsByTagName('Storage')
        if len(items) != 1:
            raise Exception('Invalid DiskDescriptor.xml')
        storage = items[0]

        images = storage.getElementsByTagName('Image')
        if len(images) != 1:
            # BUGFIX: error message typo, was 'spapshots'.
            raise Exception('Ploop contains snapshots')
        image = images[0]

        files = image.getElementsByTagName('File')
        if len(files) != 1:
            raise Exception('Invalid DiskDescriptor.xml')
        # Renamed from `file` to avoid shadowing the builtin.
        file_node = files[0]

        text = file_node.firstChild
        if text.nodeType != text.TEXT_NODE:
            raise Exception('Invalid DiskDescriptor.xml')
        return text.nodeValue

    def _download_ploop(self, context, image_ref,
                        image_meta, image_service, dst):
        """Fetch the raw ploop image and write its DiskDescriptor.xml."""
        dd = image_meta['properties']['pcs_disk_descriptor']
        image_name = self._get_image_name(dd)
        with open(os.path.join(dst, image_name), 'w') as f:
            image_service.download(context, image_ref, f)
        with open(os.path.join(dst, 'DiskDescriptor.xml'), 'w') as f:
            f.write(image_meta['properties']['pcs_disk_descriptor'])
class QemuDownloader(BasePloopDownloader):
    """This class downloads images in formats, which
    qemu-img supports, and converts them into a ploop.
    """

    def _download_ploop(self, context, image_ref,
                        image_meta, image_service, dst):
        """Fetch the glance image and convert it onto a fresh ploop."""
        glance_img = 'glance.img'
        glance_path = os.path.join(dst, glance_img)
        with open(glance_path, 'w') as f:
            image_service.download(context, image_ref, f)

        # Size the ploop after the image's virtual size.
        out, err = utils.execute('qemu-img', 'info',
                                 '--output=json', glance_path)
        img_info = jsonutils.loads(out)
        size = int(img_info['virtual-size'])

        utils.execute('ploop', 'init', '-s',
                      '%dK' % (size >> 10), os.path.join(dst, 'root.hds'))

        dd_path = os.path.join(dst, 'DiskDescriptor.xml')
        out, err = utils.execute('ploop', 'mount', dd_path, run_as_root=True)

        ro = re.search(r'dev=(\S+)', out)
        if not ro:
            # BUGFIX: the original only unmounted here and then fell
            # through to ro.group(1), crashing with AttributeError; fail
            # explicitly instead.
            utils.execute('ploop', 'umount', dd_path, run_as_root=True)
            raise Exception('Cannot find ploop device in: %s' % out)
        ploop_dev = ro.group(1)

        try:
            LOG.info("Convert to ploop format ...")
            utils.execute('qemu-img', 'convert', '-O', 'raw',
                          glance_path, ploop_dev, run_as_root=True)
        finally:
            utils.execute('ploop', 'umount', dd_path, run_as_root=True)
            utils.execute('rm', '-f', dd_path + '.lck')

        os.unlink(glance_path)
class LZRWDownloader(ImageDownloader):
    """Downloader for images that are already stored in cploop format."""

    def fetch_to_lzrw(self, context, image_ref, image_meta, dst):
        # The image is already in the target format, so stream it from
        # glance straight into the destination file as-is.
        service = glance.get_remote_image_service(context, image_ref)[0]
        with open(dst, 'w') as target:
            service.download(context, image_ref, target)
def get_downloader(disk_format):
    """Return the downloader instance matching a glance disk_format.

    'ploop' and 'cploop' get their dedicated downloaders; every other
    format is handled through qemu-img conversion.
    """
    if disk_format == 'ploop':
        return PloopDownloader()
    if disk_format == 'cploop':
        return LZRWDownloader()
    return QemuDownloader()
| {
"content_hash": "b28916a8bae1fe95164e86c8cb7f0d24",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 79,
"avg_line_length": 35.23376623376623,
"alnum_prop": 0.5851455952819756,
"repo_name": "CloudServer/pcs-nova-driver",
"id": "eb72103ef658b526c1c9914e9cbc0d08adbb556c",
"size": "16918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcsnovadriver/pcs/template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143916"
},
{
"name": "Shell",
"bytes": "7110"
}
],
"symlink_target": ""
} |
from stellar.models import get_unique_hash, Table, Snapshot
def test_get_unique_hash():
    """get_unique_hash returns a truthy, 32-char value that never repeats."""
    first = get_unique_hash()
    second = get_unique_hash()
    assert first
    assert first != second
    assert len(get_unique_hash()) == 32
def test_table():
    # Build a Table attached to a Snapshot whose hash is a 32-char hex
    # string, mirroring what get_unique_hash() produces.
    table = Table(
        table_name='hapsu',
        snapshot=Snapshot(
            snapshot_name='snapshot',
            project_name='myproject',
            hash='3330484d0a70eecab84554b5576b4553'
        )
    )
    # Only the length of the derived physical table name is pinned here;
    # the exact naming scheme lives in stellar.models — TODO confirm.
    assert len(table.get_table_name('master')) == 24
| {
"content_hash": "6d517506d98e7f1fbbfd961554ce1753",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 26.31578947368421,
"alnum_prop": 0.612,
"repo_name": "Wanderfalke/stellar",
"id": "234d7e5a30a3331056a29c2623f76f58b47c2913",
"size": "500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30868"
}
],
"symlink_target": ""
} |
from .core import guess
| {
"content_hash": "eb55a97da1407e826a3e6b208d1031c4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.7916666666666666,
"repo_name": "spulec/guesser",
"id": "47c687de153b21b2adea483c3d24c5390ccde259",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guesser/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5761"
}
],
"symlink_target": ""
} |
from troposphere import Base64, FindInMap, GetAtt, Join, Output
from troposphere import Parameter, Ref, Template
import troposphere.ec2 as ec2
import troposphere.elasticloadbalancingv2 as elb
def AddAMI(template):
    """Register the per-region AMI lookup table ("RegionMap") on *template*."""
    region_amis = {
        "us-east-1": {"AMI": "ami-6411e20d"},
        "us-west-1": {"AMI": "ami-c9c7978c"},
        "us-west-2": {"AMI": "ami-fcff72cc"},
        "eu-west-1": {"AMI": "ami-37c2f643"},
        "ap-southeast-1": {"AMI": "ami-66f28c34"},
        "ap-northeast-1": {"AMI": "ami-9c03a89d"},
        "sa-east-1": {"AMI": "ami-a039e6bd"},
    }
    template.add_mapping("RegionMap", region_amis)
def main():
    """Build and print a CloudFormation template: an internet-facing
    Network Load Balancer spread over two subnets (one EIP each),
    forwarding port 80 to a single EC2 web server instance.
    """
    template = Template()
    template.add_version("2010-09-09")
    template.add_description(
        "AWS CloudFormation Sample Template: NLB with 1 EC2 instance")
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(Parameter(
        "KeyName",
        Type="String",
        Default="mark",
        Description="Name of an existing EC2 KeyPair to "
                    "enable SSH access to the instance",
    ))
    template.add_parameter(Parameter(
        "InstanceType",
        Type="String",
        Description="WebServer EC2 instance type",
        Default="m1.small",
        AllowedValues=[
            "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge",
            "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium", "c1.xlarge",
            "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge"
        ],
        ConstraintDescription="must be a valid EC2 instance type.",
    ))
    webport_param = template.add_parameter(Parameter(
        "WebServerPort",
        Type="String",
        Default="8888",
        Description="TCP/IP port of the web server",
    ))
    # NOTE(review): default subnet/VPC ids below are account-specific and
    # will only resolve in the author's account.
    subnetA = template.add_parameter(Parameter(
        "subnetA",
        Type="String",
        Default="subnet-096fd06d"
    ))
    subnetB = template.add_parameter(Parameter(
        "subnetB",
        Type="String",
        Default="subnet-1313ef4b"
    ))
    VpcId = template.add_parameter(Parameter(
        "VpcId",
        Type="String",
        Default="vpc-82c514e6"
    ))

    # Define the instance security group: SSH plus the web server port.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ]
        )
    )

    # One Elastic IP per NLB subnet mapping.
    eipA = template.add_resource(ec2.EIP('eipA', Domain='vpc',))
    eipB = template.add_resource(ec2.EIP('eipB', Domain='vpc',))

    # Add the web server instance
    WebInstance = template.add_resource(ec2.Instance(
        "WebInstance",
        SecurityGroups=[Ref(instance_sg)],
        KeyName=Ref(keyname_param),
        InstanceType=Ref("InstanceType"),
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        # UserData carries the port so the instance can start its server on it.
        UserData=Base64(Ref(webport_param)),
    ))

    # Add the network LB
    NetworkLB = template.add_resource(elb.LoadBalancer(
        "NetworkLB",
        Name="NetworkLB",
        Scheme="internet-facing",
        SubnetMappings=[
            elb.SubnetMapping(
                AllocationId=GetAtt(eipA, 'AllocationId'),
                SubnetId=Ref(subnetA)
            ),
            elb.SubnetMapping(
                AllocationId=GetAtt(eipB, 'AllocationId'),
                SubnetId=Ref(subnetB)
            )
        ],
        Type='network'
    ))
    # HTTP health-checked target group pointing at the single instance.
    TargetGroupWeb = template.add_resource(elb.TargetGroup(
        "TargetGroupWeb",
        HealthCheckIntervalSeconds="30",
        HealthCheckProtocol="HTTP",
        HealthCheckTimeoutSeconds="10",
        HealthyThresholdCount="4",
        Matcher=elb.Matcher(
            HttpCode="200"),
        Name="WebTarget",
        Port=Ref(webport_param),
        Protocol="HTTP",
        Targets=[elb.TargetDescription(
            Id=Ref(WebInstance),
            Port=Ref(webport_param))],
        UnhealthyThresholdCount="3",
        VpcId=Ref(VpcId)
    ))
    # Listen on port 80 and forward to the target group.
    template.add_resource(elb.Listener(
        "Listener",
        Port="80",
        Protocol="HTTP",
        LoadBalancerArn=Ref(NetworkLB),
        DefaultActions=[elb.Action(
            Type="forward",
            TargetGroupArn=Ref(TargetGroupWeb)
        )]
    ))

    template.add_output(Output(
        "URL",
        Description="URL of the sample website",
        Value=Join("", ["http://", GetAtt(NetworkLB, "DNSName")])
    ))
    print(template.to_json())
# Emit the rendered CloudFormation JSON when run as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "feb112e3dc231b527d5ca51ce9516e6b",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 29.159763313609467,
"alnum_prop": 0.5549918831168831,
"repo_name": "pas256/troposphere",
"id": "b721f66c5deb2fda445d599633dd1d629b798cd0",
"size": "5046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/NetworkLB.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "579"
},
{
"name": "Python",
"bytes": "521885"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
from base import BaseStore
from voodoo import VoodooStore
from facet import FacetStore
from django import DjangoStore
from memory import BaseMemoryStore
from obj import BaseMemoryObject
| {
"content_hash": "59a0e25235debd2e628560a12998d6e7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 34,
"avg_line_length": 31,
"alnum_prop": 0.8709677419354839,
"repo_name": "prior/cynq",
"id": "89e901b4eefb2d21c0af6781de2956901bccdba5",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cynq/store/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53751"
}
],
"symlink_target": ""
} |
import yaml
import os.path
import re
import sys
kwarg_pattern = re.compile('<([a-zA-Z0-9_]+=?[a-zA-Z0-9_, \(\)\'\"]*)>')
def to_string(value):
    """Render *value* for embedding in a SCPI command string.

    Lists/tuples become comma-separated items, None becomes the empty
    string, and everything else is passed through str().
    """
    if value is None:
        return ""
    if type(value) in (list, tuple):
        return ",".join(str(item) for item in value)
    return str(value)
def indent(text, levels, pad=" "):
    """Prefix every line of *text* with *pad* repeated *levels* times."""
    prefix = pad * levels
    return prefix + text.replace("\n", "\n" + prefix)
def isnumeric(value):
    """Return True when *value* parses as a float, False otherwise."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def process_kwarg_default(value):
    """Normalize a keyword-argument default parsed from a command template.

    Containers ("(...)"/"[...]") and explicitly quoted strings pass through
    unchanged, numbers are stringified without quotes, and bare words are
    wrapped in double quotes so they are valid Python default values.

    BUG FIX: an empty default (e.g. "<arg=>", which kwarg_pattern can
    produce) previously raised IndexError on value[0]; it now maps to an
    empty quoted string.
    """
    if not value:
        return '""'  # empty default, treat as empty string
    first_last = value[0] + value[-1]
    if first_last in ("()", "[]"):
        return value  # default is a list or tuple, assume values were entered correctly
    if first_last in ('""', "''"):
        return value  # value is an explicit string, return as is
    try:
        # Numeric defaults are used verbatim (no quoting). This inlines the
        # module's isnumeric() helper so the check stays self-contained.
        float(value)
        return str(value)
    except ValueError:
        return '"{:}"'.format(value)  # treat as string, must have quotes to use as a kwarg default value
def parse_command_string(command_string):
    """Split a command template into its generated-code pieces.

    Returns a pair ``(kwargs_string, scpi_command)`` where kwargs_string is
    the ", name=default, ..." fragment appended to the generated method
    signature and scpi_command is the source expression that produces the
    final SCPI string at runtime.
    """
    placeholders = kwarg_pattern.findall(command_string)

    parsed = []
    for placeholder in placeholders:
        if "=" in placeholder:
            name, default = placeholder.split("=")
            default = process_kwarg_default(default)
        else:
            name, default = placeholder, '""'
        parsed.append([name, default])

    kwargs_string = "".join(
        ", {:}={:}".format(name, default) for name, default in parsed
    )

    if placeholders:
        # Replace each placeholder with a "{:}" slot and emit a call to the
        # runtime scpi_preprocess() helper with the keyword names.
        command_base = kwarg_pattern.sub("{:}", command_string)
        args_string = ", ".join(name for name, _ in parsed)
        scpi_command = 'scpi_preprocess("{:}", {:})'.format(command_base, args_string)
    else:
        # No placeholders: the command is a plain string literal.
        scpi_command = '"{:}"'.format(command_string)

    return kwargs_string, scpi_command
def parse_write_values_string(command_string):
    """
    parse the command string for the write_values scpi command which is a little different than the others

    Parameters
    ----------
    command_string : str
        the input string that will be parsed for keyword arguments

    Returns
    -------
    (kwargs_string, scpi_command, data_variable) : tuple of str
        signature fragment, generated SCPI expression, and the name of the
        keyword that carries the data block to write.
    """
    # NOTE: assumes at least one placeholder is present; raises IndexError
    # on a template with none (the last placeholder names the data block).
    args = kwarg_pattern.findall(command_string)

    kwargs = list()
    for arg in args:
        if "=" in arg:
            kwarg, val = arg.split("=")
            val = process_kwarg_default(val)
        else:
            kwarg = arg
            val = '""'
        kwargs.append([kwarg, val])
    kwargs[-1][1] = "None"  # data_values will be set to None as default

    kwargs_string = "".join([', ' + kwarg + "=" + val for kwarg, val in kwargs])

    # Strip the data placeholder from the command template itself; it is
    # passed to write_values() separately, not formatted into the command.
    command_string = command_string.replace("<{:}>".format(args[-1]), "")
    command_base = kwarg_pattern.sub("{:}", command_string)
    args_string = ", ".join(kwarg for kwarg, val in kwargs[:-1])  # last arg is the data we pass in
    scpi_command = 'scpi_preprocess("{:}", {:})'.format(command_base, args_string)

    return kwargs_string, scpi_command, kwargs[-1][0]
def generate_set_string(command, command_root):
    """Render the source of a generated ``set_<name>`` driver method."""
    scpi_text = " ".join((command_root, to_string(command["set"]))).strip()
    kwargs_string, scpi_command = parse_command_string(scpi_text)
    template = (
        "def set_{:s}(self{:}):\n"
        "    scpi_command = {:}\n"
        "    self.write(scpi_command)"
    )
    return template.format(command['name'], kwargs_string, scpi_command)
def generate_set_values_string(command, command_root):
    """Render the source of a generated ``set_<name>`` method that writes a
    block of data values after the SCPI command."""
    scpi_text = " ".join((command_root, to_string(command["set_values"]))).strip()
    kwargs_string, scpi_command, data_variable = parse_write_values_string(scpi_text)
    template = (
        "def set_{:s}(self{:}):\n"
        "    scpi_command = {:}\n"
        "    self.write_values(scpi_command, {:})"
    )
    return template.format(
        command['name'], kwargs_string, scpi_command, data_variable)
def generate_query_string(command, command_root):
    """Render the source of a generated ``query_<name>`` driver method.

    Raises ValueError when the template's 'returns' entry is not one of
    the supported converter names.
    """
    command_string = "? ".join((command_root, to_string(command["query"]))).strip()
    kwargs_string, scpi_command = parse_command_string(command_string)

    # 'returns' picks the runtime converter applied to the instrument reply.
    converter = command.get('returns', "str")
    valid_converters = ("int", "str", "float", "bool")
    if converter not in valid_converters:
        raise ValueError("""error in processing command {:}
returns value '{:}' is invalid
must be one of {:}
""".format(command_string, converter, ", ".join(valid_converters)))

    # Only emit a process_query() call when some post-processing is needed
    # (CSV split, quote stripping, or a non-str conversion).
    pre_line = ""
    strip_outer_quotes = bool(command.get("strip_outer_quotes", True))
    csv = bool(command.get('csv', False))
    if csv or strip_outer_quotes or converter != "str":
        pre_line = \
            "\n    value = process_query(value, csv={:}, strip_outer_quotes={:}, returns='{:}')".format(
                csv, strip_outer_quotes, converter
            )

    function_string = \
        """def query_{:s}(self{:}):
    scpi_command = {:}
    value = self.query(scpi_command){:}
    return value""".format(command['name'], kwargs_string, scpi_command, pre_line)
    return function_string
def generate_query_values_string(command, command_root):
    """Render the source of a generated ``query_<name>`` method returning a
    block of array values."""
    scpi_text = "? ".join((command_root, to_string(command["query_values"]))).strip()
    kwargs_string, scpi_command = parse_command_string(scpi_text)
    template = (
        "def query_{:s}(self{:}):\n"
        "    scpi_command = {:}\n"
        "    return self.query_values(scpi_command)"
    )
    return template.format(command['name'], kwargs_string, scpi_command)
def parse_branch(branch, set_strings=None, query_strings=None, query_value_strings=None, root=""):
    """Recursively walk the YAML command tree, collecting generated sources.

    Parameters
    ----------
    branch : dict
        mapping of node names to either a command definition (dict with a
        "name" key), a {"command": ..., "branch": ...} pair, or a nested
        branch.
    set_strings, query_strings, query_value_strings : list or None
        accumulators threaded through the recursion; fresh lists are
        created when omitted.
    root : str
        colon-separated SCPI path of the parent node.

    Returns
    -------
    (set_strings, query_strings) : tuple of lists of str
    """
    if set_strings is None:
        set_strings = list()
    if query_strings is None:
        query_strings = list()
    if query_value_strings is None:
        query_value_strings = list()

    for key, value in branch.items():
        command_root = root + ":" + key
        command = None
        sub_branch = None  # renamed: no longer shadows the `branch` parameter
        try:
            if "name" in value.keys():
                command = value
            elif "command" in value.keys():
                command = value["command"]
                sub_branch = value["branch"]
            else:
                sub_branch = value
        except Exception:
            # Log the offending node, then re-raise the ORIGINAL exception.
            # BUG FIX: the old `raise Exception(e)` discarded the exception
            # type and traceback.
            print(key, value)
            raise

        if command:
            if "set" in command:
                set_strings.append(generate_set_string(command, command_root))
            if "set_values" in command:
                set_strings.append(generate_set_values_string(command, command_root))
            if "query" in command:
                query_strings.append(generate_query_string(command, command_root))
            if "query_values" in command:
                query_strings.append(generate_query_values_string(command, command_root))

        if sub_branch:
            parse_branch(sub_branch, set_strings, query_strings, query_value_strings, command_root)

    return set_strings, query_strings
header_string = """import re
null_parameter = re.compile(",{2,}") # detect optional null parameter as two consecutive commas, and remove
converters = {
"str": str,
"int": int,
"float": float,
"bool": lambda x: bool(int(x)),
}"""
string_converter = """def to_string(value):
tval = type(value)
if tval is str:
return value
elif tval is bool:
return str(int(value))
elif tval in (list, tuple):
return ",".join(map(to_string, value))
elif value is None:
return ""
else:
return str(value)"""
scpi_preprocessor = """def scpi_preprocess(command_string, *args):
args = list(args)
for i, arg in enumerate(args):
args[i] = to_string(arg)
cmd = command_string.format(*args)
return null_parameter.sub(",", cmd)"""
query_processor = """def process_query(query, csv=False, strip_outer_quotes=True, returns="str"):
if strip_outer_quotes is True:
if query[0] + query[-1] in ('""', "''"):
query = query[1:-1]
if csv is True:
query = query.split(",")
converter = None if returns == "str" else converters.get(returns, None)
if converter:
if csv is True:
query = list(map(converter, query))
else:
query = converter(query)
return query"""
class_header = """class SCPI(object):
def __init__(self, resource):
self.resource = resource
self.echo = False # print scpi command string to scpi out
def write(self, scpi, *args, **kwargs):
if self.echo:
print(scpi)
self.resource.write(scpi, *args, **kwargs)
def query(self, scpi, *args, **kwargs):
if self.echo:
print(scpi)
return self.resource.query(scpi, *args, **kwargs)
def write_values(self, scpi, *args, **kwargs):
if self.echo:
print(scpi)
self.resource.write_values(scpi, *args, **kwargs)
def query_values(self, scpi, *args, **kwargs):
if self.echo:
print(scpi)
return self.resource.query_values(scpi, *args, **kwargs)
"""
def parse_yaml_file(driver_yaml_file):
    """Generate a Python SCPI driver module next to *driver_yaml_file*.

    Reads the YAML command tree, renders set_*/query_* methods for every
    command, and writes the assembled module to '<same name>.py'.
    """
    driver = os.path.splitext(driver_yaml_file)[0] + ".py"

    with open(driver_yaml_file, 'r') as yaml_file:
        # safe_load: the template is plain data, and yaml.load() without an
        # explicit Loader is deprecated and can construct arbitrary objects.
        driver_template = yaml.safe_load(yaml_file)

    sets, queries = parse_branch(driver_template["COMMAND_TREE"])

    driver_str = "\n\n\n".join((header_string, string_converter, scpi_preprocessor, query_processor)) + "\n\n\n"
    driver_str += class_header
    # Methods are emitted case-insensitively sorted, indented into the class.
    for s in sorted(sets, key=str.lower):
        driver_str += "\n" + indent(s, 1) + "\n"
    for q in sorted(queries, key=str.lower):
        driver_str += "\n" + indent(q, 1) + "\n"

    with open(driver, 'w') as scpi_driver:
        scpi_driver.write(driver_str)
# CLI entry point: first argument is the path to the driver YAML template.
if __name__ == "__main__":
    driver_yaml_file = os.path.abspath(sys.argv[1])
    parse_yaml_file(driver_yaml_file)
| {
"content_hash": "dc30d5c9f35ffaeb078bec28516473a9",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 112,
"avg_line_length": 32.68456375838926,
"alnum_prop": 0.5935318275154005,
"repo_name": "Ttl/scikit-rf",
"id": "4eef8d90fe3b75eb1ee4968fe3002c971b00a481",
"size": "9740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skrf/vi/scpi/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "C",
"bytes": "4015"
},
{
"name": "Jupyter Notebook",
"bytes": "7151"
},
{
"name": "Python",
"bytes": "1124439"
},
{
"name": "Scheme",
"bytes": "6630"
},
{
"name": "Shell",
"bytes": "219"
},
{
"name": "TypeScript",
"bytes": "1286336"
}
],
"symlink_target": ""
} |
import cx_Oracle
import time

# Smoke-test script: wait for the Oracle container ("oradb") to come up,
# then verify connectivity by selecting SYSDATE.
print("About to sleep")
time.sleep(30)

dsnStr = cx_Oracle.makedsn("oradb", 1521, "xe")
print("No mo sleeping brah")

connection = cx_Oracle.connect(user="hr", password="hr", dsn=dsnStr)
try:
    # BUG FIX: the cursor and connection were never closed; release them
    # even if the query fails.
    cursor = connection.cursor()
    try:
        cursor.execute("select sysdate from dual")
        today, = cursor.fetchone()
        print("The current date is", today)
    finally:
        cursor.close()
finally:
    connection.close()
"content_hash": "d7a1f5cbdcff5ed22f2aaff5f8498498",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.7211267605633803,
"repo_name": "chembl/surechembl-data-client",
"id": "3f3f381a9778b88e31142e47cb10d536e9f6ecda",
"size": "355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Docker/data-client/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1465"
},
{
"name": "Python",
"bytes": "83626"
},
{
"name": "Shell",
"bytes": "1040"
},
{
"name": "TSQL",
"bytes": "7457"
}
],
"symlink_target": ""
} |
import copy
from django.conf import settings
from olympia.constants.promoted import RECOMMENDED
import olympia.core.logger
from olympia import amo
from olympia.amo.indexers import BaseSearchIndexer
from olympia.amo.utils import attach_trans_dict
from olympia.amo.celery import create_chunked_tasks_signatures
from olympia.lib.es.utils import create_index
from olympia.versions.compare import version_int
log = olympia.core.logger.getLogger('z.es')
class AddonIndexer(BaseSearchIndexer):
    # NOTE(review): this docstring actually describes the hidden_fields
    # attribute below, not the class itself — consider turning it into a
    # comment on hidden_fields and writing a real class docstring.
    """Fields we don't need to expose in the results, only used for filtering
    or sorting."""

    hidden_fields = (
        '*.raw',
        'boost',
        'colors',
        'hotness',
        # Translated content that is used for filtering purposes is stored
        # under 3 different fields:
        # - One field with all translations (e.g., "name").
        # - One field for each language, using corresponding analyzer
        #   (e.g., "name_l10n_en-us", "name_l10n_fr", etc.)
        # - One field with all translations in separate objects for the API
        #   (e.g. "name_translations")
        # Only that last one with all translations needs to be returned.
        'name',
        'description',
        'name_l10n_*',
        'description_l10n_*',
        'summary',
        'summary_l10n_*',
    )

    # Custom analysis chain applied when the index is created; see
    # create_new_index() below.
    index_settings = {
        'analysis': {
            'analyzer': {
                'standard_with_word_split': {
                    # This analyzer tries to split the text into words by using
                    # various methods. It also lowercases them and make sure
                    # each token is only returned once.
                    # Only use for short things with extremely meaningful
                    # content like add-on name - it makes too many
                    # modifications to be useful for things like descriptions,
                    # for instance.
                    'tokenizer': 'standard',
                    'filter': [
                        'standard',
                        'custom_word_delimiter',
                        'lowercase',
                        'stop',
                        'custom_dictionary_decompounder',
                        'unique',
                    ],
                },
                'trigram': {
                    # Analyzer that splits the text into trigrams.
                    'tokenizer': 'ngram_tokenizer',
                    'filter': [
                        'lowercase',
                    ],
                },
            },
            'tokenizer': {
                'ngram_tokenizer': {
                    'type': 'ngram',
                    'min_gram': 3,
                    'max_gram': 3,
                    'token_chars': ['letter', 'digit'],
                }
            },
            'normalizer': {
                'lowercase_keyword_normalizer': {
                    # By default keywords are indexed 'as-is', but for exact
                    # name matches we need to lowercase them before indexing,
                    # so this normalizer does that for us.
                    'type': 'custom',
                    'filter': ['lowercase'],
                },
            },
            'filter': {
                'custom_word_delimiter': {
                    # This filter is useful for add-on names that have multiple
                    # words sticked together in a way that is easy to
                    # recognize, like FooBar, which should be indexed as FooBar
                    # and Foo Bar. (preserve_original: True makes us index both
                    # the original and the split version.)
                    'type': 'word_delimiter',
                    'preserve_original': True,
                },
                'custom_dictionary_decompounder': {
                    # This filter is also useful for add-on names that have
                    # multiple words sticked together, but without a pattern
                    # that we can automatically recognize. To deal with those,
                    # we use a small dictionary of common words. It allows us
                    # to index 'awesometabpassword' as 'awesome tab password',
                    # helping users looking for 'tab password' find that addon.
                    'type': 'dictionary_decompounder',
                    'word_list': [
                        'all',
                        'auto',
                        'ball',
                        'bar',
                        'block',
                        'blog',
                        'bookmark',
                        'browser',
                        'bug',
                        'button',
                        'cat',
                        'chat',
                        'click',
                        'clip',
                        'close',
                        'color',
                        'context',
                        'cookie',
                        'cool',
                        'css',
                        'delete',
                        'dictionary',
                        'down',
                        'download',
                        'easy',
                        'edit',
                        'fill',
                        'fire',
                        'firefox',
                        'fix',
                        'flag',
                        'flash',
                        'fly',
                        'forecast',
                        'fox',
                        'foxy',
                        'google',
                        'grab',
                        'grease',
                        'html',
                        'http',
                        'image',
                        'input',
                        'inspect',
                        'inspector',
                        'iris',
                        'js',
                        'key',
                        'keys',
                        'lang',
                        'link',
                        'mail',
                        'manager',
                        'map',
                        'mega',
                        'menu',
                        'menus',
                        'monkey',
                        'name',
                        'net',
                        'new',
                        'open',
                        'password',
                        'persona',
                        'privacy',
                        'query',
                        'screen',
                        'scroll',
                        'search',
                        'secure',
                        'select',
                        'smart',
                        'spring',
                        'status',
                        'style',
                        'super',
                        'sync',
                        'tab',
                        'text',
                        'think',
                        'this',
                        'time',
                        'title',
                        'translate',
                        'tree',
                        'undo',
                        'upload',
                        'url',
                        'user',
                        'video',
                        'window',
                        'with',
                        'word',
                        'zilla',
                    ],
                },
            },
        }
    }
@classmethod
def get_model(cls):
    """Return the model class this indexer indexes (Addon)."""
    # Imported lazily to avoid a circular import at module load time.
    from olympia.addons.models import Addon

    return Addon
@classmethod
def get_index_alias(cls):
    """Return the name of the ES index alias used for add-ons."""
    aliases = settings.ES_INDEXES
    return aliases.get('default')
@classmethod
def get_mapping(cls):
    """Return the full ES mapping definition for the add-on doctype."""
    doc_name = cls.get_doctype_name()

    # Per-application min/max compatibility sub-mapping; the *_human
    # variants are display strings and are not indexed.
    appver_mapping = {
        'properties': {
            'max': {'type': 'long'},
            'min': {'type': 'long'},
            'max_human': {'type': 'keyword', 'index': False},
            'min_human': {'type': 'keyword', 'index': False},
        }
    }
    version_mapping = {
        'type': 'object',
        'properties': {
            'compatible_apps': {
                'properties': {app.id: appver_mapping for app in amo.APP_USAGE}
            },
            # Keep '<version>.id' indexed to be able to run exists queries
            # on it.
            'id': {'type': 'long'},
            'reviewed': {'type': 'date', 'index': False},
            'files': {
                'type': 'object',
                'properties': {
                    'id': {'type': 'long', 'index': False},
                    'created': {'type': 'date', 'index': False},
                    'hash': {'type': 'keyword', 'index': False},
                    'filename': {'type': 'keyword', 'index': False},
                    'is_mozilla_signed_extension': {'type': 'boolean'},
                    'size': {'type': 'long', 'index': False},
                    'strict_compatibility': {'type': 'boolean', 'index': False},
                    'status': {'type': 'byte'},
                    'permissions': {'type': 'keyword', 'index': False},
                    'optional_permissions': {'type': 'keyword', 'index': False},
                },
            },
            'license': {
                'type': 'object',
                'properties': {
                    'id': {'type': 'long', 'index': False},
                    'builtin': {'type': 'boolean', 'index': False},
                    'name_translations': cls.get_translations_definition(),
                    'url': {'type': 'text', 'index': False},
                },
            },
            'release_notes_translations': cls.get_translations_definition(),
            'version': {'type': 'keyword', 'index': False},
        },
    }
    mapping = {
        doc_name: {
            'properties': {
                'id': {'type': 'long'},
                'app': {'type': 'byte'},
                'average_daily_users': {'type': 'long'},
                'bayesian_rating': {'type': 'double'},
                'boost': {'type': 'float', 'null_value': 1.0},
                'category': {'type': 'integer'},
                # Dominant theme colors, stored as nested HSL objects so
                # individual colors can be queried independently.
                'colors': {
                    'type': 'nested',
                    'properties': {
                        'h': {'type': 'integer'},
                        's': {'type': 'integer'},
                        'l': {'type': 'integer'},
                        'ratio': {'type': 'double'},
                    },
                },
                'contributions': {'type': 'text'},
                'created': {'type': 'date'},
                'current_version': version_mapping,
                'default_locale': {'type': 'keyword', 'index': False},
                'description': {'type': 'text', 'analyzer': 'snowball'},
                'guid': {'type': 'keyword'},
                'has_eula': {'type': 'boolean', 'index': False},
                'has_privacy_policy': {'type': 'boolean', 'index': False},
                'hotness': {'type': 'double'},
                'icon_hash': {'type': 'keyword', 'index': False},
                'icon_type': {'type': 'keyword', 'index': False},
                'is_disabled': {'type': 'boolean'},
                'is_experimental': {'type': 'boolean'},
                'is_recommended': {'type': 'boolean'},
                'last_updated': {'type': 'date'},
                'listed_authors': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': 'long'},
                        'name': {'type': 'text'},
                        'username': {'type': 'keyword'},
                        'is_public': {'type': 'boolean', 'index': False},
                    },
                },
                'modified': {'type': 'date', 'index': False},
                'name': {
                    'type': 'text',
                    # Adding word-delimiter to split on camelcase, known
                    # words like 'tab', and punctuation, and eliminate
                    # duplicates.
                    'analyzer': 'standard_with_word_split',
                    'fields': {
                        # Raw field for exact matches and sorting.
                        'raw': cls.get_raw_field_definition(),
                        # Trigrams for partial matches.
                        'trigrams': {
                            'type': 'text',
                            'analyzer': 'trigram',
                        },
                    },
                },
                'previews': {
                    'type': 'object',
                    'properties': {
                        'id': {'type': 'long', 'index': False},
                        'caption_translations': cls.get_translations_definition(),
                        'modified': {'type': 'date', 'index': False},
                        'sizes': {
                            'type': 'object',
                            'properties': {
                                'thumbnail': {'type': 'short', 'index': False},
                                'image': {'type': 'short', 'index': False},
                            },
                        },
                    },
                },
                'promoted': {
                    'type': 'object',
                    'properties': {
                        'group_id': {'type': 'byte'},
                        'approved_for_apps': {'type': 'byte'},
                    },
                },
                'ratings': {
                    'type': 'object',
                    'properties': {
                        'count': {'type': 'short', 'index': False},
                        'average': {'type': 'float'},
                    },
                },
                'slug': {'type': 'keyword'},
                'requires_payment': {'type': 'boolean', 'index': False},
                'status': {'type': 'byte'},
                'summary': {'type': 'text', 'analyzer': 'snowball'},
                'tags': {'type': 'keyword'},
                'type': {'type': 'byte'},
                'weekly_downloads': {'type': 'long'},
            },
        },
    }

    # Add fields that we expect to return all translations without being
    # analyzed/indexed.
    cls.attach_translation_mappings(
        mapping,
        (
            'description',
            'developer_comments',
            'homepage',
            'name',
            'summary',
            'support_email',
            'support_url',
        ),
    )

    # Add language-specific analyzers for localized fields that are
    # analyzed/indexed.
    cls.attach_language_specific_analyzers(mapping, ('description', 'summary'))
    cls.attach_language_specific_analyzers_with_raw_variant(mapping, ('name',))

    return mapping
@classmethod
def extract_version(cls, obj, version_obj):
    """Return the ES sub-document for *version_obj*, or None when the
    add-on has no such version."""
    from olympia.versions.models import License, Version

    data = (
        {
            'id': version_obj.pk,
            'compatible_apps': cls.extract_compatibility_info(obj, version_obj),
            'files': [
                {
                    'id': version_obj.file.id,
                    'created': version_obj.file.created,
                    'filename': version_obj.file.filename,
                    'hash': version_obj.file.hash,
                    'is_mozilla_signed_extension': (
                        version_obj.file.is_mozilla_signed_extension
                    ),
                    'size': version_obj.file.size,
                    'status': version_obj.file.status,
                    'strict_compatibility': version_obj.file.strict_compatibility,
                    'permissions': version_obj.file.permissions,
                    'optional_permissions': version_obj.file.optional_permissions,
                }
            ],
            'reviewed': version_obj.reviewed,
            'version': version_obj.version,
        }
        if version_obj
        else None
    )
    if data and version_obj:
        # Pre-fetch all translations for the version in one query before
        # reading translated fields.
        attach_trans_dict(Version, [version_obj])
        data.update(
            cls.extract_field_api_translations(
                version_obj, 'release_notes', db_field='release_notes_id'
            )
        )
        if version_obj.license:
            data['license'] = {
                'id': version_obj.license.id,
                'builtin': bool(version_obj.license.builtin),
                'url': version_obj.license.url,
            }
            attach_trans_dict(License, [version_obj.license])
            data['license'].update(
                cls.extract_field_api_translations(version_obj.license, 'name')
            )
    return data
@classmethod
def extract_compatibility_info(cls, obj, version_obj):
    """Return compatibility info for the specified version_obj, as will be
    indexed in ES."""
    compatible_apps = {}
    for app, appver in version_obj.compatible_apps.items():
        if appver:
            min_, max_ = appver.min.version_int, appver.max.version_int
            min_human, max_human = appver.min.version, appver.max.version
            if not version_obj.file.strict_compatibility:
                # The files attached to this version are not using strict
                # compatibility, so the max version essentially needs to be
                # ignored - let's fake a super high one. We leave max_human
                # alone to leave the API representation intact.
                max_ = version_int('*')
        else:
            # Fake wide compatibility for add-ons with no info. We don't
            # want to reindex every time a new version of the app is
            # released, so we directly index a super high version as the
            # max.
            min_human, max_human = (
                amo.D2C_MIN_VERSIONS.get(app.id, '1.0'),
                amo.FAKE_MAX_VERSION,
            )
            min_, max_ = version_int(min_human), version_int(max_human)
        compatible_apps[app.id] = {
            'min': min_,
            'min_human': min_human,
            'max': max_,
            'max_human': max_human,
        }
    return compatible_apps
@classmethod
def extract_document(cls, obj):
    """Extract indexable attributes from an add-on."""
    from olympia.addons.models import Preview

    # Simple scalar attributes copied verbatim from the model instance.
    attrs = (
        'id',
        'average_daily_users',
        'bayesian_rating',
        'contributions',
        'created',
        'default_locale',
        'guid',
        'hotness',
        'icon_hash',
        'icon_type',
        'is_disabled',
        'is_experimental',
        'last_updated',
        'modified',
        'requires_payment',
        'slug',
        'status',
        'type',
        'weekly_downloads',
    )
    data = {attr: getattr(obj, attr) for attr in attrs}

    data['colors'] = None
    # Extract dominant colors from static themes.
    if obj.type == amo.ADDON_STATICTHEME:
        first_preview = obj.current_previews.first()
        if first_preview:
            data['colors'] = first_preview.colors

    data['app'] = [app.id for app in obj.compatible_apps.keys()]
    # Boost by the number of users on a logarithmic scale.
    data['boost'] = float(data['average_daily_users'] ** 0.2)
    # Quadruple the boost if the add-on is public.
    if (
        obj.status == amo.STATUS_APPROVED
        and not obj.is_experimental
        and 'boost' in data
    ):
        data['boost'] = float(max(data['boost'], 1) * 4)
    # We can use all_categories because the indexing code goes through the
    # transformer that sets it.
    data['category'] = [cat.id for cat in obj.all_categories]
    data['current_version'] = cls.extract_version(obj, obj.current_version)
    data['listed_authors'] = [
        {
            'name': a.name,
            'id': a.id,
            'username': a.username,
            'is_public': a.is_public,
        }
        for a in obj.listed_authors
    ]
    data['has_eula'] = bool(obj.eula)
    data['has_privacy_policy'] = bool(obj.privacy_policy)
    data['is_recommended'] = bool(
        obj.promoted and obj.promoted.group == RECOMMENDED
    )
    data['previews'] = [
        {'id': preview.id, 'modified': preview.modified, 'sizes': preview.sizes}
        for preview in obj.current_previews
    ]
    data['promoted'] = (
        {
            'group_id': obj.promoted.group_id,
            # store the app approvals because .approved_applications needs it.
            'approved_for_apps': [
                app.id for app in obj.promoted.approved_applications
            ],
        }
        if obj.promoted
        else None
    )
    data['ratings'] = {
        'average': obj.average_rating,
        'count': obj.total_ratings,
        'text_count': obj.text_ratings_count,
    }
    # We can use tag_list because the indexing code goes through the
    # transformer that sets it (attach_tags).
    data['tags'] = getattr(obj, 'tag_list', [])

    # Handle localized fields.
    # First, deal with the 3 fields that need everything:
    for field in ('description', 'name', 'summary'):
        data.update(cls.extract_field_api_translations(obj, field))
        data.update(
            cls.extract_field_search_translation(obj, field, obj.default_locale)
        )
        data.update(cls.extract_field_analyzed_translations(obj, field))
    # Then add fields that only need to be returned to the API without
    # contributing to search relevancy.
    for field in ('developer_comments', 'homepage', 'support_email', 'support_url'):
        data.update(cls.extract_field_api_translations(obj, field))
    if obj.type != amo.ADDON_STATICTHEME:
        # Also do that for preview captions, which are set on each preview
        # object.
        attach_trans_dict(Preview, obj.current_previews)
        for i, preview in enumerate(obj.current_previews):
            data['previews'][i].update(
                cls.extract_field_api_translations(preview, 'caption')
            )
    return data
@classmethod
def create_new_index(cls, index_name):
    """
    Create a new index for addons in ES.

    Intended to be used by reindexation (and tests), generally a bad idea
    to call manually.
    """
    # Deep-copy so nothing downstream can mutate the shared class-level
    # index_settings dict.
    index_settings = copy.deepcopy(cls.index_settings)

    config = {
        'mappings': {
            cls.get_doctype_name(): cls.get_mapping(),
        },
        'settings': {
            # create_index will add its own index settings like number of
            # shards and replicas.
            'index': index_settings
        },
    }
    create_index(index_name, config)
@classmethod
def reindex_tasks_group(cls, index_name):
    """
    Return the group of tasks to execute for a full reindex of addons on
    the index called `index_name` (which is not an alias but the real
    index name).
    """
    from olympia.addons.tasks import index_addons

    # NOTE(review): `index_name` is not used in this body — the chunked
    # index_addons tasks presumably target the current alias; confirm.
    ids = cls.get_model().unfiltered.values_list('id', flat=True).order_by('id')
    # 150 add-ons per celery task.
    chunk_size = 150
    return create_chunked_tasks_signatures(index_addons, list(ids), chunk_size)
| {
"content_hash": "3e92b580c6721436cc7a8ba15d1e625e",
"timestamp": "",
"source": "github",
"line_count": 621,
"max_line_length": 88,
"avg_line_length": 39.62157809983897,
"alnum_prop": 0.41459053038000404,
"repo_name": "mozilla/olympia",
"id": "0e5ab023b158574a0613a6b59ff5d765b62e2cf0",
"size": "24605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/addons/indexers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3997396"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
"""Test UTXO set hash value calculation in gettxoutsetinfo."""
import struct
from test_framework.blocktools import create_transaction
from test_framework.messages import (
CBlock,
COutPoint,
FromHex,
)
from test_framework.muhash import MuHash3072
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class UTXOSetHashTest(BitcoinTestFramework):
    """Check UTXO set hash values reported by gettxoutsetinfo."""
    def set_test_params(self):
        # Single node; start from a fresh chain so the deterministic hashes
        # below are reproducible.
        self.num_nodes = 1
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def test_deterministic_hash_results(self):
        """Pin the two hash flavours against known-good constants."""
        self.log.info("Test deterministic UTXO set hash results")
        # These depend on the setup_clean_chain option, the chain loaded from the cache
        assert_equal(self.nodes[0].gettxoutsetinfo()['hash_serialized_2'], "b32ec1dda5a53cd025b95387aad344a801825fe46a60ff952ce26528f01d3be8")
        assert_equal(self.nodes[0].gettxoutsetinfo("muhash")['muhash'], "dd5ad2a105c2d29495f577245c357409002329b9f4d6182c0af3dc2f462555c8")
    def test_muhash_implementation(self):
        """Recompute the muhash locally and compare with the node's value."""
        self.log.info("Test MuHash implementation consistency")
        node = self.nodes[0]
        # Generate 100 blocks and remove the first since we plan to spend its
        # coinbase
        block_hashes = node.generate(100)
        blocks = list(map(lambda block: FromHex(CBlock(), node.getblock(block, False)), block_hashes))
        spending = blocks.pop(0)
        # Create a spending transaction and mine a block which includes it
        tx = create_transaction(node, spending.vtx[0].rehash(), node.getnewaddress(), amount=49)
        txid = node.sendrawtransaction(hexstring=tx.serialize_with_witness().hex(), maxfeerate=0)
        tx_block = node.generateblock(output=node.getnewaddress(), transactions=[txid])
        blocks.append(FromHex(CBlock(), node.getblock(tx_block['hash'], False)))
        # Serialize the outputs that should be in the UTXO set and add them to
        # a MuHash object
        muhash = MuHash3072()
        for height, block in enumerate(blocks):
            # The Genesis block coinbase is not part of the UTXO set and we
            # spent the first mined block
            height += 2
            for tx in block.vtx:
                for n, tx_out in enumerate(tx.vout):
                    # A transaction is a coinbase when its only input's
                    # prevout hash is null (falsy).
                    coinbase = 1 if not tx.vin[0].prevout.hash else 0
                    # Skip witness commitment
                    if (coinbase and n > 0):
                        continue
                    data = COutPoint(int(tx.rehash(), 16), n).serialize()
                    # Height and coinbase flag packed into one little-endian
                    # int32: (height * 2) + coinbase.
                    data += struct.pack("<i", height * 2 + coinbase)
                    data += tx_out.serialize()
                    muhash.insert(data)
        finalized = muhash.digest()
        node_muhash = node.gettxoutsetinfo("muhash")['muhash']
        # The node reports the digest in byte-reversed hex, hence [::-1].
        assert_equal(finalized[::-1].hex(), node_muhash)
    def run_test(self):
        self.test_deterministic_hash_results()
        self.test_muhash_implementation()
if __name__ == '__main__':
    # Entry point: run the functional test through the framework's main().
    UTXOSetHashTest().main()
| {
"content_hash": "deacefbbe08ac8d13f873b18d2ad638e",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 142,
"avg_line_length": 37.792682926829265,
"alnum_prop": 0.6453694740238787,
"repo_name": "Sjors/bitcoin",
"id": "6e6046d84df8ef9b11b6dd00f41c832ca38047d2",
"size": "3313",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/feature_utxo_set_hash.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "1134356"
},
{
"name": "C++",
"bytes": "8242550"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "202935"
},
{
"name": "Makefile",
"bytes": "125925"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2311412"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "9210"
},
{
"name": "Shell",
"bytes": "164103"
}
],
"symlink_target": ""
} |
from rllab.spaces.base import Space
import numpy as np
from rllab.misc import special2 as special
import tensorflow as tf
class Discrete(Space):
    """The discrete space {0, 1, ..., n-1}."""

    def __init__(self, n):
        self._n = n

    @property
    def n(self):
        """Number of distinct elements in the space."""
        return self._n

    def sample(self):
        """Draw one uniformly random element."""
        return np.random.randint(self.n)

    def sample_n(self, n):
        """Draw `n` uniformly random elements as an array."""
        return np.random.randint(low=0, high=self.n, size=n)

    def contains(self, x):
        """Return True iff `x` is a scalar integer within [0, n)."""
        value = np.asarray(x)
        is_scalar_int = value.shape == () and value.dtype.kind == 'i'
        return is_scalar_int and value >= 0 and value < self.n

    def __repr__(self):
        return "Discrete(%d)" % self.n

    def flatten(self, x):
        """Encode an element as a one-hot vector of length n."""
        return special.to_onehot(x, self.n)

    def unflatten(self, x):
        """Decode a one-hot vector back to its integer element."""
        return special.from_onehot(x)

    def flatten_n(self, x):
        """Encode a batch of elements as one-hot rows."""
        return special.to_onehot_n(x, self.n)

    def unflatten_n(self, x):
        """Decode a batch of one-hot rows back to integer elements."""
        return special.from_onehot_n(x)

    @property
    def default_value(self):
        return 0

    @property
    def flat_dim(self):
        """Length of the flattened (one-hot) representation."""
        return self.n

    def weighted_sample(self, weights):
        """Draw one element according to the given probability weights."""
        return special.weighted_sample(weights, range(self.n))

    def new_tensor_variable(self, name, extra_dims):
        """Create a TF placeholder for batches of one-hot encoded values.

        uint8 is used for safe conversion to float32.
        """
        return tf.placeholder(dtype=tf.uint8,
                              shape=[None] * extra_dims + [self.flat_dim],
                              name=name)

    @property
    def dtype(self):
        return tf.uint8

    def __eq__(self, other):
        return isinstance(other, Discrete) and self.n == other.n

    def __hash__(self):
        return hash(self.n)
| {
"content_hash": "5ae97030c6a51467f7b983ff7362ee3e",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 101,
"avg_line_length": 23.64788732394366,
"alnum_prop": 0.5902322811197142,
"repo_name": "brain-research/mirage-rl-qprop",
"id": "6cbfc70ed9f6133948916bda7f5eeee323adc03a",
"size": "1679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/rocky/tf/spaces/discrete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8270"
},
{
"name": "Dockerfile",
"bytes": "2310"
},
{
"name": "HTML",
"bytes": "14896"
},
{
"name": "JavaScript",
"bytes": "28156"
},
{
"name": "Jupyter Notebook",
"bytes": "151886"
},
{
"name": "Mako",
"bytes": "3714"
},
{
"name": "Python",
"bytes": "1831569"
},
{
"name": "Ruby",
"bytes": "12147"
},
{
"name": "Shell",
"bytes": "13760"
}
],
"symlink_target": ""
} |
import numpy as np
from pyodesys.util import import_
from pyodesys.core import integrate_chained
from pyodesys.symbolic import SymbolicSys, PartiallySolvedSystem, symmetricsys, TransformedSys
from pyodesys.tests._robertson import get_ode_exprs
sp = import_('sympy')
def _test_chained_multi_native(NativeSys, integrator='cvode', rtol_close=0.02, atol=1e-10,
                               rtol=1e-14, steps_fact=1, **kwargs):
    """Integrate the Robertson problem with chained solvers and check results.

    Exercises ``integrate_chained`` on the (optionally reduced and/or
    log-transformed) Robertson ODE system, comparing the state at t=1e11
    against a reference solution.

    Parameters
    ----------
    NativeSys : class
        Backend-specific native system class to mix in.
    integrator : str
        Name of the integrator forwarded to ``integrate_chained``.
    rtol_close : float
        Relative tolerance when comparing against the reference solution.
    atol, rtol : float
        Integration tolerances.
    steps_fact : number
        Scaling factor applied to the ``nsteps`` limits.
    **kwargs :
        Must contain ``logc``, ``logt``, ``reduced``, ``zero_time``,
        ``zero_conc`` and ``nonnegative`` (popped here); the remainder is
        forwarded to ``integrate_chained``.
    """
    logc, logt, reduced = kwargs.pop('logc'), kwargs.pop('logt'), kwargs.pop('reduced')
    zero_time, zero_conc, nonnegative = kwargs.pop('zero_time'), kwargs.pop('zero_conc'), kwargs.pop('nonnegative')
    # (log, exp) pair used for the optional symmetric transformation.
    # NOTE: the original assigned this twice; the duplicate was removed.
    logexp = (sp.log, sp.exp)
    ny, nk = 3, 3
    k = (.04, 1e4, 3e7)
    init_conc = (1, zero_conc, zero_conc)
    tend = 1e11
    # Reference concentrations of the Robertson problem at t = 1e11.
    _yref_1e11 = (0.2083340149701255e-7, 0.8333360770334713e-13, 0.9999999791665050)
    lin_s = SymbolicSys.from_callback(get_ode_exprs(logc=False, logt=False)[0], ny, nk,
                                      lower_bounds=[0]*ny if nonnegative else None)
    if reduced:
        if logc or logt:
            PartSolvSys = PartiallySolvedSystem  # we'll add NativeSys further down below
        else:
            class PartSolvSys(PartiallySolvedSystem, NativeSys):
                pass
        other1, other2 = [_ for _ in range(3) if _ != (reduced-1)]

        def reduced_analytic(x0, y0, p0):
            # Mass conservation: the eliminated variable equals the total
            # initial amount minus the two remaining dependent variables.
            return {lin_s.dep[reduced-1]: y0[0] + y0[1] + y0[2] - lin_s.dep[other1] - lin_s.dep[other2]}

        our_sys = PartSolvSys(lin_s, reduced_analytic)
    else:
        our_sys = lin_s
    if logc or logt:
        class TransformedNativeSys(TransformedSys, NativeSys):
            pass
        SS = symmetricsys(logexp if logc else None, logexp if logt else None, SuperClass=TransformedNativeSys)
        our_sys = SS.from_other(our_sys)
    ori_sys = NativeSys.from_other(lin_s)
    for sys_iter, kw in [
            ([our_sys, ori_sys], {
                'nsteps': [100*steps_fact, 1613*1.05*steps_fact],
                'return_on_error': [True, False]
            }),
            ([ori_sys], {
                'nsteps': [1705*1.01*steps_fact]
            })
    ]:
        results = integrate_chained(
            sys_iter, kw, [(zero_time, tend)]*3,
            [init_conc]*3, [k]*3, integrator=integrator, atol=atol, rtol=rtol, **kwargs)
        for res in results:
            x, y, nfo = res
            assert np.allclose(_yref_1e11, y[-1, :], atol=1e-16, rtol=rtol_close)
            assert nfo['success'] == True  # noqa
            assert nfo['nfev'] > 100
            assert nfo['njev'] > 10
            assert nfo['nsys'] in (1, 2)
| {
"content_hash": "35840ddbe5f239e164b46428e59f0c30",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 115,
"avg_line_length": 38.46376811594203,
"alnum_prop": 0.5813865862848531,
"repo_name": "bjodah/pyodesys",
"id": "eea21d7019c3e0c03bd27b3148e8f1c4445ad55e",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyodesys/native/tests/_test_robertson_native.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "33074"
},
{
"name": "Cython",
"bytes": "31378"
},
{
"name": "Dockerfile",
"bytes": "1295"
},
{
"name": "JavaScript",
"bytes": "3806"
},
{
"name": "Jupyter Notebook",
"bytes": "13716"
},
{
"name": "Python",
"bytes": "309688"
},
{
"name": "Shell",
"bytes": "18142"
},
{
"name": "TeX",
"bytes": "2053"
}
],
"symlink_target": ""
} |
"""
This module holds models related to the Sponsor entity.
"""
from allauth.account.models import EmailAddress
from django.conf import settings
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import slugify
from django.urls import reverse
from django_countries.fields import CountryField
from ordered_model.models import OrderedModel
from django.contrib.contenttypes.fields import GenericRelation
from cms.models import ContentManageable
from sponsors.models.assets import GenericAsset
from sponsors.models.managers import SponsorContactQuerySet
class Sponsor(ContentManageable):
    """
    Group all of the sponsor information, logo and contacts
    """

    name = models.CharField(
        max_length=100,
        verbose_name="Name",
        help_text="Name of the sponsor, for public display.",
    )
    description = models.TextField(
        verbose_name="Description",
        help_text="Brief description of the sponsor for public display.",
    )
    landing_page_url = models.URLField(
        blank=True,
        null=True,
        verbose_name="Landing page URL",
        help_text="Landing page URL. This may be provided by the sponsor, however the linked page may not contain any "
        "sales or marketing information.",
    )
    twitter_handle = models.CharField(
        max_length=32,  # Actual limit set by twitter is 15 characters, but that may change?
        blank=True,
        null=True,
        verbose_name="Twitter handle",
    )
    web_logo = models.ImageField(
        upload_to="sponsor_web_logos",
        verbose_name="Web logo",
        help_text="For display on our sponsor webpage. High resolution PNG or JPG, smallest dimension no less than "
        "256px",
    )
    print_logo = models.FileField(
        upload_to="sponsor_print_logos",
        blank=True,
        null=True,
        verbose_name="Print logo",
        help_text="For printed materials, signage, and projection. SVG or EPS",
    )
    primary_phone = models.CharField("Primary Phone", max_length=32)
    mailing_address_line_1 = models.CharField(
        verbose_name="Mailing Address line 1", max_length=128, default=""
    )
    mailing_address_line_2 = models.CharField(
        verbose_name="Mailing Address line 2", max_length=128, blank=True, default=""
    )
    city = models.CharField(verbose_name="City", max_length=64, default="")
    state = models.CharField(
        verbose_name="State/Province/Region", max_length=64, blank=True, default=""
    )
    postal_code = models.CharField(
        verbose_name="Zip/Postal Code", max_length=64, default=""
    )
    country = CountryField(default="")
    assets = GenericRelation(GenericAsset)

    class Meta:
        verbose_name = "sponsor"
        verbose_name_plural = "sponsors"

    def verified_emails(self, initial_emails=None):
        """Return contact emails backed by a verified allauth EmailAddress.

        ``initial_emails`` seeds the result. The output is de-duplicated
        case-insensitively (for duplicates differing only in case, the
        last-seen casing wins); ordering is not guaranteed.
        """
        # Copy the seed so we never mutate the caller's list — the previous
        # implementation appended to the passed-in list in place.
        emails = list(initial_emails) if initial_emails is not None else []
        for contact in self.contacts.all():
            if EmailAddress.objects.filter(
                email__iexact=contact.email, verified=True
            ).exists():
                emails.append(contact.email)
        return list(set({e.casefold(): e for e in emails}.values()))

    def __str__(self):
        return f"{self.name}"

    @property
    def full_address(self):
        """One-line mailing address for display."""
        addr = self.mailing_address_line_1
        if self.mailing_address_line_2:
            addr += f" {self.mailing_address_line_2}"
        return f"{addr}, {self.city}, {self.state}, {self.country}"

    @property
    def primary_contact(self):
        """The primary SponsorContact, or None if there isn't one."""
        try:
            return SponsorContact.objects.get_primary_contact(self)
        except SponsorContact.DoesNotExist:
            return None

    @property
    def slug(self):
        """URL-friendly slug derived from the sponsor's name."""
        return slugify(self.name)

    @property
    def admin_url(self):
        """URL of this sponsor's Django admin change page."""
        return reverse("admin:sponsors_sponsor_change", args=[self.pk])
class SponsorContact(models.Model):
    """
    Sponsor contact information
    """

    # Contact type constants. (ACCOUTING_CONTACT keeps its historical
    # misspelling since the name is part of the public class interface.)
    PRIMARY_CONTACT = "primary"
    ADMINISTRATIVE_CONTACT = "administrative"
    ACCOUTING_CONTACT = "accounting"
    MANAGER_CONTACT = "manager"
    CONTACT_TYPES = [
        (PRIMARY_CONTACT, "Primary"),
        (ADMINISTRATIVE_CONTACT, "Administrative"),
        (ACCOUTING_CONTACT, "Accounting"),
        (MANAGER_CONTACT, "Manager"),
    ]
    objects = SponsorContactQuerySet.as_manager()
    sponsor = models.ForeignKey(
        "Sponsor", on_delete=models.CASCADE, related_name="contacts"
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE
    )  # Optionally related to a User! (This needs discussion)
    primary = models.BooleanField(
        default=False,
        help_text="The primary contact for a sponsorship will be responsible for managing deliverables we need to "
        "fulfill benefits. Primary contacts will receive all email notifications regarding sponsorship. "
    )
    administrative = models.BooleanField(
        default=False,
        help_text="Administrative contacts will only be notified regarding contracts."
    )
    accounting = models.BooleanField(
        default=False,
        help_text="Accounting contacts will only be notified regarding invoices and payments."
    )
    manager = models.BooleanField(
        default=False,
        help_text="If this contact can manage sponsorship information on python.org",
    )
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=256)
    phone = models.CharField("Contact Phone", max_length=32)

    # Sketch of something we'll need to determine if a user is able to make
    # _changes_ to sponsorship benefits/logos/descriptions/etc.
    @property
    def can_manage(self):
        """Whether this contact may change sponsorship data on python.org.

        True when the contact is linked to a user account and is either the
        primary or a manager contact. The previous implementation returned
        None (implicitly) in the negative case; it now always returns a
        bool, which is backward compatible for truthiness checks.
        """
        return self.user is not None and (self.primary or self.manager)

    def __str__(self):
        return f"Contact {self.name} from {self.sponsor}"
class SponsorBenefit(OrderedModel):
    """
    Link a benefit to a sponsorship application.
    Created after a new sponsorship
    """

    sponsorship = models.ForeignKey(
        'sponsors.Sponsorship', on_delete=models.CASCADE, related_name="benefits"
    )
    sponsorship_benefit = models.ForeignKey(
        'sponsors.SponsorshipBenefit',
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        help_text="Sponsorship Benefit this Sponsor Benefit came from",
    )
    program_name = models.CharField(
        max_length=1024,
        verbose_name="Program Name",
        help_text="For display in the contract and sponsor dashboard."
    )
    name = models.CharField(
        max_length=1024,
        verbose_name="Benefit Name",
        help_text="For display in the contract and sponsor dashboard.",
    )
    description = models.TextField(
        null=True,
        blank=True,
        verbose_name="Benefit Description",
        help_text="For display in the contract and sponsor dashboard.",
    )
    program = models.ForeignKey(
        'sponsors.SponsorshipProgram',
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        verbose_name="Sponsorship Program",
        help_text="Which sponsorship program the benefit is associated with.",
    )
    benefit_internal_value = models.PositiveIntegerField(
        null=True,
        blank=True,
        verbose_name="Benefit Internal Value",
        help_text="Benefit's internal value from when the Sponsorship gets created",
    )
    added_by_user = models.BooleanField(
        blank=True, default=False, verbose_name="Added by user?"
    )
    standalone = models.BooleanField(
        blank=True, default=False, verbose_name="Added as standalone benefit?"
    )

    def __str__(self):
        if self.program is not None:
            return f"{self.program} > {self.name}"
        # Fall back to the denormalized program name when the FK was cleared.
        return f"{self.program_name} > {self.name}"

    @property
    def features(self):
        """Related manager for this benefit's feature objects."""
        return self.benefitfeature_set

    @classmethod
    def new_copy(cls, benefit, **kwargs):
        """Create a SponsorBenefit snapshot from a SponsorshipBenefit.

        Copies the benefit's display fields and internal value, then
        generates concrete benefit features from the benefit's feature
        configurations.
        """
        kwargs["added_by_user"] = kwargs.get("added_by_user") or benefit.standalone
        kwargs["standalone"] = benefit.standalone
        sponsor_benefit = cls.objects.create(
            sponsorship_benefit=benefit,
            program_name=benefit.program.name,
            name=benefit.name,
            description=benefit.description,
            program=benefit.program,
            benefit_internal_value=benefit.internal_value,
            **kwargs,
        )
        # generate benefit features from benefit features configurations
        for feature_config in benefit.features_config.all():
            feature_config.create_benefit_feature(sponsor_benefit)
        return sponsor_benefit

    @property
    def legal_clauses(self):
        """Legal clauses from the source SponsorshipBenefit (or an empty list)."""
        if self.sponsorship_benefit is not None:
            return self.sponsorship_benefit.legal_clauses.all()
        return []

    @property
    def name_for_display(self):
        """Benefit name with each feature's display modifier applied in turn."""
        name = self.name
        for feature in self.features.all():
            name = feature.display_modifier(name)
        return name

    def reset_attributes(self, benefit):
        """
        This method resets all the sponsor benefit information
        fetching new data from the sponsorship benefit.
        """
        self.program_name = benefit.program.name
        self.name = benefit.name
        self.description = benefit.description
        self.program = benefit.program
        self.benefit_internal_value = benefit.internal_value
        self.standalone = benefit.standalone
        self.added_by_user = self.added_by_user or self.standalone
        # Regenerate benefit features from the benefit's feature
        # configurations. (Previous code bound delete()'s return value to
        # an unused variable.)
        self.features.all().delete()
        for feature_config in benefit.features_config.all():
            feature_config.create_benefit_feature(self)
        self.save()

    def delete(self, *args, **kwargs):
        """Delete the benefit along with its features.

        Accepts and forwards Django's ``delete()`` arguments (``using``,
        ``keep_parents``), which the previous override silently dropped.
        """
        self.features.all().delete()
        return super().delete(*args, **kwargs)

    class Meta(OrderedModel.Meta):
        pass
| {
"content_hash": "c46499bf36adf7568124c8b34973e569",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 119,
"avg_line_length": 34.56357388316151,
"alnum_prop": 0.650526943726387,
"repo_name": "python/pythondotorg",
"id": "ad2c4b8b1f4e3fdb8931d16a72bcb117a7a6a237",
"size": "10058",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sponsors/models/sponsors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "Dockerfile",
"bytes": "229"
},
{
"name": "HTML",
"bytes": "498813"
},
{
"name": "JavaScript",
"bytes": "24050"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1145343"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "198033"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``is_web_user`` field from the MALTRow model."""
    dependencies = [
        ('data_analytics', '0003_auto_20150810_1710'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='maltrow',
            name='is_web_user',
        ),
    ]
| {
"content_hash": "bb0a1b7d4e579a1720ff618ec7b6c0e3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 19.58823529411765,
"alnum_prop": 0.5915915915915916,
"repo_name": "qedsoftware/commcare-hq",
"id": "e8872d01ad756fe5dd1b6f949c81ccee618088f3",
"size": "357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/data_analytics/migrations/0004_auto_20150810_1710.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
from django.template import Template
from django.core.exceptions import ValidationError
from .constants import *
import hashlib, re
class Agent(models.Model):
    """A mock endpoint identified by protocol/host/port/path/method."""
    protocol = models.CharField(max_length=16)
    host = models.CharField(max_length=128)
    port = models.IntegerField(default=80)
    path = models.CharField(max_length=1024)
    method = models.CharField(max_length=16)
    content_type = models.CharField(max_length=64, null=True, blank=True)
    responder = models.CharField(default=SIMPLE_MOCK_RESPONDER[0], max_length=64, choices=MOCK_RESPONDERS)
    created = models.DateTimeField(default=timezone.now)
    class Meta:
        unique_together = ('protocol', 'host', 'port', 'path', 'method')
    def __str__(self):
        return '[{}] {}'.format(self.method, self.url())
    def hash(self):
        """MD5 hex digest of the string representation, used as a stable key."""
        return hashlib.md5(str(self).encode('utf-8')).hexdigest()
    def get_content_type(self):
        return self.content_type
    def url(self):
        """Full URL of this agent, e.g. ``http://host:80/path``."""
        return '{}://{}:{}{}'.format(self.protocol, self.host, self.port, self.path)
    def wsdl_url(self):
        """WSDL URL for XML agents; raises for any other content type."""
        if self.content_type == CONTENT_TYPE_XML:
            return self.url() + '?wsdl'
        else:
            raise Exception('There is no WSDL for Agent when content-type is different than {}'.format(CONTENT_TYPE_XML))
    def match(self, url):
        """Return True when `url` matches this agent's URL used as a regex.

        NOTE(review): the agent URL is interpreted as a regex pattern
        anchored only at the end, so regex metacharacters in host/path
        (e.g. '.' in the hostname) act as wildcards — presumably intended
        to allow regex paths; confirm.
        """
        return re.match(self.url() + r'$', url) != None
class Operation(models.Model):
    """A (SOAP-style) operation exposed by an agent, matched by message name."""
    agent = models.ForeignKey(Agent, on_delete=models.CASCADE, related_name='operations')
    name = models.CharField(max_length=128)
    input_message = models.CharField(max_length=128)
    output_message = models.CharField(max_length=128, null=True, blank=True)
    responder = models.CharField(default=SIMPLE_MOCK_RESPONDER[0], max_length=64, choices=MOCK_RESPONDERS)

    def __str__(self):
        return '{} [{}]'.format(self.agent, self.name)

    def hash(self):
        """MD5 hex digest of the string representation, used as a stable key."""
        digest = hashlib.md5(str(self).encode('utf-8'))
        return digest.hexdigest()

    def get_content_type(self):
        """Content type is inherited from the owning agent."""
        return self.agent.content_type

    def belongs_to(self, request):
        """Return True when the request body opens this operation's input
        message element (optionally namespace-prefixed)."""
        payload = request.body.decode(encoding='UTF-8')
        opening_tag = re.compile(r'<(.+:)?{}'.format(self.input_message))
        return opening_tag.search(payload) is not None
class Response(models.Model):
    """A canned HTTP response, attached to either an agent or an operation."""
    agent = models.ForeignKey(Agent, on_delete=models.CASCADE, null=True, blank=True, related_name='responses')
    operation = models.ForeignKey(Operation, on_delete=models.CASCADE, null=True, blank=True, related_name='responses')
    label = models.CharField(max_length=256)
    http_code = models.IntegerField(default=HTTP_CODES[0][0], choices=HTTP_CODES)
    content = models.TextField()
    enable = models.BooleanField(default=True)

    def __str__(self):
        if self.agent is not None:
            return '{} [{}] {}'.format(self.agent, self.http_code, self.label)
        if self.operation is not None:
            return '{} [{}] {}'.format(self.operation, self.http_code, self.label)
        return '[{}] {}'.format(self.http_code, self.label)

    def hash(self):
        """MD5 hex digest of the string representation, used as a stable key."""
        digest = hashlib.md5(str(self).encode('utf-8'))
        return digest.hexdigest()

    def template(self):
        """Wrap the stored content in a Django Template for rendering."""
        return Template(self.content)

    def clean(self):
        """Validate that exactly one of agent/operation is filled in."""
        has_agent = self.agent is not None
        has_operation = self.operation is not None
        if not has_agent and not has_operation:
            raise ValidationError('Agent or Operation is required')
        if has_agent and has_operation:
            raise ValidationError('Agent or Operation is required, you can\'t fill both')
class Filter(models.Model):
    """An ordered set of request conditions that selects matching requests.

    A filter matches a request only when *all* of its request conditions
    evaluate to True. Filters are ordered by agent then priority.
    """
    agent = models.ForeignKey(Agent, on_delete=models.CASCADE, null=True, blank=True, related_name='filters')
    operation = models.ForeignKey(Operation, on_delete=models.CASCADE, null=True, blank=True, related_name='filters')
    label = models.CharField(max_length=256)
    priority = models.IntegerField(default=0)
    enable = models.BooleanField(default=True)
    class Meta:
        ordering = ['agent_id', 'priority']
    def __str__(self):
        return '{} {} [{}] {}'.format(self.label, self.request_conditions.all(), self.priority, self.agent)
    def hash(self):
        """MD5 hex digest of the string representation, used as a stable key."""
        return hashlib.md5(str(self).encode('utf-8')).hexdigest()
    def clean(self):
        """Validate that exactly one of agent/operation is filled in."""
        if self.agent == None and self.operation == None:
            raise ValidationError('Agent or Operation is required')
        elif self.agent != None and self.operation != None:
            raise ValidationError('Agent or Operation is required, you can\'t fill both')
    def evaluate_request(self, request):
        """Return True when the request satisfies *all* of this filter's
        request conditions.

        Raises:
            ValueError: if a condition has an unknown field type or
                operator. (Previously an unknown field type crashed with an
                opaque NameError, and an unknown operator was silently
                ignored.)
        """
        for c in self.request_conditions.all():
            # Extract the value the condition is matched against.
            if c.field_type == 'CONTENT':
                input_value = request.body.decode("utf-8")
            elif c.field_type == 'HEADER':
                input_value = request.META[c.header_or_query_param]
            elif c.field_type == 'QUERY_PARAM':
                # NOTE(review): chooses GET vs POST by comparing their
                # sizes -- presumably a heuristic for where the parameters
                # ended up; confirm against callers.
                input_value = request.GET[c.header_or_query_param] if len(request.GET) > len(request.POST) else request.POST[c.header_or_query_param]
            else:
                raise ValueError('Unknown condition field type: {}'.format(c.field_type))
            # Apply the condition's operator to the extracted value.
            if c.operator == 'EQUALS':
                result = c.value == input_value
            elif c.operator == 'NOTEQUALS':
                result = c.value != input_value
            elif c.operator == 'CONTAINS':
                result = c.value in input_value
            elif c.operator == 'STARTSWITH':
                result = input_value.startswith(c.value)
            elif c.operator == 'ENDSWITH':
                result = input_value.endswith(c.value)
            elif c.operator == 'REGEX':
                regex = re.compile(c.value)
                result = len(regex.findall(input_value)) > 0
            else:
                raise ValueError('Unknown condition operator: {}'.format(c.operator))
            # All conditions must hold; bail out on the first failure.
            if not result:
                return False
        return True
class Condition(models.Model):
    """Abstract base for filter conditions; only holds the matched value.

    Note: ``__str__`` references ``field_type`` and ``operator``, which are
    defined by the concrete subclasses, so this class must stay abstract.
    """
    value = models.CharField(max_length=256)
    class Meta:
        abstract = True
    def __str__(self):
        return '[{}] [{}] [{}]'.format(self.field_type, self.operator, self.value)
class RequestCondition(Condition):
    """A condition evaluated against the incoming request by Filter.evaluate_request."""
    filter = models.ForeignKey(Filter, on_delete=models.CASCADE, related_name='request_conditions')
    field_type = models.CharField(max_length=32, choices=REQUEST_FIELD_TYPES)
    operator = models.CharField(max_length=16, choices=REQUEST_CONDITION_OPERATORS)
    header_or_query_param = models.CharField(max_length=64, null=True, blank=True)
    def clean(self):
        """Require the header/query-param name for HEADER and QUERY_PARAM conditions."""
        if self.field_type in ['HEADER', 'QUERY_PARAM'] and (self.header_or_query_param == None or self.header_or_query_param == ''): # name is mandatory for these field types
            raise ValidationError('Header field is required when condition filter type is set to HTTP Header or Query Parameter')
class ResponseCondition(Condition):
    """A condition on responses.

    NOTE(review): no evaluation logic for response conditions is visible in
    this module (unlike RequestCondition, which Filter.evaluate_request
    consumes) — confirm where these are used.
    """
    filter = models.ForeignKey(Filter, on_delete=models.CASCADE, related_name='response_conditions')
    field_type = models.CharField(max_length=32, choices=RESPONSE_FIELD_TYPES)
    operator = models.CharField(max_length=16, choices=RESPONSE_CONDITION_OPERATORS)
| {
"content_hash": "d630ff9e0b4a67561cd04fe3722cd5b1",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 149,
"avg_line_length": 40.883720930232556,
"alnum_prop": 0.64419795221843,
"repo_name": "rodrigozc/mockatron",
"id": "34bdccf9fc48b0c1b2f1e61de937ff5582dcdf0c",
"size": "7032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mockatron_core/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "265"
},
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "HTML",
"bytes": "45502"
},
{
"name": "JavaScript",
"bytes": "1651"
},
{
"name": "Python",
"bytes": "52376"
},
{
"name": "TypeScript",
"bytes": "55828"
}
],
"symlink_target": ""
} |
"""
Tellstick Component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tellstick/
"""
import logging
import threading
import voluptuous as vol
from homeassistant.helpers import discovery
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers.entity import Entity
DOMAIN = 'tellstick'
REQUIREMENTS = ['tellcore-py==1.1.2']
_LOGGER = logging.getLogger(__name__)
# Config/discovery payload keys.
ATTR_SIGNAL_REPETITIONS = 'signal_repetitions'
DEFAULT_SIGNAL_REPETITIONS = 1
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_DISCOVER_CONFIG = 'config'
# Use a global tellstick domain lock to handle Tellcore errors when calling
# into Tellcore concurrently.
TELLSTICK_LOCK = threading.Lock()
# Keep a reference to the callback registry. Used from entities that register
# callback listeners
TELLCORE_REGISTRY = None
# Component configuration: only the optional signal-repetition count.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(ATTR_SIGNAL_REPETITIONS,
                     default=DEFAULT_SIGNAL_REPETITIONS): vol.Coerce(int),
    }),
}, extra=vol.ALLOW_EXTRA)
def _discover(hass, config, found_devices, component_name):
    """Fire platform discovery for newly found Tellstick device ids.

    Args:
        hass: Home Assistant instance.
        config: Full component configuration (read for signal repetitions).
        found_devices: List of tellcore device ids to hand to the platform.
        component_name: Target platform ('switch' or 'light').
    """
    # Nothing found: skip logging and don't fire an empty discovery.
    # (Idiomatic emptiness check instead of `not len(...)`.)
    if not found_devices:
        return
    _LOGGER.info(
        "Discovered %d new %s devices", len(found_devices), component_name)
    signal_repetitions = config[DOMAIN].get(ATTR_SIGNAL_REPETITIONS)
    discovery.load_platform(hass, component_name, DOMAIN, {
        ATTR_DISCOVER_DEVICES: found_devices,
        ATTR_DISCOVER_CONFIG: signal_repetitions}, config)
def setup(hass, config):
    """Setup the Tellstick component.

    Creates the global TellstickRegistry, registers all tellcore devices
    and fires discovery: devices supporting dimming become lights, the
    rest become switches.
    """
    # pylint: disable=global-statement, import-error
    global TELLCORE_REGISTRY
    import tellcore.telldus as telldus
    import tellcore.constants as tellcore_constants
    from tellcore.library import DirectCallbackDispatcher
    # Direct dispatcher: callbacks run synchronously on the calling thread.
    core = telldus.TelldusCore(callback_dispatcher=DirectCallbackDispatcher())
    TELLCORE_REGISTRY = TellstickRegistry(hass, core)
    devices = core.devices()
    # Register devices
    TELLCORE_REGISTRY.register_devices(devices)
    # Discover the switches (devices without dimming capability)
    _discover(hass, config, [switch.id for switch in
                             devices if not switch.methods(
                                 tellcore_constants.TELLSTICK_DIM)],
              'switch')
    # Discover the lights (devices with dimming capability)
    _discover(hass, config, [light.id for light in
                             devices if light.methods(
                                 tellcore_constants.TELLSTICK_DIM)],
              'light')
    return True
class TellstickRegistry(object):
    """Book-keeping for Tellstick callbacks.

    Maps tellcore device ids to Home Assistant entities and to tellcore
    device objects, and owns registration / cleanup of the tellcore
    callback. Device specific logic lives in the entities themselves.
    """

    def __init__(self, hass, tellcore_lib):
        """Initialize the mappings and hook up the tellcore callback."""
        self._core_lib = tellcore_lib
        # tellcore id -> HA entity (for pushing state updates).
        self._id_to_entity_map = {}
        # tellcore id -> tellcore device object.
        self._id_to_device_map = {}
        self._setup_device_callback(hass, tellcore_lib)

    def _device_callback(self, tellstick_id, method, data, cid):
        """Push a tellcore event into the matching HA entity, if any."""
        target = self._id_to_entity_map.get(tellstick_id)
        if target is None:
            return
        target.set_tellstick_state(method, data)
        target.update_ha_state()

    def _setup_device_callback(self, hass, tellcore_lib):
        """Register the tellcore callback and arrange removal on shutdown."""
        handle = tellcore_lib.register_device_event(self._device_callback)

        def _unregister(event):
            """Unregister the callback binding when Home Assistant stops."""
            if handle is not None:
                tellcore_lib.unregister_callback(handle)

        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _unregister)

    def register_entity(self, tellcore_id, entity):
        """Associate an entity with a tellcore id for callback updates."""
        self._id_to_entity_map[tellcore_id] = entity

    def register_devices(self, devices):
        """Add a batch of tellcore devices to the id -> device map."""
        for device in devices:
            self._id_to_device_map[device.id] = device

    def get_device(self, tellcore_id):
        """Look up a tellcore device by id (None when unknown)."""
        return self._id_to_device_map.get(tellcore_id)
class TellstickDevice(Entity):
    """Representation of a Tellstick device.

    Contains the common logic for all Tellstick devices.
    """

    def __init__(self, tellstick_device, signal_repetitions):
        """Initialize the Tellstick device."""
        self.signal_repetitions = signal_repetitions
        self._state = None
        self.tellstick_device = tellstick_device
        # Add to id to entity mapping
        TELLCORE_REGISTRY.register_entity(tellstick_device.id, self)
        # Query tellcore for the current state
        self.update()

    @property
    def should_poll(self):
        """Tell Home Assistant not to poll this entity."""
        return False

    @property
    def assumed_state(self):
        """Tellstick devices are always assumed state."""
        return True

    @property
    def name(self):
        """Return the name of the switch if any."""
        return self.tellstick_device.name

    def set_tellstick_state(self, last_command_sent, last_data_sent):
        """Set the private switch state."""
        raise NotImplementedError(
            "set_tellstick_state needs to be implemented.")

    def _send_tellstick_command(self, command, data):
        """Do the actual call to the tellstick device."""
        raise NotImplementedError(
            "_call_tellstick needs to be implemented.")

    def call_tellstick(self, command, data=None):
        """Send a command to the device, repeating it signal_repetitions times."""
        from tellcore.library import TelldusError
        with TELLSTICK_LOCK:
            try:
                for _ in range(self.signal_repetitions):
                    self._send_tellstick_command(command, data)
                # Update the internal state
                self.set_tellstick_state(command, data)
                self.update_ha_state()
            except TelldusError as err:
                # Log the caught error instance — previously the exception
                # *class* was logged, hiding the actual failure details.
                _LOGGER.error(err)

    def update(self):
        """Poll the current state of the device."""
        import tellcore.constants as tellcore_constants
        from tellcore.library import TelldusError
        try:
            last_command = self.tellstick_device.last_sent_command(
                tellcore_constants.TELLSTICK_TURNON |
                tellcore_constants.TELLSTICK_TURNOFF |
                tellcore_constants.TELLSTICK_DIM
            )
            last_value = self.tellstick_device.last_sent_value()
            self.set_tellstick_state(last_command, last_value)
        except TelldusError as err:
            # Same fix as above: log the instance, not the class.
            _LOGGER.error(err)
| {
"content_hash": "89e7310fa827774eb5dd36de4de88e62",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 33.36538461538461,
"alnum_prop": 0.6498559077809798,
"repo_name": "srcLurker/home-assistant",
"id": "d2e296d61b6d6cb30cacf401de0954aee3a8c899",
"size": "6940",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tellstick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1446622"
},
{
"name": "Python",
"bytes": "3984365"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
from ..azure_common import BaseTest, arm_template
class VMSSTest(BaseTest):
    """Tests for the ``azure.vmss`` Cloud Custodian resource."""

    def setUp(self):
        super(VMSSTest, self).setUp()

    def test_validate_vmss_schemas(self):
        """A minimal vmss policy should pass schema validation."""
        with self.sign_out_patch():
            policy = self.load_policy(
                {'name': 'test-azure-vmss', 'resource': 'azure.vmss'},
                validate=True)
            self.assertTrue(policy)

    @arm_template('vmss.json')
    def test_find_by_name(self):
        """A value filter on name should match exactly one scale set."""
        name_filter = {
            'type': 'value',
            'key': 'name',
            'op': 'eq',
            'value': 'cctestvmss',
        }
        policy = self.load_policy({
            'name': 'test-vm-scale-set',
            'resource': 'azure.vmss',
            'filters': [name_filter],
        })
        resources = policy.run()
        self.assertEqual(len(resources), 1)
| {
"content_hash": "39446f5716e0a7c5ffd7ae373db61d4b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 49,
"avg_line_length": 27.266666666666666,
"alnum_prop": 0.48166259168704156,
"repo_name": "capitalone/cloud-custodian",
"id": "507f09e16f788632923fb49da18a5d902ee8548b",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests_azure/tests_resources/test_vmss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration creating the initial podcasts schema.

    NOTE(review): generated code -- the frozen ORM dict below must stay in
    sync with forwards()/backwards(); do not hand-edit field definitions.
    """

    def forwards(self, orm):
        """Create the four podcast tables."""
        # Adding model 'PodcastCategory'
        db.create_table('podcasts_podcastcategory', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
            ('name', self.gf('django.db.models.fields.TextField')()),
            ('order', self.gf('django.db.models.fields.IntegerField')(null=True)),
        ))
        db.send_create_signal('podcasts', ['PodcastCategory'])

        # Adding model 'Podcast'
        db.create_table('podcasts_podcast', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
            ('title', self.gf('django.db.models.fields.TextField')(null=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True)),
            ('rss_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['podcasts.PodcastCategory'], null=True)),
            ('most_recent_item_date', self.gf('django.db.models.fields.DateTimeField')(null=True)),
            ('medium', self.gf('django.db.models.fields.CharField')(max_length=8, null=True)),
            ('provider', self.gf('django.db.models.fields.TextField')()),
            ('license', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
            ('logo', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
        ))
        db.send_create_signal('podcasts', ['Podcast'])

        # Adding model 'PodcastItem'
        db.create_table('podcasts_podcastitem', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('podcast', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['podcasts.Podcast'])),
            ('title', self.gf('django.db.models.fields.TextField')(null=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True)),
            ('published_date', self.gf('django.db.models.fields.DateTimeField')(null=True)),
            ('author', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('duration', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
            ('guid', self.gf('django.db.models.fields.TextField')()),
            ('order', self.gf('django.db.models.fields.IntegerField')(null=True)),
            ('license', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
        ))
        db.send_create_signal('podcasts', ['PodcastItem'])

        # Adding model 'PodcastEnclosure'
        db.create_table('podcasts_podcastenclosure', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('podcast_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['podcasts.PodcastItem'])),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('length', self.gf('django.db.models.fields.IntegerField')(null=True)),
            ('mimetype', self.gf('django.db.models.fields.TextField')(null=True)),
        ))
        db.send_create_signal('podcasts', ['PodcastEnclosure'])

    def backwards(self, orm):
        """Drop the four podcast tables (exact reverse of forwards)."""
        # Deleting model 'PodcastCategory'
        db.delete_table('podcasts_podcastcategory')

        # Deleting model 'Podcast'
        db.delete_table('podcasts_podcast')

        # Deleting model 'PodcastItem'
        db.delete_table('podcasts_podcastitem')

        # Deleting model 'PodcastEnclosure'
        db.delete_table('podcasts_podcastenclosure')

    # Frozen ORM state South uses to reconstruct the models at this point
    # in history; mirrors the tables created above.
    models = {
        'podcasts.podcast': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Podcast'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['podcasts.PodcastCategory']", 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'logo': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'medium': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
            'most_recent_item_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'provider': ('django.db.models.fields.TextField', [], {}),
            'rss_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
        },
        'podcasts.podcastcategory': {
            'Meta': {'ordering': "('order', 'name')", 'object_name': 'PodcastCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'podcasts.podcastenclosure': {
            'Meta': {'object_name': 'PodcastEnclosure'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'length': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'mimetype': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'podcast_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['podcasts.PodcastItem']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        'podcasts.podcastitem': {
            'Meta': {'object_name': 'PodcastItem'},
            'author': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'guid': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'license': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'podcast': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['podcasts.Podcast']"}),
            'published_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
        }
    }

    complete_apps = ['podcasts']
| {
"content_hash": "56cd682b116bd54ab1493efb820de15c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 134,
"avg_line_length": 60.43089430894309,
"alnum_prop": 0.5806538409794161,
"repo_name": "mollyproject/mollyproject",
"id": "41d7e92d7c5534e7e4b6c7cc1b949bc7d1d884b7",
"size": "7451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molly/apps/podcasts/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90319"
},
{
"name": "JavaScript",
"bytes": "76592"
},
{
"name": "Python",
"bytes": "1120664"
},
{
"name": "Shell",
"bytes": "4042"
},
{
"name": "XSLT",
"bytes": "11864"
}
],
"symlink_target": ""
} |
import rados, sys

# Connect to the Ceph cluster described by the local ceph.conf.
cluster = rados.Rados(conffile='ceph.conf')
cluster.connect()
try:
    # NOTE(review): stats are fetched but unused in the original script;
    # kept so cluster connectivity is still exercised.
    cluster_stats = cluster.get_cluster_stats()
    # print() with a single argument behaves the same under Python 2 and 3;
    # the original `print pool` statement is a SyntaxError on Python 3.
    for pool in cluster.list_pools():
        print(pool)
finally:
    # Always release the cluster handle, even if listing pools fails.
    cluster.shutdown()
| {
"content_hash": "83c165f2d25a8cc70e8a4dbaefa63afb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 43,
"avg_line_length": 17.181818181818183,
"alnum_prop": 0.7354497354497355,
"repo_name": "motobyus/moto",
"id": "f40a265ea80b7d788710d6f90a7e84f0e6a681d7",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex/exCeph/libradosTest/pool/librados_poolList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3850"
},
{
"name": "HTML",
"bytes": "26974"
},
{
"name": "JavaScript",
"bytes": "32091"
},
{
"name": "Python",
"bytes": "136220"
},
{
"name": "R",
"bytes": "117"
},
{
"name": "Shell",
"bytes": "1695"
},
{
"name": "Vim script",
"bytes": "1108"
}
],
"symlink_target": ""
} |
import os
import unittest
from captcha import fields
from django.forms import Form
class TestForm(Form):
    """Minimal Django form with one ReCaptcha field, used by the tests below."""

    # White theme is arbitrary; only the field's validation path is tested.
    captcha = fields.ReCaptchaField(attrs={'theme': 'white'})
class TestCase(unittest.TestCase):
    """Exercise the RECAPTCHA_TESTING environment-variable bypass."""

    def setUp(self):
        os.environ['RECAPTCHA_TESTING'] = 'True'

    def tearDown(self):
        del os.environ['RECAPTCHA_TESTING']

    def test_envvar_enabled(self):
        """With testing mode on, the magic PASSED response validates."""
        form = TestForm({'recaptcha_response_field': 'PASSED'})
        self.assertTrue(form.is_valid())

    def test_envvar_disabled(self):
        """With testing mode off, the magic PASSED response is rejected."""
        os.environ['RECAPTCHA_TESTING'] = 'False'
        form = TestForm({'recaptcha_response_field': 'PASSED'})
        self.assertFalse(form.is_valid())
| {
"content_hash": "0555c35e2bc1e809d7a3cae9191c5513",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6577540106951871,
"repo_name": "JioCloud/django-recaptcha",
"id": "622ef7cee193285d48d8fcb755094b907ebf288d",
"size": "748",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "captcha/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10494"
}
],
"symlink_target": ""
} |
import mock
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import attach_interfaces \
as attach_interfaces_v21
from nova.compute import api as compute_api
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network_cache_model
# Instance UUIDs used throughout the fixtures below.
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'

# Neutron port UUIDs; the NOT_FOUND one never appears in `ports`, so
# the fake helpers raise PortNotFound for it.
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
FAKE_NOT_FOUND_PORT_ID = '00000000-0000-0000-0000-000000000000'

# Network UUIDs; BAD_NET triggers NetworkNotFound in fake_attach_interface.
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'

# Port fixtures in the shape returned by neutron's show_port.
# port_data1/2 are attached to FAKE_UUID1; port_data3 is unattached.
port_data1 = {
    "id": FAKE_PORT_ID1,
    "network_id": FAKE_NET_ID1,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "aa:aa:aa:aa:aa:aa",
    "fixed_ips": ["10.0.1.2"],
    "device_id": FAKE_UUID1,
}

port_data2 = {
    "id": FAKE_PORT_ID2,
    "network_id": FAKE_NET_ID2,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "bb:bb:bb:bb:bb:bb",
    "fixed_ips": ["10.0.2.2"],
    "device_id": FAKE_UUID1,
}

port_data3 = {
    "id": FAKE_PORT_ID3,
    "network_id": FAKE_NET_ID3,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "bb:bb:bb:bb:bb:bb",
    "fixed_ips": ["10.0.2.2"],
    "device_id": '',
}

# Networks/ports the fake compute API knows about; index-aligned so
# fake_attach_interface can map a network to its default port.
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_show_port(context, port_id, **kwargs):
    """Stub of network_api.show_port backed by the module-level fixtures."""
    match = next((p for p in ports if p['id'] == port_id), None)
    if match is None:
        raise exception.PortNotFound(port_id=port_id)
    return {'port': match}
def fake_attach_interface(self, context, instance, network_id, port_id,
                          requested_ip='192.168.1.3', tag=None):
    """Stub of compute_api.API.attach_interface backed by the fixtures.

    Defaults the network when none is given, validates both ids against
    the module-level fixtures, and returns a VIF dict on success.
    Raises NetworkNotFound / PortNotFound for the sentinel "bad" ids.
    """
    if not network_id:
        # if no network_id is given when add a port to an instance, use the
        # first default network.
        network_id = fake_networks[0]
    if network_id == FAKE_BAD_NET_ID:
        raise exception.NetworkNotFound(network_id=network_id)
    if not port_id:
        # Pick the fixture port that belongs to the requested network
        # (fake_networks and ports are index-aligned).
        port_id = ports[fake_networks.index(network_id)]['id']
    if port_id == FAKE_NOT_FOUND_PORT_ID:
        raise exception.PortNotFound(port_id=port_id)
    # Build a VIF reflecting what was attached.
    vif = fake_network_cache_model.new_vif()
    vif['id'] = port_id
    vif['network']['id'] = network_id
    vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
    return vif
def fake_detach_interface(self, context, instance, port_id):
    """Stub detach that only verifies the port exists in the fixtures."""
    if not any(port['id'] == port_id for port in ports):
        raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, *args, **kwargs):
    """Stub of compute_api.API.get returning a bare Instance (FAKE_UUID1)."""
    return objects.Instance(uuid=FAKE_UUID1)
class InterfaceAttachTestsV21(test.NoDBTestCase):
    """Unit tests for the v2.1 os-attach-interfaces API controller.

    The compute and network APIs are stubbed with the module-level
    ``fake_*`` helpers, so no RPC or real neutron access happens here.
    """

    # Subclass hooks: other microversions/legacy variants can override the
    # controller class and expected exception types without duplicating
    # the test bodies.
    controller_cls = attach_interfaces_v21.InterfaceAttachmentController
    validate_exc = exception.ValidationError
    in_use_exc = exc.HTTPConflict
    not_found_exc = exc.HTTPNotFound
    not_usable_exc = exc.HTTPBadRequest

    def setUp(self):
        super(InterfaceAttachTestsV21, self).setUp()
        self.flags(timeout=30, group='neutron')
        self.stub_out('nova.compute.api.API.get', fake_get_instance)
        # What show() should return for port_data1.
        self.expected_show = {'interfaceAttachment':
            {'net_id': FAKE_NET_ID1,
             'port_id': FAKE_PORT_ID1,
             'mac_addr': port_data1['mac_address'],
             'port_state': port_data1['status'],
             'fixed_ips': port_data1['fixed_ips'],
             }}
        self.attachments = self.controller_cls()
        # Route neutron show_port through the fixture-backed fake for the
        # whole test run; undone automatically via addCleanup.
        show_port_patch = mock.patch.object(self.attachments.network_api,
                                            'show_port', fake_show_port)
        show_port_patch.start()
        self.addCleanup(show_port_patch.stop)
        self.req = fakes.HTTPRequest.blank('')

    @mock.patch.object(compute_api.API, 'get',
                       side_effect=exception.InstanceNotFound(instance_id=''))
    def _test_instance_not_found(self, func, args, mock_get, kwargs=None):
        """Assert *func* translates InstanceNotFound into HTTP 404."""
        if not kwargs:
            kwargs = {}
        self.assertRaises(exc.HTTPNotFound, func, self.req, *args, **kwargs)

    def test_show_instance_not_found(self):
        self._test_instance_not_found(self.attachments.show, ('fake', 'fake'))

    def test_index_instance_not_found(self):
        self._test_instance_not_found(self.attachments.index, ('fake', ))

    def test_detach_interface_instance_not_found(self):
        self._test_instance_not_found(self.attachments.delete,
                                      ('fake', 'fake'))

    def test_attach_interface_instance_not_found(self):
        self._test_instance_not_found(
            self.attachments.create, ('fake', ),
            kwargs={'body': {'interfaceAttachment': {}}})

    def test_show(self):
        result = self.attachments.show(self.req, FAKE_UUID1, FAKE_PORT_ID1)
        self.assertEqual(self.expected_show, result)

    def test_show_with_port_not_found(self):
        # FAKE_UUID2 owns no ports in the fixtures.
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show, self.req, FAKE_UUID2,
                          FAKE_PORT_ID1)

    def test_show_forbidden(self):
        with mock.patch.object(self.attachments.network_api, 'show_port',
                               side_effect=exception.Forbidden):
            self.assertRaises(exc.HTTPForbidden,
                              self.attachments.show, self.req, FAKE_UUID1,
                              FAKE_PORT_ID1)

    def test_delete(self):
        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface)
        inst = objects.Instance(uuid=FAKE_UUID1)
        with mock.patch.object(common, 'get_instance',
                               return_value=inst) as mock_get_instance:
            result = self.attachments.delete(self.req, FAKE_UUID1,
                                             FAKE_PORT_ID1)
            # NOTE: on v2.1, http status code is set as wsgi_code of API
            # method instead of status_int in a response object.
            if isinstance(self.attachments,
                          attach_interfaces_v21.InterfaceAttachmentController):
                status_int = self.attachments.delete.wsgi_code
            else:
                status_int = result.status_int
            self.assertEqual(202, status_int)
            ctxt = self.req.environ['nova.context']
            mock_get_instance.assert_called_with(
                self.attachments.compute_api, ctxt, FAKE_UUID1,
                expected_attrs=['device_metadata'])

    def test_detach_interface_instance_locked(self):
        def fake_detach_interface_from_locked_server(self, context,
                                                     instance, port_id):
            raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)

        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface_from_locked_server)
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.delete,
                          self.req,
                          FAKE_UUID1,
                          FAKE_PORT_ID1)

    def test_delete_interface_not_found(self):
        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface)
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.delete,
                          self.req,
                          FAKE_UUID1,
                          'invalid-port-id')

    def test_attach_interface_instance_locked(self):
        def fake_attach_interface_to_locked_server(self, context,
            instance, network_id, port_id, requested_ip, tag=None):
            raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)

        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface_to_locked_server)
        body = {}
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)

    def test_attach_interface_without_network_id(self):
        # With an empty body the fake defaults to the first network.
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        body = {}
        result = self.attachments.create(self.req, FAKE_UUID1, body=body)
        self.assertEqual(result['interfaceAttachment']['net_id'],
            FAKE_NET_ID1)

    @mock.patch.object(
        compute_api.API, 'attach_interface',
        side_effect=exception.NetworkInterfaceTaggedAttachNotSupported())
    def test_interface_tagged_attach_not_supported(self, mock_attach):
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
        self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
                          self.req, FAKE_UUID1, body=body)

    def test_attach_interface_with_network_id(self):
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
        result = self.attachments.create(self.req, FAKE_UUID1, body=body)
        self.assertEqual(result['interfaceAttachment']['net_id'],
                         FAKE_NET_ID2)

    def _attach_interface_bad_request_case(self, body):
        """Assert create() rejects *body* with 400."""
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        self.assertRaises(exc.HTTPBadRequest,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)

    def _attach_interface_not_found_case(self, body):
        """Assert create() rejects *body* with the configured 404 type."""
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        self.assertRaises(self.not_found_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)

    def test_attach_interface_with_port_and_network_id(self):
        # Supplying both ids at once is invalid.
        body = {
            'interfaceAttachment': {
                'port_id': FAKE_PORT_ID1,
                'net_id': FAKE_NET_ID2
            }
        }
        self._attach_interface_bad_request_case(body)

    def test_attach_interface_with_not_found_network_id(self):
        body = {
            'interfaceAttachment': {
                'net_id': FAKE_BAD_NET_ID
            }
        }
        self._attach_interface_not_found_case(body)

    def test_attach_interface_with_not_found_port_id(self):
        body = {
            'interfaceAttachment': {
                'port_id': FAKE_NOT_FOUND_PORT_ID
            }
        }
        self._attach_interface_not_found_case(body)

    def test_attach_interface_with_invalid_state(self):
        def fake_attach_interface_invalid_state(*args, **kwargs):
            raise exception.InstanceInvalidState(
                instance_uuid='', attr='', state='',
                method='attach_interface')

        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface_invalid_state)
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)

    def test_detach_interface_with_invalid_state(self):
        def fake_detach_interface_invalid_state(*args, **kwargs):
            raise exception.InstanceInvalidState(
                instance_uuid='', attr='', state='',
                method='detach_interface')

        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface_invalid_state)
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.delete,
                          self.req,
                          FAKE_UUID1,
                          FAKE_NET_ID1)

    @mock.patch.object(compute_api.API, 'detach_interface',
                       side_effect=NotImplementedError())
    def test_detach_interface_with_not_implemented(self, _mock):
        self.assertRaises(exc.HTTPNotImplemented,
                          self.attachments.delete,
                          self.req, FAKE_UUID1, FAKE_NET_ID1)

    def test_attach_interface_invalid_fixed_ip(self):
        body = {
            'interfaceAttachment': {
                'net_id': FAKE_NET_ID1,
                'fixed_ips': [{'ip_address': 'invalid_ip'}]
            }
        }
        self.assertRaises(self.validate_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_fixed_ip_already_in_use(self,
                                                      attach_mock,
                                                      get_mock):
        fake_instance = objects.Instance(uuid=FAKE_UUID1)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.FixedIpAlreadyInUse(
            address='10.0.2.2', instance_uuid=FAKE_UUID1)
        body = {}
        self.assertRaises(self.in_use_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_port_in_use(self,
                                          attach_mock,
                                          get_mock):
        fake_instance = objects.Instance(uuid=FAKE_UUID1)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.PortInUse(
            port_id=FAKE_PORT_ID1)
        body = {}
        self.assertRaises(self.in_use_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_port_not_usable(self,
                                              attach_mock,
                                              get_mock):
        fake_instance = objects.Instance(uuid=FAKE_UUID1)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.PortNotUsable(
            port_id=FAKE_PORT_ID1,
            instance=fake_instance.uuid)
        body = {}
        self.assertRaises(self.not_usable_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_failed_no_network(self, attach_mock, get_mock):
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=FAKE_UUID2)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = (
            exception.InterfaceAttachFailedNoNetwork(project_id=FAKE_UUID2))
        self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
                          self.req, FAKE_UUID1, body={})
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_no_more_fixed_ips(self,
                                                attach_mock,
                                                get_mock):
        fake_instance = objects.Instance(uuid=FAKE_UUID1)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.NoMoreFixedIps(
            net=FAKE_NET_ID1)
        body = {}
        self.assertRaises(exc.HTTPBadRequest,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_failed_securitygroup_cannot_be_applied(
            self, attach_mock, get_mock):
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=FAKE_UUID2)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = (
            exception.SecurityGroupCannotBeApplied())
        self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
                          self.req, FAKE_UUID1, body={})
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None)

    def _test_attach_interface_with_invalid_parameter(self, param):
        """Assert schema validation rejects the malformed *param* body."""
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        body = {'interface_attachment': param}
        self.assertRaises(exception.ValidationError,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)

    def test_attach_interface_instance_with_non_uuid_net_id(self):
        param = {'net_id': 'non_uuid'}
        self._test_attach_interface_with_invalid_parameter(param)

    def test_attach_interface_instance_with_non_uuid_port_id(self):
        param = {'port_id': 'non_uuid'}
        self._test_attach_interface_with_invalid_parameter(param)

    def test_attach_interface_instance_with_non_array_fixed_ips(self):
        param = {'fixed_ips': 'non_array'}
        self._test_attach_interface_with_invalid_parameter(param)
class InterfaceAttachTestsV249(test.NoDBTestCase):
    """Microversion 2.49 tests: tag validation on interface attach."""

    controller_cls = attach_interfaces_v21.InterfaceAttachmentController

    def setUp(self):
        super(InterfaceAttachTestsV249, self).setUp()
        self.attachments = self.controller_cls()
        self.req = fakes.HTTPRequest.blank('', version='2.49')

    def _assert_tag_rejected(self, tag):
        """Creating an attachment with *tag* must fail schema validation."""
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2,
                                        'tag': tag}}
        self.assertRaises(exception.ValidationError, self.attachments.create,
                          self.req, FAKE_UUID1, body=body)

    def test_tagged_interface_attach_invalid_tag_comma(self):
        self._assert_tag_rejected(',')

    def test_tagged_interface_attach_invalid_tag_slash(self):
        self._assert_tag_rejected('/')

    def test_tagged_interface_attach_invalid_tag_too_long(self):
        self._assert_tag_rejected(''.join(map(str, range(10, 41))))

    @mock.patch('nova.compute.api.API.attach_interface')
    @mock.patch('nova.compute.api.API.get', fake_get_instance)
    def test_tagged_interface_attach_valid_tag(self, _):
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2,
                                        'tag': 'foo'}}
        with mock.patch.object(self.attachments, 'show'):
            self.attachments.create(self.req, FAKE_UUID1, body=body)
class AttachInterfacesPolicyEnforcementv21(test.NoDBTestCase):
    """Verify every controller action is gated by the policy rules."""

    def setUp(self):
        super(AttachInterfacesPolicyEnforcementv21, self).setUp()
        self.controller = \
            attach_interfaces_v21.InterfaceAttachmentController()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-attach-interfaces"
        self.policy.set_rules({self.rule_name: "project:non_fake"})

    def _check_rejected(self, rule, method, *args, **kwargs):
        """Call *method* and assert it is blocked by policy *rule*."""
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            method, self.req, *args, **kwargs)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule,
            exc.format_message())

    def test_index_attach_interfaces_policy_failed(self):
        self._check_rejected(self.rule_name, self.controller.index,
                             fakes.FAKE_UUID)

    def test_show_attach_interfaces_policy_failed(self):
        self._check_rejected(self.rule_name, self.controller.show,
                             fakes.FAKE_UUID, FAKE_PORT_ID1)

    def test_create_attach_interfaces_policy_failed(self):
        self._check_rejected(self.rule_name, self.controller.create,
                             fakes.FAKE_UUID, body={})

    def test_delete_attach_interfaces_policy_failed(self):
        self._check_rejected(self.rule_name, self.controller.delete,
                             fakes.FAKE_UUID, FAKE_PORT_ID1)

    def test_attach_interfaces_create_policy_failed(self):
        # Base rule passes; the create sub-rule is what denies.
        create_rule = 'os_compute_api:os-attach-interfaces:create'
        self.policy.set_rules({self.rule_name: "@", create_rule: "!"})
        self._check_rejected(create_rule, self.controller.create,
                             fakes.FAKE_UUID, body={})

    def test_attach_interfaces_delete_policy_failed(self):
        # Base rule passes; the delete sub-rule is what denies.
        delete_rule = 'os_compute_api:os-attach-interfaces:delete'
        self.policy.set_rules({self.rule_name: "@", delete_rule: "!"})
        self._check_rejected(delete_rule, self.controller.delete,
                             fakes.FAKE_UUID, FAKE_PORT_ID1)
| {
"content_hash": "05b2f80d5af0431d5b5c178692a95d27",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 79,
"avg_line_length": 43.050269299820464,
"alnum_prop": 0.5807581633929688,
"repo_name": "jianghuaw/nova",
"id": "3b13cd78e0d06d1fc9106653138b0f898bdaaea5",
"size": "24604",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_attach_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
} |
class HTTPError(IOError):
    """Unified HTTPError used across all http_client implementations.
    """
    def __init__(self, response, message=None, swagger_result=None):
        """
        :type response: :class:`bravado_core.response.IncomingResponse`
        :param message: Optional string message
        :param swagger_result: If the response for this HTTPError is
            documented in the swagger spec, then this should be the result
            value of the response.
        """
        self.response = response
        self.message = message
        self.swagger_result = swagger_result

    def __str__(self):
        # Surface the most useful information first: status/reason, then
        # the optional message, then the decoded swagger result.
        parts = [str(self.response)]
        if self.message:
            parts.append(': ' + self.message)
        if self.swagger_result is not None:
            parts.append(': {0}'.format(self.swagger_result))
        return ''.join(parts)
| {
"content_hash": "37465c797cff9d43c0127f1e2eba997b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 43.28,
"alnum_prop": 0.6312384473197782,
"repo_name": "MphasisWyde/eWamSublimeAdaptor",
"id": "39c33c0bb323b8a8ec351d1970f52ce1e89f4291",
"size": "1108",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "POC/v0_4_POC_with_generic_cmd_and_swagger/third-party/bravado/exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "338"
},
{
"name": "JavaScript",
"bytes": "18521"
},
{
"name": "Python",
"bytes": "141245"
}
],
"symlink_target": ""
} |
'''
Return/control aspects of the grains data
'''
# Import python libs
from __future__ import print_function
import collections
import math
import operator
import os
import random
import yaml
import logging
# Import salt libs
import salt.utils
import salt.utils.dictupdate
from salt.exceptions import SaltException
# This module may run on any proxy minion type.
__proxyenabled__ = ['*']
# Seed the grains dict so cython will build
__grains__ = {}
# Change the default outputter to make it more readable
__outputter__ = {
    'items': 'grains',
    'item': 'grains',
    'setval': 'grains',
}
# http://stackoverflow.com/a/12414913/127816
# Factory for a defaultdict whose missing keys auto-create further nested
# defaultdicts, giving an arbitrarily deep "infinite" mapping.
_infinitedict = lambda: collections.defaultdict(_infinitedict)
log = logging.getLogger(__name__)
def _serial_sanitizer(instr):
'''Replaces the last 1/4 of a string with X's'''
length = len(instr)
index = int(math.floor(length * .75))
return '{0}{1}'.format(instr[:index], 'X' * (length - index))
# Constant-replacement sanitizers: the input value is discarded entirely.
_FQDN_SANITIZER = lambda x: 'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: 'MINION'
_DOMAINNAME_SANITIZER = lambda x: 'DOMAINNAME'
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
    'serialnumber': _serial_sanitizer,
    'domain': _DOMAINNAME_SANITIZER,
    'fqdn': _FQDN_SANITIZER,
    'id': _FQDN_SANITIZER,
    'host': _HOSTNAME_SANITIZER,
    'localhost': _HOSTNAME_SANITIZER,
    'nodename': _HOSTNAME_SANITIZER,
}
def get(key, default='', delim=':'):
    '''
    Attempt to retrieve the named value from grains, if the named value is not
    available return the passed default. The default return is an empty string.

    Nested values can be addressed with a ``:``-delimited key. Given grains
    such as::

        {'pkg': {'apache': 'httpd'}}

    the apache value is retrieved with the key ``pkg:apache``.

    delim
        Specify an alternate delimiter to use when traversing a nested dict

        .. versionadded:: Helium

    CLI Example:

    .. code-block:: bash

        salt '*' grains.get pkg:apache
    '''
    # Delegate the nested-dict/list traversal to the shared salt helper.
    return salt.utils.traverse_dict_and_list(
        __grains__, key, default, delim)
def has_value(key):
    '''
    Determine whether a named value exists in the grains dictionary.

    Given a grains dictionary that contains the following structure::

        {'pkg': {'apache': 'httpd'}}

    One would determine if the apache key in the pkg dict exists by::

        pkg:apache

    CLI Example:

    .. code-block:: bash

        salt '*' grains.has_value pkg:apache
    '''
    # bool() replaces the redundant ``True if ... else False`` ternary.
    # NOTE: a grain whose value is itself falsy (0, '', None) is reported
    # as absent, exactly as in the original implementation.
    return bool(salt.utils.traverse_dict_and_list(__grains__, key, False))
def items(sanitize=False):
    '''
    Return all of the minion's grains

    CLI Example:

    .. code-block:: bash

        salt '*' grains.items

    Sanitized CLI Example:

    .. code-block:: bash

        salt '*' grains.items sanitize=True
    '''
    # Fast path: hand back the live grains dict when no sanitizing is asked.
    if not salt.utils.is_true(sanitize):
        return __grains__
    # Work on a shallow copy so the real grains are never mutated.
    sanitized = dict(__grains__)
    for grain_name, sanitizer in _SANITIZERS.items():
        if grain_name in sanitized:
            sanitized[grain_name] = sanitizer(sanitized[grain_name])
    return sanitized
def item(*args, **kwargs):
    '''
    Return one or more grains

    CLI Example:

    .. code-block:: bash

        salt '*' grains.item os
        salt '*' grains.item os osrelease oscodename

    Sanitized CLI Example:

    .. code-block:: bash

        salt '*' grains.item host sanitize=True
    '''
    # Collect only the requested grains that actually exist; unknown
    # names are silently skipped.
    result = {}
    for grain_name in args:
        if grain_name in __grains__:
            result[grain_name] = __grains__[grain_name]
    if salt.utils.is_true(kwargs.get('sanitize')):
        for grain_name, sanitizer in _SANITIZERS.items():
            if grain_name in result:
                result[grain_name] = sanitizer(result[grain_name])
    return result
def setvals(grains, destructive=False):
    '''
    Set new grains values in the grains config file

    :param Destructive: If an operation results in a key being removed, delete the key, too. Defaults to False.

    CLI Example:

    .. code-block:: bash

        salt '*' grains.setvals "{'key1': 'val1', 'key2': 'val2'}"
    '''
    new_grains = grains
    if not isinstance(new_grains, collections.Mapping):
        raise SaltException('setvals grains must be a dictionary.')
    # Locate the on-disk grains file: it lives next to (or, if conf_file is
    # a directory, inside) the minion configuration.
    grains = {}
    if os.path.isfile(__opts__['conf_file']):
        gfn = os.path.join(
            os.path.dirname(__opts__['conf_file']),
            'grains'
        )
    elif os.path.isdir(__opts__['conf_file']):
        gfn = os.path.join(
            __opts__['conf_file'],
            'grains'
        )
    else:
        # Fall back to the same sibling-path layout as the isfile case.
        gfn = os.path.join(
            os.path.dirname(__opts__['conf_file']),
            'grains'
        )
    # Load any previously persisted custom grains so they are preserved.
    if os.path.isfile(gfn):
        with salt.utils.fopen(gfn, 'rb') as fp_:
            try:
                grains = yaml.safe_load(fp_.read())
            except Exception as e:
                # Deliberately return an error string (not raise) so the CLI
                # surfaces the problem without a traceback.
                return 'Unable to read existing grains file: {0}'.format(e)
        if not isinstance(grains, dict):
            grains = {}
    # Apply the requested changes both to the file contents and to the
    # in-memory __grains__ dict so the running minion sees them immediately.
    for key, val in new_grains.items():
        if val is None and destructive is True:
            # destructive mode: a None value means "remove this grain".
            if key in grains:
                del grains[key]
            if key in __grains__:
                del __grains__[key]
        else:
            grains[key] = val
            __grains__[key] = val
    # Cast defaultdict to dict; is there a more central place to put this?
    yaml.representer.SafeRepresenter.add_representer(collections.defaultdict,
            yaml.representer.SafeRepresenter.represent_dict)
    cstr = yaml.safe_dump(grains, default_flow_style=False)
    try:
        with salt.utils.fopen(gfn, 'w+') as fp_:
            fp_.write(cstr)
    except (IOError, OSError):
        # Best effort: log and continue rather than failing the whole call.
        msg = 'Unable to write to grains file at {0}. Check permissions.'
        log.error(msg.format(gfn))
    # Touch the module_refresh marker so the loader picks up the change.
    fn_ = os.path.join(__opts__['cachedir'], 'module_refresh')
    try:
        with salt.utils.fopen(fn_, 'w+') as fp_:
            fp_.write('')
    except (IOError, OSError):
        msg = 'Unable to write to cache file {0}. Check permissions.'
        log.error(msg.format(fn_))
    # Sync the grains
    __salt__['saltutil.sync_grains']()
    # Return the grains we just set to confirm everything was OK
    return new_grains
def setval(key, val, destructive=False):
    '''
    Set a grains value in the grains config file

    :param Destructive: If an operation results in a key being removed, delete the key, too. Defaults to False.

    CLI Example:

    .. code-block:: bash

        salt '*' grains.setval key val
        salt '*' grains.setval key "{'sub-key': 'val', 'sub-key2': 'val2'}"
    '''
    # Thin convenience wrapper: delegate the single pair to setvals().
    return setvals({key: val}, destructive=destructive)
def append(key, val, convert=False):
    '''
    .. versionadded:: 0.17.0

    Append a value to a list in the grains config file. If the grain doesn't
    exist, the grain key is added and the value is appended to the new grain
    as a list item.

    key
        The grain key to be appended to

    val
        The value to append to the grain key

    :param convert: If convert is True, convert non-list contents into a list.
        If convert is False and the grain contains non-list contents, an error
        is given. Defaults to False.

    CLI Example:

    .. code-block:: bash

        salt '*' grains.append key val
    '''
    current = get(key, [])
    # Optionally coerce a scalar grain into a single-element list.
    if convert is True and not isinstance(current, list):
        current = [current]
    if not isinstance(current, list):
        return 'The key {0} is not a valid list'.format(key)
    if val in current:
        return 'The val {0} was already in the list {1}'.format(val, key)
    current.append(val)
    return setval(key, current)
def remove(key, val):
    '''
    .. versionadded:: 0.17.0

    Remove a value from a list in the grains config file

    CLI Example:

    .. code-block:: bash

        salt '*' grains.remove key val
    '''
    current = get(key, [])
    if not isinstance(current, list):
        return 'The key {0} is not a valid list'.format(key)
    if val not in current:
        return 'The val {0} was not in the list {1}'.format(val, key)
    current.remove(val)
    return setval(key, current)
def delval(key, destructive=False):
    '''
    .. versionadded:: 0.17.0

    Delete a grain from the grains config file

    :param Destructive: Delete the key, too. Defaults to False.

    CLI Example:

    .. code-block:: bash

        salt '*' grains.delval key
    '''
    # Return setval's confirmation value instead of discarding it, for
    # consistency with setval/append/remove (previously this returned None).
    return setval(key, None, destructive=destructive)
def ls():  # pylint: disable=C0103
    '''
    Return a list of all available grains

    CLI Example:

    .. code-block:: bash

        salt '*' grains.ls
    '''
    # Iterating the grains dict yields its keys; sorted() returns a list.
    return sorted(__grains__.keys())
def filter_by(lookup_dict, grain='os_family', merge=None, default='default'):
    '''
    .. versionadded:: 0.17.0

    Look up the given grain in a given dictionary for the current OS and return
    the result.

    Primarily intended for Jinja lookup tables of OS-specific data, e.g.::

        {% set apache = salt['grains.filter_by']({
            'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
            'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
            'default': 'Debian',
        }, merge=salt['pillar.get']('apache:lookup')) %}

    :param lookup_dict: A dictionary, keyed by a grain, containing a value or
        values relevant to systems matching that grain.
    :param grain: The name of a grain to match with the current system's
        grains (e.g. ``os_family``).
    :param merge: A dictionary to merge with the ``lookup_dict`` result before
        returning, allowing Pillar to override looked-up values.
    :param default: default lookup_dict's key used if the grain does not exists
        or if the grain value has no match on lookup_dict.

        .. versionadded:: 2014.1.0 (Hydrogen)

    CLI Example:

    .. code-block:: bash

        salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'

        # this one will render {D: {E: I, G: H}, J: K}
        salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
    '''
    # Resolve the grain value (falling back to the default key name), then
    # look it up, falling back to the lookup table's default entry.
    grain_value = __grains__.get(grain, default)
    fallback = lookup_dict.get(default, None)
    ret = lookup_dict.get(grain_value, fallback)
    if merge:
        if not isinstance(merge, collections.Mapping):
            raise SaltException('filter_by merge argument must be a dictionary.')
        if ret is None:
            ret = merge
        else:
            # In-place recursive merge; merge values win over lookup values.
            salt.utils.dictupdate.update(ret, merge)
    return ret
def _dict_from_path(path, val, delim=':'):
'''
Given a lookup string in the form of 'foo:bar:baz" return a nested
dictionary of the appropriate depth with the final segment as a value.
>>> _dict_from_path('foo:bar:baz', 'somevalue')
{"foo": {"bar": {"baz": "somevalue"}}
'''
nested_dict = _infinitedict()
keys = path.rsplit(delim)
lastplace = reduce(operator.getitem, keys[:-1], nested_dict)
lastplace[keys[-1]] = val
return nested_dict
def get_or_set_hash(name,
                    length=8,
                    chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'):
    '''
    Perform a one-time generation of a hash and write it to the local grains.
    If that grain has already been set return the value instead.

    This is useful for generating passwords or keys that are specific to a
    single minion that don't need to be stored somewhere centrally.

    State Example:

    .. code-block:: yaml

        some_mysql_user:
          mysql_user:
            - present
            - host: localhost
            - password: {{ salt['grains.get_or_set_hash']('mysql:some_mysql_user') }}

    CLI Example:

    .. code-block:: bash

        salt '*' grains.get_or_set_hash 'django:SECRET_KEY' 50
    '''
    existing = get(name, None)
    if existing is None:
        # First call: generate a fresh random string of the requested length.
        generated = ''.join(random.choice(chars) for _ in range(length))
        if ':' in name:
            # Nested grain requested: persist under the top-level key with
            # the remainder of the path expanded into nested dicts.
            name, subpath = name.split(':', 1)
            generated = _dict_from_path(subpath, generated)
        setval(name, generated)
    # Re-read so nested paths resolve to the leaf value just stored.
    return get(name)
| {
"content_hash": "3ccc00452a1bb2a4bd3f4d5ac59fe3aa",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 111,
"avg_line_length": 27.358299595141702,
"alnum_prop": 0.5985941546429893,
"repo_name": "MadeiraCloud/salt",
"id": "abf7529f743a65ada92c85c477d861ebc1dd8a62",
"size": "13539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/salt/modules/grains.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10058"
},
{
"name": "Makefile",
"bytes": "1815"
},
{
"name": "Python",
"bytes": "4530204"
},
{
"name": "Shell",
"bytes": "169676"
}
],
"symlink_target": ""
} |
class CarCompany(object):
    """Toy generator demo: a company whose factories produce cars while funded."""

    # Class-level default; tests flip this (on the instance) to stop output.
    has_money = True

    def create_factory(self):
        """Yield a new-car message for as long as the company has money."""
        announcement = "wrrrooommm... a brand new car rolled out!"
        while self.has_money:
            yield announcement
def test():
    """Exercise CarCompany's generator, including exhaustion behavior."""
    from simpletest import _assert, _assert_raises

    company = CarCompany()
    factory = company.create_factory()
    expected = "wrrrooommm... a brand new car rolled out!"
    _assert(factory.next(), expected)
    _assert(factory.next(), expected)

    company.has_money = False
    # No money, no cars
    _assert_raises(StopIteration, factory.next)

    company.has_money = True
    # There's money but the factory collapsed.
    _assert_raises(StopIteration, factory.next)
# Run the demonstration only when executed as a script, not on import.
if __name__ == '__main__':
    test()
| {
"content_hash": "4b88807308bfe48b4a52b724360f3666",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 80,
"avg_line_length": 30.541666666666668,
"alnum_prop": 0.6507503410641201,
"repo_name": "pgularski/snippets",
"id": "d96ee300fc81f1b079a81a1ed4c549bb656a1040",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/misc/generators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67595"
}
],
"symlink_target": ""
} |
"""
A file to contain specific logic to handle version upgrades in Kolibri.
"""
import logging
from django.db.models import F
from django.db.models import OuterRef
from django.db.models import Subquery
from morango.models.core import UUIDField
from kolibri.core.auth.models import Collection
from kolibri.core.exams.models import Exam
from kolibri.core.exams.models import ExamAssignment
from kolibri.core.upgrade import version_upgrade
logger = logging.getLogger(__name__)


# Runs once when upgrading from any Kolibri version older than 0.15.0.
@version_upgrade(old_version="<0.15.0")
def resolve_conflicting_datasets_for_exams_and_related_models():
    """
    Superusers could create exams or assignments in a different facility than the facility they
    reside in, which caused a mismatch in the exam dataset and prevented syncing
    """
    # un-set creator for all exams created by user in a different dataset
    # Correlated subquery: the dataset_id of the exam's own collection.
    exam_sub_query = Subquery(
        Collection.objects.filter(pk=OuterRef("collection_id")).values("dataset_id")[
            :1
        ],
        output_field=UUIDField(),
    )
    # NOTE: exams must be fixed BEFORE assignments below, since the
    # assignment subquery reads the (now corrected) Exam.dataset_id.
    Exam.objects.exclude(collection__dataset_id=F("dataset_id")).update(
        creator=None, dataset_id=exam_sub_query
    )
    # un-set assigned_by for all exam assignments assigned by a user in a different dataset
    # Correlated subquery: the dataset_id of the assignment's exam.
    assignment_sub_query = Subquery(
        Exam.objects.filter(pk=OuterRef("exam_id")).values("dataset_id")[:1],
        output_field=UUIDField(),
    )
    ExamAssignment.objects.exclude(exam__dataset_id=F("dataset_id")).update(
        assigned_by=None, dataset_id=assignment_sub_query
    )
| {
"content_hash": "c17b6188f37a77af73cac24760e75e2e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 95,
"avg_line_length": 36.04651162790697,
"alnum_prop": 0.7187096774193549,
"repo_name": "learningequality/kolibri",
"id": "403b6ec8c701759b78ae6cf4133f5a5b8094263e",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/exams/upgrade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3095586"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "Gherkin",
"bytes": "996801"
},
{
"name": "HTML",
"bytes": "22573"
},
{
"name": "JavaScript",
"bytes": "2233801"
},
{
"name": "Makefile",
"bytes": "12972"
},
{
"name": "Python",
"bytes": "3652744"
},
{
"name": "SCSS",
"bytes": "8551"
},
{
"name": "Shell",
"bytes": "3867"
},
{
"name": "Vue",
"bytes": "2193917"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
class brocade_vswitch(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
self._callback = kwargs.pop('callback')
def get_vnetwork_hosts_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_input_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
name = ET.SubElement(input, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
name = ET.SubElement(vnetwork_hosts, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_vmnic(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
vmnic = ET.SubElement(vnetwork_hosts, "vmnic")
vmnic.text = kwargs.pop('vmnic')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
datacenter = ET.SubElement(vnetwork_hosts, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
mac = ET.SubElement(vnetwork_hosts, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_vswitch(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
vswitch = ET.SubElement(vnetwork_hosts, "vswitch")
vswitch.text = kwargs.pop('vswitch')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
interface_type = ET.SubElement(vnetwork_hosts, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
interface_name = ET.SubElement(vnetwork_hosts, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
name = ET.SubElement(input, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
name = ET.SubElement(vnetwork_vms, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
mac = ET.SubElement(vnetwork_vms, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
datacenter = ET.SubElement(vnetwork_vms, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
ip = ET.SubElement(vnetwork_vms, "ip")
ip.text = kwargs.pop('ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_host_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
host_nn = ET.SubElement(vnetwork_vms, "host-nn")
host_nn.text = kwargs.pop('host_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_input_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
input = ET.SubElement(get_vnetwork_dvpgs, "input")
name = ET.SubElement(input, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
input = ET.SubElement(get_vnetwork_dvpgs, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
input = ET.SubElement(get_vnetwork_dvpgs, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
input = ET.SubElement(get_vnetwork_dvpgs, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
name = ET.SubElement(vnetwork_dvpgs, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
datacenter = ET.SubElement(vnetwork_dvpgs, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_dvs_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
dvs_nn = ET.SubElement(vnetwork_dvpgs, "dvs-nn")
dvs_nn.text = kwargs.pop('dvs_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_vlan(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs")
vlan = ET.SubElement(vnetwork_dvpgs, "vlan")
vlan.text = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_input_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
input = ET.SubElement(get_vnetwork_dvs, "input")
name = ET.SubElement(input, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
input = ET.SubElement(get_vnetwork_dvs, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
input = ET.SubElement(get_vnetwork_dvs, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_input_last_rcvd_instance(self, **kwargs):
    """Build the ``get_vnetwork_dvs`` RPC element with ``input/last-rcvd-instance`` set.

    Keyword Args:
        last_rcvd_instance (str): Text for the ``last-rcvd-instance`` leaf.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``last_rcvd_instance`` is missing from kwargs.
    """
    # Build the RPC root directly; drop the dead `ET.Element("config")` and
    # avoid shadowing the builtin `input`.
    config = ET.Element("get_vnetwork_dvs")
    input_el = ET.SubElement(config, "input")
    ET.SubElement(input_el, "last-rcvd-instance").text = kwargs.pop('last_rcvd_instance')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vnetwork_dvs_output_vnetwork_dvs_name(self, **kwargs):
    """Build the ``get_vnetwork_dvs`` RPC element with ``output/vnetwork-dvs/name`` set.

    Keyword Args:
        name (str): Text for the ``name`` leaf.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``name`` is missing from kwargs.
    """
    # Build the RPC root directly; the generated dead `ET.Element("config")`
    # assignment is dropped.
    config = ET.Element("get_vnetwork_dvs")
    output = ET.SubElement(config, "output")
    vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
    ET.SubElement(vnetwork_dvs, "name").text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vnetwork_dvs_output_vnetwork_dvs_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
host = ET.SubElement(vnetwork_dvs, "host")
host.text = kwargs.pop('host')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_output_vnetwork_dvs_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
datacenter = ET.SubElement(vnetwork_dvs, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_output_vnetwork_dvs_pnic(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
pnic = ET.SubElement(vnetwork_dvs, "pnic")
pnic.text = kwargs.pop('pnic')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_output_vnetwork_dvs_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
interface_type = ET.SubElement(vnetwork_dvs, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_output_vnetwork_dvs_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
interface_name = ET.SubElement(vnetwork_dvs, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvs_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_input_name(self, **kwargs):
    """Build the ``get_vnetwork_vswitches`` RPC element with ``input/name`` set.

    Keyword Args:
        name (str): Text for the ``name`` leaf under ``input``.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``name`` is missing from kwargs.
    """
    # Build the RPC root directly; drop the dead `ET.Element("config")` and
    # avoid shadowing the builtin `input`.
    config = ET.Element("get_vnetwork_vswitches")
    input_el = ET.SubElement(config, "input")
    ET.SubElement(input_el, "name").text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vnetwork_vswitches_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
input = ET.SubElement(get_vnetwork_vswitches, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
input = ET.SubElement(get_vnetwork_vswitches, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
input = ET.SubElement(get_vnetwork_vswitches, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_vnetwork_vswitches_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
name = ET.SubElement(vnetwork_vswitches, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_vnetwork_vswitches_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
host = ET.SubElement(vnetwork_vswitches, "host")
host.text = kwargs.pop('host')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_vnetwork_vswitches_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
datacenter = ET.SubElement(vnetwork_vswitches, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_vnetwork_vswitches_pnic(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
pnic = ET.SubElement(vnetwork_vswitches, "pnic")
pnic.text = kwargs.pop('pnic')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_vnetwork_vswitches_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
interface_type = ET.SubElement(vnetwork_vswitches, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_vnetwork_vswitches_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
interface_name = ET.SubElement(vnetwork_vswitches, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vswitches_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_input_name(self, **kwargs):
    """Build the ``get_vnetwork_portgroups`` RPC element with ``input/name`` set.

    Keyword Args:
        name (str): Text for the ``name`` leaf under ``input``.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``name`` is missing from kwargs.
    """
    # Build the RPC root directly; drop the dead `ET.Element("config")` and
    # avoid shadowing the builtin `input`.
    config = ET.Element("get_vnetwork_portgroups")
    input_el = ET.SubElement(config, "input")
    ET.SubElement(input_el, "name").text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vnetwork_portgroups_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
input = ET.SubElement(get_vnetwork_portgroups, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
input = ET.SubElement(get_vnetwork_portgroups, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
input = ET.SubElement(get_vnetwork_portgroups, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_vnetwork_pgs_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
name = ET.SubElement(vnetwork_pgs, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_vnetwork_pgs_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
datacenter = ET.SubElement(vnetwork_pgs, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_vnetwork_pgs_vs_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
vs_nn = ET.SubElement(vnetwork_pgs, "vs-nn")
vs_nn.text = kwargs.pop('vs_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_vnetwork_pgs_vlan(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
vlan = ET.SubElement(vnetwork_pgs, "vlan")
vlan.text = kwargs.pop('vlan')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_vnetwork_pgs_host_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
host_nn = ET.SubElement(vnetwork_pgs, "host-nn")
host_nn.text = kwargs.pop('host_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_portgroups_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_input_mac(self, **kwargs):
    """Build the ``get_vmpolicy_macaddr`` RPC element with ``input/mac`` set.

    Keyword Args:
        mac (str): Text for the ``mac`` leaf under ``input``.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``mac`` is missing from kwargs.
    """
    # Build the RPC root directly; drop the dead `ET.Element("config")` and
    # avoid shadowing the builtin `input`.
    config = ET.Element("get_vmpolicy_macaddr")
    input_el = ET.SubElement(config, "input")
    ET.SubElement(input_el, "mac").text = kwargs.pop('mac')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vmpolicy_macaddr_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
input = ET.SubElement(get_vmpolicy_macaddr, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
input = ET.SubElement(get_vmpolicy_macaddr, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
input = ET.SubElement(get_vmpolicy_macaddr, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
mac = ET.SubElement(vmpolicy_macaddr, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
name = ET.SubElement(vmpolicy_macaddr, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
datacenter = ET.SubElement(vmpolicy_macaddr, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_dvpg_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
dvpg_nn = ET.SubElement(vmpolicy_macaddr, "dvpg-nn")
dvpg_nn.text = kwargs.pop('dvpg_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_port_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
port_nn = ET.SubElement(vmpolicy_macaddr, "port-nn")
port_nn.text = kwargs.pop('port_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_port_prof(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
port_prof = ET.SubElement(vmpolicy_macaddr, "port-prof")
port_prof.text = kwargs.pop('port_prof')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vmpolicy_macaddr_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_id(self, **kwargs):
    """Build a ``config`` tree setting ``vcenter/id`` and hand it to the callback.

    Keyword Args:
        id (str): Text for the ``id`` leaf.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the ``config`` tree.
    """
    root = ET.Element("config")
    vcenter_el = ET.SubElement(root, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id_el = ET.SubElement(vcenter_el, "id")
    id_el.text = kwargs.pop('id')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcenter_credentials_url(self, **kwargs):
    """Build a ``config`` tree setting ``vcenter/credentials/url`` for the vcenter keyed by ``id``.

    Keyword Args:
        id (str): Key text for the ``vcenter/id`` leaf.
        url (str): Text for the ``credentials/url`` leaf.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the ``config`` tree.
    """
    root = ET.Element("config")
    vcenter_el = ET.SubElement(root, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter_el, "id").text = kwargs.pop('id')
    creds = ET.SubElement(vcenter_el, "credentials")
    ET.SubElement(creds, "url").text = kwargs.pop('url')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcenter_credentials_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_credentials_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_credentials_vrf_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
vrf_name = ET.SubElement(credentials, "vrf-name")
vrf_name.text = kwargs.pop('vrf_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_activate(self, **kwargs):
    """Build a ``config`` tree activating the vcenter identified by ``id``.

    Keyword Args:
        id (str): Key text for the ``vcenter/id`` leaf.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the ``config`` tree.

    Raises:
        KeyError: If ``id`` is missing from kwargs.
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id_key = ET.SubElement(vcenter, "id")
    id_key.text = kwargs.pop('id')
    # Presence-only leaf: the empty <activate/> element is the whole payload,
    # so the return value of SubElement does not need to be kept.
    ET.SubElement(vcenter, "activate")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def vcenter_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
interval = ET.SubElement(vcenter, "interval")
interval.text = kwargs.pop('interval')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_discovery_ignore_delete_all_response_ignore_value(self, **kwargs):
    """Build a ``config`` tree setting ``vcenter/discovery/ignore-delete-all-response/ignore-value``.

    Keyword Args:
        id (str): Key text for the ``vcenter/id`` leaf.
        ignore_value (str): Text for the ``ignore-value`` leaf.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the ``config`` tree.
    """
    root = ET.Element("config")
    vc = ET.SubElement(root, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vc, "id").text = kwargs.pop('id')
    # Descend through the nested containers down to the leaf.
    node = vc
    for tag in ("discovery", "ignore-delete-all-response", "ignore-value"):
        node = ET.SubElement(node, tag)
    node.text = kwargs.pop('ignore_value')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcenter_discovery_ignore_delete_all_response_always(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
discovery = ET.SubElement(vcenter, "discovery")
ignore_delete_all_response = ET.SubElement(discovery, "ignore-delete-all-response")
always = ET.SubElement(ignore_delete_all_response, "always")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_input_vcenter(self, **kwargs):
    """Build the ``get_vnetwork_hosts`` RPC element with ``input/vcenter`` set.

    Keyword Args:
        vcenter (str): Text for the ``vcenter`` leaf under ``input``.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``vcenter`` is missing from kwargs.
    """
    # Build the RPC root directly; drop the dead `ET.Element("config")` and
    # avoid shadowing the builtin `input`.
    config = ET.Element("get_vnetwork_hosts")
    input_el = ET.SubElement(config, "input")
    ET.SubElement(input_el, "vcenter").text = kwargs.pop('vcenter')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vnetwork_hosts_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_input_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
name = ET.SubElement(input, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
input = ET.SubElement(get_vnetwork_hosts, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
name = ET.SubElement(vnetwork_hosts, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_vmnic(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
vmnic = ET.SubElement(vnetwork_hosts, "vmnic")
vmnic.text = kwargs.pop('vmnic')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
datacenter = ET.SubElement(vnetwork_hosts, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
mac = ET.SubElement(vnetwork_hosts, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_vswitch(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
vswitch = ET.SubElement(vnetwork_hosts, "vswitch")
vswitch.text = kwargs.pop('vswitch')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
interface_type = ET.SubElement(vnetwork_hosts, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_vnetwork_hosts_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts")
interface_name = ET.SubElement(vnetwork_hosts, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_hosts_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_hosts = ET.Element("get_vnetwork_hosts")
config = get_vnetwork_hosts
output = ET.SubElement(get_vnetwork_hosts, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_name(self, **kwargs):
    """Build the ``get_vnetwork_vms`` RPC element with ``input/name`` set.

    Keyword Args:
        name (str): Text for the ``name`` leaf under ``input``.
        callback (callable): Optional override for ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled element.

    Raises:
        KeyError: If ``name`` is missing from kwargs.
    """
    # Build the RPC root directly; drop the dead `ET.Element("config")` and
    # avoid shadowing the builtin `input`.
    config = ET.Element("get_vnetwork_vms")
    input_el = ET.SubElement(config, "input")
    ET.SubElement(input_el, "name").text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_vnetwork_vms_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
name = ET.SubElement(vnetwork_vms, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
mac = ET.SubElement(vnetwork_vms, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
datacenter = ET.SubElement(vnetwork_vms, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
ip = ET.SubElement(vnetwork_vms, "ip")
ip.text = kwargs.pop('ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_vnetwork_vms_host_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
vnetwork_vms = ET.SubElement(output, "vnetwork-vms")
host_nn = ET.SubElement(vnetwork_vms, "host-nn")
host_nn.text = kwargs.pop('host_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_vms_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_vnetwork_dvpgs_input_name(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with input/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_input_vcenter(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with input/vcenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "vcenter")
    leaf.text = kwargs.pop('vcenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_input_datacenter(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with input/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_input_last_rcvd_instance(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with input/last-rcvd-instance set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "last-rcvd-instance")
    leaf.text = kwargs.pop('last_rcvd_instance')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_output_vnetwork_dvpgs_name(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with output/vnetwork-dvpgs/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvpgs")
    leaf = ET.SubElement(wrapper, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_output_vnetwork_dvpgs_datacenter(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with output/vnetwork-dvpgs/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvpgs")
    leaf = ET.SubElement(wrapper, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_output_vnetwork_dvpgs_dvs_nn(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with output/vnetwork-dvpgs/dvs-nn set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvpgs")
    leaf = ET.SubElement(wrapper, "dvs-nn")
    leaf.text = kwargs.pop('dvs_nn')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_output_vnetwork_dvpgs_vlan(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with output/vnetwork-dvpgs/vlan set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvpgs")
    leaf = ET.SubElement(wrapper, "vlan")
    leaf.text = kwargs.pop('vlan')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_output_has_more(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with output/has-more set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "has-more")
    leaf.text = kwargs.pop('has_more')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvpgs_output_instance_id(self, **kwargs):
    """Build a get_vnetwork_dvpgs RPC with output/instance-id set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvpgs")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "instance-id")
    leaf.text = kwargs.pop('instance_id')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)
def get_vnetwork_dvs_input_name(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with input/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_input_vcenter(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with input/vcenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "vcenter")
    leaf.text = kwargs.pop('vcenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_input_datacenter(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with input/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_input_last_rcvd_instance(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with input/last-rcvd-instance set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "last-rcvd-instance")
    leaf.text = kwargs.pop('last_rcvd_instance')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_vnetwork_dvs_name(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/vnetwork-dvs/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvs")
    leaf = ET.SubElement(wrapper, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_vnetwork_dvs_host(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/vnetwork-dvs/host set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvs")
    leaf = ET.SubElement(wrapper, "host")
    leaf.text = kwargs.pop('host')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_vnetwork_dvs_datacenter(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/vnetwork-dvs/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvs")
    leaf = ET.SubElement(wrapper, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_vnetwork_dvs_pnic(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/vnetwork-dvs/pnic set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvs")
    leaf = ET.SubElement(wrapper, "pnic")
    leaf.text = kwargs.pop('pnic')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_vnetwork_dvs_interface_type(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/vnetwork-dvs/interface-type set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvs")
    leaf = ET.SubElement(wrapper, "interface-type")
    leaf.text = kwargs.pop('interface_type')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_vnetwork_dvs_interface_name(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/vnetwork-dvs/interface-name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-dvs")
    leaf = ET.SubElement(wrapper, "interface-name")
    leaf.text = kwargs.pop('interface_name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_has_more(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/has-more set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "has-more")
    leaf.text = kwargs.pop('has_more')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_dvs_output_instance_id(self, **kwargs):
    """Build a get_vnetwork_dvs RPC with output/instance-id set and dispatch it."""
    rpc = ET.Element("get_vnetwork_dvs")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "instance-id")
    leaf.text = kwargs.pop('instance_id')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)
def get_vnetwork_vswitches_input_name(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with input/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_input_vcenter(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with input/vcenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "vcenter")
    leaf.text = kwargs.pop('vcenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_input_datacenter(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with input/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_input_last_rcvd_instance(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with input/last-rcvd-instance set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "last-rcvd-instance")
    leaf.text = kwargs.pop('last_rcvd_instance')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_vnetwork_vswitches_name(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/vnetwork-vswitches/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-vswitches")
    leaf = ET.SubElement(wrapper, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_vnetwork_vswitches_host(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/vnetwork-vswitches/host set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-vswitches")
    leaf = ET.SubElement(wrapper, "host")
    leaf.text = kwargs.pop('host')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_vnetwork_vswitches_datacenter(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/vnetwork-vswitches/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-vswitches")
    leaf = ET.SubElement(wrapper, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_vnetwork_vswitches_pnic(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/vnetwork-vswitches/pnic set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-vswitches")
    leaf = ET.SubElement(wrapper, "pnic")
    leaf.text = kwargs.pop('pnic')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_vnetwork_vswitches_interface_type(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/vnetwork-vswitches/interface-type set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-vswitches")
    leaf = ET.SubElement(wrapper, "interface-type")
    leaf.text = kwargs.pop('interface_type')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_vnetwork_vswitches_interface_name(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/vnetwork-vswitches/interface-name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-vswitches")
    leaf = ET.SubElement(wrapper, "interface-name")
    leaf.text = kwargs.pop('interface_name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_has_more(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/has-more set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "has-more")
    leaf.text = kwargs.pop('has_more')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_vswitches_output_instance_id(self, **kwargs):
    """Build a get_vnetwork_vswitches RPC with output/instance-id set and dispatch it."""
    rpc = ET.Element("get_vnetwork_vswitches")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "instance-id")
    leaf.text = kwargs.pop('instance_id')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)
def get_vnetwork_portgroups_input_name(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with input/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_input_vcenter(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with input/vcenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "vcenter")
    leaf.text = kwargs.pop('vcenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_input_datacenter(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with input/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_input_last_rcvd_instance(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with input/last-rcvd-instance set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "last-rcvd-instance")
    leaf.text = kwargs.pop('last_rcvd_instance')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_vnetwork_pgs_name(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/vnetwork-pgs/name set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-pgs")
    leaf = ET.SubElement(wrapper, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_vnetwork_pgs_datacenter(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/vnetwork-pgs/datacenter set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-pgs")
    leaf = ET.SubElement(wrapper, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_vnetwork_pgs_vs_nn(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/vnetwork-pgs/vs-nn set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-pgs")
    leaf = ET.SubElement(wrapper, "vs-nn")
    leaf.text = kwargs.pop('vs_nn')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_vnetwork_pgs_vlan(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/vnetwork-pgs/vlan set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-pgs")
    leaf = ET.SubElement(wrapper, "vlan")
    leaf.text = kwargs.pop('vlan')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_vnetwork_pgs_host_nn(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/vnetwork-pgs/host-nn set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vnetwork-pgs")
    leaf = ET.SubElement(wrapper, "host-nn")
    leaf.text = kwargs.pop('host_nn')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_has_more(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/has-more set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "has-more")
    leaf.text = kwargs.pop('has_more')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vnetwork_portgroups_output_instance_id(self, **kwargs):
    """Build a get_vnetwork_portgroups RPC with output/instance-id set and dispatch it."""
    rpc = ET.Element("get_vnetwork_portgroups")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "instance-id")
    leaf.text = kwargs.pop('instance_id')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)
def get_vmpolicy_macaddr_input_mac(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with input/mac set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "mac")
    leaf.text = kwargs.pop('mac')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_input_vcenter(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with input/vcenter set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "vcenter")
    leaf.text = kwargs.pop('vcenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_input_datacenter(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with input/datacenter set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_input_last_rcvd_instance(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with input/last-rcvd-instance set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "input")
    leaf = ET.SubElement(section, "last-rcvd-instance")
    leaf.text = kwargs.pop('last_rcvd_instance')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_vmpolicy_macaddr_mac(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/vmpolicy-macaddr/mac set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vmpolicy-macaddr")
    leaf = ET.SubElement(wrapper, "mac")
    leaf.text = kwargs.pop('mac')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/vmpolicy-macaddr/name set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vmpolicy-macaddr")
    leaf = ET.SubElement(wrapper, "name")
    leaf.text = kwargs.pop('name')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_vmpolicy_macaddr_datacenter(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/vmpolicy-macaddr/datacenter set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vmpolicy-macaddr")
    leaf = ET.SubElement(wrapper, "datacenter")
    leaf.text = kwargs.pop('datacenter')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_vmpolicy_macaddr_dvpg_nn(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/vmpolicy-macaddr/dvpg-nn set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vmpolicy-macaddr")
    leaf = ET.SubElement(wrapper, "dvpg-nn")
    leaf.text = kwargs.pop('dvpg_nn')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_vmpolicy_macaddr_port_nn(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/vmpolicy-macaddr/port-nn set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vmpolicy-macaddr")
    leaf = ET.SubElement(wrapper, "port-nn")
    leaf.text = kwargs.pop('port_nn')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_vmpolicy_macaddr_port_prof(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/vmpolicy-macaddr/port-prof set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    wrapper = ET.SubElement(section, "vmpolicy-macaddr")
    leaf = ET.SubElement(wrapper, "port-prof")
    leaf.text = kwargs.pop('port_prof')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_has_more(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/has-more set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "has-more")
    leaf.text = kwargs.pop('has_more')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)

def get_vmpolicy_macaddr_output_instance_id(self, **kwargs):
    """Build a get_vmpolicy_macaddr RPC with output/instance-id set and dispatch it."""
    rpc = ET.Element("get_vmpolicy_macaddr")
    section = ET.SubElement(rpc, "output")
    leaf = ET.SubElement(section, "instance-id")
    leaf.text = kwargs.pop('instance_id')
    deliver = kwargs.pop('callback', self._callback)
    return deliver(rpc)
def vcenter_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_credentials_url(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
url = ET.SubElement(credentials, "url")
url.text = kwargs.pop('url')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_credentials_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_credentials_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_credentials_vrf_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
vrf_name = ET.SubElement(credentials, "vrf-name")
vrf_name.text = kwargs.pop('vrf_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def vcenter_activate(self, **kwargs):
    """Build a <config> document with an empty vcenter/activate element.

    Required kwarg: ``id`` (vcenter key).  An optional ``callback``
    overrides ``self._callback``.
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter, "id").text = kwargs.pop('id')
    # <activate/> is a presence element; it carries no text.
    ET.SubElement(vcenter, "activate")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def vcenter_interval(self, **kwargs):
    """Build a <config> document setting vcenter/interval.

    Required kwargs: ``id`` (vcenter key) and ``interval`` (string).
    An optional ``callback`` overrides ``self._callback``.
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter, "id").text = kwargs.pop('id')
    ET.SubElement(vcenter, "interval").text = kwargs.pop('interval')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def vcenter_discovery_ignore_delete_all_response_ignore_value(self, **kwargs):
    """Build a <config> setting discovery/ignore-delete-all-response/ignore-value.

    Required kwargs: ``id`` (vcenter key) and ``ignore_value``.  An
    optional ``callback`` overrides ``self._callback``.
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter, "id").text = kwargs.pop('id')
    discovery = ET.SubElement(vcenter, "discovery")
    ignore_delete_all_response = ET.SubElement(discovery,
                                               "ignore-delete-all-response")
    ET.SubElement(ignore_delete_all_response,
                  "ignore-value").text = kwargs.pop('ignore_value')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def vcenter_discovery_ignore_delete_all_response_always(self, **kwargs):
    """Build a <config> with discovery/ignore-delete-all-response/always.

    Required kwarg: ``id`` (vcenter key).  ``always`` is a presence
    element with no text.  An optional ``callback`` overrides
    ``self._callback``.
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter, "id").text = kwargs.pop('id')
    discovery = ET.SubElement(vcenter, "discovery")
    ignore_delete_all_response = ET.SubElement(discovery,
                                               "ignore-delete-all-response")
    ET.SubElement(ignore_delete_all_response, "always")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
| {
"content_hash": "95da5b6afed7dbe5d3c9f0d67c2eec03",
"timestamp": "",
"source": "github",
"line_count": 2436,
"max_line_length": 96,
"avg_line_length": 40.6871921182266,
"alnum_prop": 0.6161591702484008,
"repo_name": "BRCDcomm/pynos",
"id": "e9f44070afe3ed3627fb05b72cdf617630d4e801",
"size": "99136",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44939904"
}
],
"symlink_target": ""
} |
import sys
from link_analyzer import Analyzer
from link_file import LinkFile
if __name__ == "__main__":
for arg in sys.argv[1:]:
file = arg.split(",")
decode = True if (len(file) > 1) else False
print "Parsing file '{}' with decoder '{}'".format(file[0], decode)
link_file = LinkFile(file[0], decode)
analyzer_ab, analyzer_ba = link_file.get_analyzers()
analyzer_ab.save_all_plots()
analyzer_ba.save_all_plots()
| {
"content_hash": "b12366b620c82d285d00a1a14b7deaf0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 24,
"alnum_prop": 0.5979166666666667,
"repo_name": "salkinium/bachelor",
"id": "ef109a1a65fd9893ed19ae4518e61a476a5646cf",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "link_analysis/experiment_parser_all.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "966"
},
{
"name": "C++",
"bytes": "23430"
},
{
"name": "Erlang",
"bytes": "203"
},
{
"name": "Python",
"bytes": "169735"
},
{
"name": "TeX",
"bytes": "202818"
},
{
"name": "nesC",
"bytes": "8724"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from keras import backend as K
from keras.layers import Input
from keras.models import Sequential, Model
from numpy.testing import assert_allclose
from keras_contrib.layers import InstanceNormalization
from keras_contrib.utils.test_utils import layer_test
# Module-level sample arrays.
# NOTE(review): none of these appear to be referenced by the tests in this
# file — confirm nothing imports them before removing.
input_1 = np.arange(10)
input_2 = np.zeros(10)
input_3 = np.ones(10)
input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]
def basic_instancenorm_test():
    """Smoke-test InstanceNormalization construction via ``layer_test``:
    with regularizers, with custom initializers, and with center/scale
    disabled.

    NOTE(review): the name lacks the ``test_`` prefix, so pytest's default
    collection skips it — confirm whether that is intentional.
    """
    from keras import regularizers
    layer_test(InstanceNormalization,
               kwargs={'epsilon': 0.1,
                       'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
    layer_test(InstanceNormalization,
               kwargs={'gamma_initializer': 'ones',
                       'beta_initializer': 'ones'},
               input_shape=(3, 4, 2))
    layer_test(InstanceNormalization,
               kwargs={'scale': False, 'center': False},
               input_shape=(3, 3))
@pytest.mark.parametrize('input_shape,axis', [((10, 1), -1),
                                              ((10,), None)])
def test_instancenorm_correctness_rank2(input_shape, axis):
    """After fitting identity on N(5, 10) data, the layer output (with beta
    and gamma stripped off) should have ~zero mean and ~unit std."""
    layer = InstanceNormalization(input_shape=input_shape, axis=axis)
    net = Sequential()
    net.add(layer)
    net.compile(loss='mse', optimizer='sgd')

    # Data deliberately off-center: mean 5.0, std 10.0.
    samples = np.random.normal(loc=5.0, scale=10.0,
                               size=(1000,) + input_shape)
    net.fit(samples, samples, epochs=4, verbose=0)

    normalized = net.predict(samples)
    normalized = (normalized - K.eval(layer.beta)) / K.eval(layer.gamma)
    assert_allclose(normalized.mean(), 0.0, atol=1e-1)
    assert_allclose(normalized.std(), 1.0, atol=1e-1)
def test_instancenorm_training_argument():
    """Calling the layer with ``training=True`` must normalize the output,
    and the layer must also accept an explicit ``training=False`` call."""
    norm = InstanceNormalization(input_shape=(10,))
    inputs = Input(shape=(10,))
    outputs = norm(inputs, training=True)
    net = Model(inputs, outputs)

    np.random.seed(123)
    data = np.random.normal(loc=5.0, scale=10.0, size=(20, 10))
    before = net.predict(data)

    net.compile(loss='mse', optimizer='rmsprop')
    net.fit(data, data, epochs=1, verbose=0)
    after = net.predict(data)

    # Training must actually change the output, which stays normalized.
    assert np.abs(np.sum(before - after)) > 0.1
    assert_allclose(after.mean(), 0.0, atol=1e-1)
    assert_allclose(after.std(), 1.0, atol=1e-1)

    frozen = InstanceNormalization(input_shape=(10,))
    frozen_inputs = Input(shape=(10,))
    frozen(frozen_inputs, training=False)
def test_instancenorm_convnet():
    """With axis=1 on NCHW data, each channel ends up with ~zero mean and
    ~unit std after removing the learned beta/gamma."""
    layer = InstanceNormalization(axis=1, input_shape=(3, 4, 4))
    net = Sequential()
    net.add(layer)
    net.compile(loss='mse', optimizer='sgd')

    # Data deliberately off-center: mean 5.0, std 10.0.
    data = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    net.fit(data, data, epochs=4, verbose=0)

    beta = np.reshape(K.eval(layer.beta), (1, 3, 1, 1))
    gamma = np.reshape(K.eval(layer.gamma), (1, 3, 1, 1))
    normalized = (net.predict(data) - beta) / gamma
    assert_allclose(np.mean(normalized, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(normalized, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_shared_instancenorm():
    """An InstanceNormalization layer — and a model containing one — can be
    reused across independent input tensors."""
    # Layer-level reuse: apply the same layer to two different inputs.
    shared = InstanceNormalization(input_shape=(10,))
    first_in = Input(shape=(10,))
    shared(first_in)
    second_in = Input(shape=(10,))
    second_out = shared(second_in)

    data = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    net = Model(second_in, second_out)
    net.compile('sgd', 'mse')
    net.train_on_batch(data, data)

    # Model-level reuse: the whole model applied to a fresh input.
    third_in = Input(shape=(10,))
    wrapper = Model(third_in, net(third_in))
    wrapper.compile('sgd', 'mse')
    wrapper.train_on_batch(data, data)
def test_instancenorm_perinstancecorrectness():
    """Each instance of a bimodal batch must be normalized independently;
    if every instance is normalized, the whole batch is too."""
    model = Sequential()
    norm = InstanceNormalization(input_shape=(10,))
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    # Bimodal batch: two instances from each of two distributions.
    z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
    x = np.append(z, y)
    x = np.reshape(x, (4, 10))
    # verbose=0 for consistency with the other tests in this file
    # (was verbose=1, which spammed CI logs).
    model.fit(x, x, epochs=4, batch_size=4, verbose=0)
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)
    # Verify that each instance in the batch is individually normalized.
    for i in range(4):
        instance = out[i]
        assert_allclose(instance.mean(), 0.0, atol=1e-1)
        assert_allclose(instance.std(), 1.0, atol=1e-1)
    # If each instance is normalized, so should the batch be.
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
def test_instancenorm_perchannel_correctness():
    """With ``axis=None`` each instance is normalized as a whole (so channels
    keep distinct statistics); with ``axis=1`` every channel is normalized
    separately."""
    # Build a batch whose three channels have different means and stds.
    x = np.random.normal(loc=5.0, scale=2.0, size=(10, 1, 4, 4))
    y = np.random.normal(loc=10.0, scale=3.0, size=(10, 1, 4, 4))
    z = np.random.normal(loc=-5.0, scale=5.0, size=(10, 1, 4, 4))
    batch = np.append(x, y, axis=1)
    batch = np.append(batch, z, axis=1)
    # This model does not provide a normalization axis.
    model = Sequential()
    norm = InstanceNormalization(axis=None,
                                 input_shape=(3, 4, 4),
                                 center=False,
                                 scale=False)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    model.fit(batch, batch, epochs=4, verbose=0)
    out = model.predict(batch)
    # Values will not be normalized per-channel...
    for instance in range(10):
        for channel in range(3):
            activations = out[instance, channel]
            assert abs(activations.mean()) > 1e-2
            assert abs(activations.std() - 1.0) > 1e-6
        # ...but values are still normalized per-instance.
        activations = out[instance]
        assert_allclose(activations.mean(), 0.0, atol=1e-1)
        assert_allclose(activations.std(), 1.0, atol=1e-1)
    # This model sets the channel as a normalization axis.
    model = Sequential()
    norm = InstanceNormalization(axis=1,
                                 input_shape=(3, 4, 4),
                                 center=False,
                                 scale=False)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    model.fit(batch, batch, epochs=4, verbose=0)
    out = model.predict(batch)
    # Values are now normalized per-channel.
    for instance in range(10):
        for channel in range(3):
            activations = out[instance, channel]
            assert_allclose(activations.mean(), 0.0, atol=1e-1)
            assert_allclose(activations.std(), 1.0, atol=1e-1)
if __name__ == '__main__':
    # Allow running this test module directly: delegate to pytest.
    pytest.main([__file__])
| {
"content_hash": "64323c0d89d9b3528863585561695707",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 73,
"avg_line_length": 34.5736040609137,
"alnum_prop": 0.5831742769050066,
"repo_name": "keras-team/keras-contrib",
"id": "67c25b86a155f7507e94c11e8cfa36ac583f3533",
"size": "6811",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/keras_contrib/layers/normalization/test_instancenormalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3032"
},
{
"name": "HTML",
"bytes": "12790"
},
{
"name": "Python",
"bytes": "437184"
},
{
"name": "Shell",
"bytes": "243"
}
],
"symlink_target": ""
} |
"""
This package contains the actual wikicode parser, split up into two main
modules: the :mod:`.tokenizer` and the :mod:`.builder`. This module joins them
together into one interface.
"""
from .builder import Builder
from .errors import ParserError
try:
from ._tokenizer import CTokenizer
use_c = True
except ImportError:
from .tokenizer import Tokenizer
CTokenizer = None
use_c = False
__all__ = ["use_c", "Parser", "ParserError"]
class Parser:
    """A wikicode parser.

    Parsing happens in two stages: a tokenizer (the C extension
    :class:`.CTokenizer` when available, otherwise the pure-Python
    :class:`.Tokenizer`) splits the text into tokens, and a
    :class:`.Builder` assembles those tokens into a tree of
    :class:`.Wikicode` objects and :class:`.Node`\\ s.

    Instances (and their tokenizer/builder) are not thread-safe, and
    :meth:`parse` must not be called concurrently.  Normally you should
    not create this class yourself — :func:`mwparserfromhell.parse`
    constructs a fresh :class:`.Parser` as needed.
    """

    def __init__(self):
        if use_c and CTokenizer:
            tokenizer = CTokenizer()
        else:
            from .tokenizer import Tokenizer
            tokenizer = Tokenizer()
        self._tokenizer = tokenizer
        self._builder = Builder()

    def parse(self, text, context=0, skip_style_tags=False):
        """Parse *text* and return the resulting :class:`.Wikicode` tree.

        *context* seeds the tokenizer's starting context (useful inside
        node attribute setters, e.g. :mod:`contexts.EXT_LINK_URI
        <.contexts>` keeps a URL from being re-parsed as an
        :class:`.ExternalLink`).  With *skip_style_tags* set, ``''`` and
        ``'''`` are treated as plain text rather than style markup.

        Raises :exc:`.ParserError` on an internal parsing failure.
        """
        tokens = self._tokenizer.tokenize(text, context, skip_style_tags)
        return self._builder.build(tokens)
| {
"content_hash": "ed861a874a1eeec0cb3a9a9deedee29b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 38.14754098360656,
"alnum_prop": 0.6574989256553503,
"repo_name": "jayvdb/mwparserfromhell",
"id": "3fad93baec47b294b60806b70aba879402a98e15",
"size": "3449",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/mwparserfromhell/parser/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1976"
},
{
"name": "C",
"bytes": "137889"
},
{
"name": "C++",
"bytes": "4113"
},
{
"name": "Python",
"bytes": "403108"
},
{
"name": "Shell",
"bytes": "4950"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: loosens four TaxSaveInputs fields to
    # nullable/blank FloatFields and attaches threshold choices.
    # NOTE(review): this migration may already be applied in deployed
    # databases — do not edit its operations; comment-only changes are safe.

    dependencies = [
        ('taxbrain', '0028_auto_20141022_1603'),
    ]

    operations = [
        migrations.AlterField(
            model_name='taxsaveinputs',
            name='dividends',
            field=models.FloatField(default=None, null=True, blank=True, choices=[(0.0, b'Threshold 1'), (0.15, b'Threshold 2'), (0.2, b'Threshold 3')]),
        ),
        migrations.AlterField(
            model_name='taxsaveinputs',
            name='income_tax_rate',
            field=models.FloatField(default=None, null=True, blank=True, choices=[(0.0, b'Threshold 1'), (0.0, b'Threshold 2'), (0.0, b'Threshold 3'), (0.0, b'Threshold 4'), (0.0, b'Threshold 5'), (0.0, b'Threshold 6')]),
        ),
        migrations.AlterField(
            model_name='taxsaveinputs',
            name='long_term_cap',
            field=models.FloatField(default=None, null=True, blank=True, choices=[(0.15, b'Threshold 1'), (0.25, b'Threshold 2'), (0.28, b'Threshold 3')]),
        ),
        migrations.AlterField(
            model_name='taxsaveinputs',
            name='medicare_unearned_inc_rate',
            field=models.FloatField(default=0.038, null=True, blank=True),
        ),
    ]
| {
"content_hash": "ab64b24555564f2eca27efecc3396860",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 221,
"avg_line_length": 39.63636363636363,
"alnum_prop": 0.5848623853211009,
"repo_name": "nolanzandi/webapp-public",
"id": "7eb7e5ff9934c534edc9721606301b25dc856a59",
"size": "1332",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0029_auto_20141022_2208.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61908"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "380111"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
import matplotlib as mpl
mpl.use('pdf')
import pylab as plt
import numpy as np
import glob
import sys
import argparse
from crrlpy import crrls
from scipy import interpolate
def show_coverage(spec, out, x_col, y_col, t_col):
    """Plot the coverage of *spec* as a percentage alongside the spectrum
    itself, saving the figure to *out*.

    *x_col*, *y_col* and *t_col* are the column indices of the velocity
    axis, the coverage counts and the spectrum values, respectively.
    """
    data = np.loadtxt(spec)
    vel = data[:, x_col]
    cov = data[:, y_col]
    tau = data[:, t_col]

    # Normalize the spectrum by its largest absolute excursion.
    tnorm = max(abs(tau.min()), abs(tau.max()))

    fig = plt.figure(frameon=False)
    ax = fig.add_subplot(1, 1, 1, adjustable='datalim')
    ax.step(vel, abs(cov - cov.max()) / cov.max() * 100, 'k-',
            drawstyle='steps', lw=1, where='pre', label='coverage')
    ax.step(vel, tau / tnorm * 100, '-', c='gray',
            drawstyle='steps', lw=1, where='pre', label='spectrum')
    ax.set_xlabel(r'Velocity (km s$^{-1}$)')
    ax.set_ylabel(r'Percentage $\%$')
    ax.legend(loc=0, numpoints=1, frameon=False)
    plt.savefig('{0}'.format(out),
                bbox_inches='tight', pad_inches=0.3)
    plt.close()
if __name__ == '__main__':
    # CLI entry point: parse positional input/output paths plus optional
    # column-index overrides, then delegate to show_coverage below.
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('spec', type=str,
                        help="File with coverage to show.\n" \
                             "E.g., \"lba_hgh_*.ascii\" (string).\n" \
                             "Wildcards and [] accepted.")
    parser.add_argument('out', type=str,
                        help="Output plot filename.\n" \
                             "E.g., CIalpha_stack1_coverage.pdf (string).")
    # Column indices into the whitespace-separated data file.
    parser.add_argument('--x_col', type=int, default=0,
                        help="Column with x axis values.\n" \
                             "Default: 0")
    parser.add_argument('--y_col', type=int, default=2,
                        help="Column with y axis values.\n" \
                             "Default: 2")
    parser.add_argument('--t_col', type=int, default=1,
                        help="Column with optical depth values.\n" \
                             "Default: 1")
    args = parser.parse_args()
show_coverage(args.spec, args.out, args.x_col, args.y_col, args.t_col) | {
"content_hash": "6e1aa4e21908ce81afccef6558b6775f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 83,
"avg_line_length": 35.682539682539684,
"alnum_prop": 0.5195729537366548,
"repo_name": "astrofle/CRRLpy",
"id": "89d618606fdc3358550380e728220d7cab669c27",
"size": "2271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/show_coverage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "311127"
}
],
"symlink_target": ""
} |
"""Provides deal object for PMP-E and Global Deals."""
from __future__ import absolute_import
from functools import partial
from .. import t1types
from ..entity import Entity
class Deal(Entity):
    """Deal entity for PMP-E and Global Deals (T1 ``deals`` collection)."""
    # REST endpoint names used by the Entity machinery.
    collection = 'deals'
    resource = 'deal'
    # Deals are posted as JSON rather than form-encoded.
    _post_format = 'json'
    # Closed value sets with their defaults (second argument; None = no default).
    _bill_types = t1types.enum({'EXCHANGE', 'PUBLISHER', 'NONE'}, 'EXCHANGE')
    _price_methods = t1types.enum({'CPM'}, 'CPM')
    _price_types = t1types.enum({'FIXED', 'FLOOR'}, None)
    _deal_types = t1types.enum({'STANDARD', 'PG'}, 'STANDARD')
    # Adapters applied when deserializing fields from the API
    # (None = pass the value through unchanged).
    _pull = {
        'created_on': t1types.strpt,
        'deal_identifier': None,
        'deal_type': None,
        'description': None,
        'end_datetime': t1types.strpt,
        'id': int,
        'name': None,
        'bill_type': None,
        'owner': dict,
        'permissions': dict,
        'price': dict,
        'price_method': None,
        'price_type': None,
        'start_datetime': t1types.strpt,
        'status': bool,
        'supply_source_id': int,
        'sub_supply_source_id': int,
        'updated_on': t1types.strpt,
        'zone_name': None,
    }
    # Serialization reuses the pull adapters, overriding the fields below:
    # datetimes are formatted with a UTC offset and enum fields validated.
    _push = _pull.copy()
    _push.update({
        'bill_type': _bill_types,
        'deal_type': _deal_types,
        'end_datetime': partial(t1types.strft, offset=True),
        'price_method': _price_methods,
        'price_type': _price_types,
        'start_datetime': partial(t1types.strft, offset=True),
    })

    def __init__(self, session, properties=None, **kwargs):
        # Delegates all construction to Entity.
        super(Deal, self).__init__(session, properties, **kwargs)
| {
"content_hash": "6313c102c09864aa961d3b1ea54c8fc8",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 30.46153846153846,
"alnum_prop": 0.5681818181818182,
"repo_name": "MediaMath/t1-python",
"id": "3e144600e7e06606a8323e74af36a2ded2ff4a52",
"size": "1608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terminalone/models/deal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "238783"
}
],
"symlink_target": ""
} |
from rdflib import Graph, Literal, BNode, Namespace, RDF, XSD
import queries
from dateutil import parser
# Vocabulary namespaces used throughout this module.
# (The former `global SSN/GEO/DUL` statements were removed: `global` at
# module scope is a no-op — these are ordinary module-level constants.)

# SSN Namespace
SSN = Namespace('https://www.w3.org/2005/Incubator/ssn/ssnx/ssn#')
# Geo Namespace
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
# DUL Namespace
DUL = Namespace('http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#')
def addToGraph(event, graph):
    """Add one taxi-trip `event` to `graph` as an SSN Observation and
    return the graph.

    NOTE(review): assumes `event` is a mapping with NYC-taxi field names
    (pickup_datetime, dropoff_datetime, pickup/dropoff latitude and
    longitude) — confirm against the caller.
    """
    graph.bind('ssn', SSN)
    graph.bind('geo', GEO)
    graph.bind('dul', DUL)

    # Parse each timestamp exactly once (the original parsed
    # pickup_datetime twice: once for the sampling time, once for the
    # duration).
    pickup_time = parser.parse(event['pickup_datetime'])
    dropoff_time = parser.parse(event['dropoff_datetime'])

    observation = BNode()
    oTime = BNode()
    # Observation node with its sampling-time interval.
    graph.add((observation, RDF.type, SSN.Observation))
    graph.add((oTime, RDF.type, DUL.TimeInterval))
    graph.add((observation, SSN.observationSamplingTime, oTime))
    # Sampling time = pickup time, serialized as xsd:dateTime.
    t = Literal(pickup_time.strftime("%Y-%m-%dT%H:%M:%S"), datatype=XSD.dateTime)
    graph.add((oTime, DUL.hasRegionDataValue, t))
    # SensorOutput holding the ObservationValue.
    sensorOutput = BNode()
    graph.add((sensorOutput, RDF.type, SSN.SensorOutput))
    graph.add((observation, SSN.observationResult, sensorOutput))
    observationValue = BNode()
    startLocation = BNode()
    endLocation = BNode()
    graph.add((observationValue, RDF.type, SSN.ObservationValue))
    graph.add((sensorOutput, SSN.hasValue, observationValue))
    # Start and end locations of the trip.
    graph.add((observationValue, SSN.hasStartLocation, startLocation))
    graph.add((observationValue, SSN.hasEndLocation, endLocation))
    graph.add((startLocation, RDF.type, GEO.location))
    graph.add((endLocation, RDF.type, GEO.location))
    # Pickup (start) coordinates.  Inlined the lat/long temporaries, which
    # also removes the shadowing of the `long` builtin.
    graph.add((startLocation, GEO.lat,
               Literal(event['pickup_latitude'], datatype=XSD.float)))
    graph.add((startLocation, GEO.long,
               Literal(event['pickup_longitude'], datatype=XSD.float)))
    # Dropoff (end) coordinates.  (The original comment here wrongly said
    # "start location" — copy-paste error.)
    graph.add((endLocation, GEO.lat,
               Literal(event['dropoff_latitude'], datatype=XSD.float)))
    graph.add((endLocation, GEO.long,
               Literal(event['dropoff_longitude'], datatype=XSD.float)))
    # Trip duration.  NOTE(review): str(timedelta) yields "H:MM:SS", which
    # does not match the declared xsd:float datatype — confirm the intended
    # representation before changing it.
    dur = dropoff_time - pickup_time
    duration = Literal(str(dur), datatype=XSD.float)
    graph.add((observation, SSN.hasDuration, duration))
    return graph
def removeFromGraph(timestamp, graph):
    """Remove the events matching `timestamp` from `graph`.

    Queries the graph via queries.getEvents(timestamp) and, for every node
    in every result row, deletes the triples having that node as subject.
    NOTE(review): triples where the node appears only as object are left
    in place — confirm that is intended.
    """
    results = graph.query(queries.getEvents(timestamp))
    # Python 2 print statement: debug output of the match count.
    print len(results)
    for result in results:
        for node in result:
            graph.remove((node, None, None))
| {
"content_hash": "39f99f318bcec3cad9e2e90524579ffe",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 74,
"avg_line_length": 31.102272727272727,
"alnum_prop": 0.689806357325539,
"repo_name": "nikha1/nyc-taxi",
"id": "ce191aa699631d342c422d71b5262e884b9af385",
"size": "2737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/InMemoryGraph/utils/graphlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17723"
},
{
"name": "JavaScript",
"bytes": "11947"
},
{
"name": "Jupyter Notebook",
"bytes": "1333980"
},
{
"name": "Python",
"bytes": "93777"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.