import random
from math import isnan
from collections import OrderedDict, namedtuple
from lxml import etree
from pandas import Series, concat
from omicexperiment.plotting.plot_pygal import return_plot, return_plot_tree
PositionData = namedtuple("PositionData", 'x,y,x1,x2')
def return_y_position_group_label(tree):
path0 = tree.xpath('//path[@class="guide line"]')[0]
#label0 = path0.getnext()
#y = ((float(rect0.attrib['y']) + float(rect0.attrib['height'])) \
# + float(label0.attrib['y'])
# ) / 2
#axis_x = tree.xpath('//g[@class="axis x"]')[0]
y = float(path0.attrib['d'].split(" ")[2].split("v")[1])
return y
def return_rects_first_and_last_x(rects):
    rect0 = rects[0]
    last_rect = rects[-1]
    x_left = float(rect0.attrib['x'])
    x_right = float(last_rect.attrib['x']) + float(last_rect.attrib['width'])
    return (x_left, x_right)
def return_rects_middle_x(rects):
    x_left, x_right = return_rects_first_and_last_x(rects)
    mid_x = (x_left + x_right) / 2
    return mid_x
def return_rects_for_groups(tree, group_col):
rects = tree.xpath('//rect[@class="rect reactive tooltip-trigger"]')
index_values = group_col.value_counts().index.sort_values()
rects_dict = OrderedDict()
for index_value in index_values:
rects_dict[index_value] = []
#check for nan
if len(group_col[group_col.isnull()]) > 0:
rects_dict['nan'] = []
for i, rect in enumerate(rects):
        rect_val = rect.getnext().getnext().getnext().getnext().text
        key = group_col[rect_val]
        try:
            rects_dict[key].append(rect)
        except KeyError:  # key is NaN (NaN != NaN, so the dict lookup fails)
            if isnan(key):
                rects_dict['nan'].append(rect)
return rects_dict
def return_xlabels_for_groups(tree, group_col):
paths = tree.xpath('//path[@class="guide line"]')
labels = []
for path in paths:
labels.append(path.getnext())
for l in group_col.index:
labels.append(l)
index_values = group_col.value_counts().index.sort_values()
labels_dict = OrderedDict()
for index_value in index_values:
labels_dict[index_value] = []
#check for nan
if len(group_col[group_col.isnull()]) > 0:
labels_dict['nan'] = []
for i, index_val in enumerate(group_col.index):
key = group_col[index_val]
lbl = labels[i]
        try:
            labels_dict[key].append(lbl)
        except KeyError:  # key is NaN
            if isnan(key):
                labels_dict['nan'].append(lbl)
return labels_dict
def group_label_positions(tree, group_col, pad):
rects_dict = return_rects_for_groups(tree, group_col)
pos_dict = OrderedDict()
y = return_y_position_group_label(tree)
try:
rects_dict_iteritems = rects_dict.iteritems
except AttributeError: #python3
rects_dict_iteritems = rects_dict.items
for i, kv in enumerate(rects_dict_iteritems()):
k,grp_rects = kv
x1,x2 = return_rects_first_and_last_x(grp_rects)
mid_x = (x1 + x2) / 2
if pad:
x1,x2,mid_x = x1+(pad*i), x2+(pad*i),mid_x+(pad*i)
pos_dict[k] = PositionData(x=mid_x,x1=x1,x2=x2,y=y)
return pos_dict
def add_group_labels(tree, group_col, pad):
axis_x = tree.xpath('//g[@class="axis x"]')[0]
lbls_dict = group_label_positions(tree, group_col, pad)
rand_grey = 200
try:
lbls_dict_iteritems = lbls_dict.iteritems
except AttributeError: #python3
lbls_dict_iteritems = lbls_dict.items
for lbl, pos in lbls_dict_iteritems():
l = etree.Element("line", x1=str(pos.x1), x2=str(pos.x2), y1=str(pos.y), y2=str(pos.y), style="stroke:rgb({x}, {x}, {x});stroke-width:1".format(x=str(rand_grey)))
v1 = etree.Element("line", x1=str(pos.x1), x2=str(pos.x1), y1=str(pos.y-3), y2=str(pos.y+3), style="stroke:rgb({x}, {x}, {x});stroke-width:1".format(x=str(rand_grey)))
v2 = etree.Element("line", x1=str(pos.x2), x2=str(pos.x2), y1=str(pos.y-3), y2=str(pos.y+3), style="stroke:rgb({x}, {x}, {x});stroke-width:1".format(x=str(rand_grey)))
lbl_length = pos.x2 - pos.x1
if lbl_length < 35 and len(str(lbl)) > 5:
t_attr = {'lengthAdjust':"spacingAndGlyphs", 'textLength':str(lbl_length)}
else:
t_attr = {}
t = etree.Element("text", x=str(pos[0]), y=str(pos[1]), **t_attr)
t.text = str(lbl)
axis_x.append(l)
axis_x.append(v1)
axis_x.append(v2)
axis_x.append(t)
rand_grey = random.randint(150,200)
def group_plot_tree(dataframe, mapping_group_col, include_nan=False, pad=10):
group_col_name = mapping_group_col.name
sorted_group_col = mapping_group_col.sort_values()
vals = list(sorted_group_col.value_counts().index)
if include_nan:
vals.append('nan')
else:
sorted_group_col = sorted_group_col[sorted_group_col.notnull()]
sorted_df = dataframe.reindex(columns=sorted_group_col.index)
plot = return_plot(sorted_df)
tree = return_plot_tree(plot)
    if pad:
        rects_dict = return_rects_for_groups(tree, sorted_group_col)
        for i, key in enumerate(rects_dict):
            for rect in rects_dict[key]:
                rect.attrib['transform'] = 'translate({},0)'.format(str(pad*i))
        labels_dict = return_xlabels_for_groups(tree, sorted_group_col)
        for i, key in enumerate(labels_dict):
            for lbl in labels_dict[key]:
                #the parent of the label here is the <g class='guides'> tag
                lbl.getparent().attrib['transform'] = 'translate({},0)'.format(str(pad*i))
add_group_labels(tree, sorted_group_col, pad)
return plot, tree
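# A minimal usage sketch (hypothetical data; the mapping Series must be
# indexed by the dataframe's column names):
#
#   from pandas import DataFrame, Series
#   df = DataFrame({'s1': [1, 2], 's2': [3, 4], 's3': [5, 6]})
#   groups = Series(['a', 'a', 'b'], index=['s1', 's2', 's3'], name='group')
#   plot, tree = group_plot_tree(df, groups, pad=10)
#   # `tree` is the lxml tree of the rendered SVG with group labels
#   # appended to its x axis.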
| {
"content_hash": "eb7cf5387e386faedac26d4fdb591645",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 171,
"avg_line_length": 30.082417582417584,
"alnum_prop": 0.6321461187214612,
"repo_name": "bassio/omicexperiment",
"id": "54b869c5adaae1e8ffddbcce987f307be4b04ec6",
"size": "5475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omicexperiment/plotting/groups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "338397"
},
{
"name": "Python",
"bytes": "99701"
}
],
"symlink_target": ""
} |
from .base import SettingsBase
class SettingsLocalSqlite(SettingsBase):
DEBUG = True
BASE_URL = 'http://127.0.0.1:8004'
def JS_CONFIG(self):
conf = super(SettingsLocalSqlite, self).JS_CONFIG()
conf['WS_SERVER'] = 'http://127.0.0.1:22000/ws'
conf['ENABLE_REGISTER'] = True
return conf
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/home/vagrant/.westlife/vresqlite.db'
}
}
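# A sketch (assuming SettingsBase.JS_CONFIG likewise returns a plain dict) of
# the layering pattern above: the subclass calls the parent's JS_CONFIG() and
# overrides only the keys that differ in this environment.
#
#   conf = SettingsLocalSqlite().JS_CONFIG()
#   conf['WS_SERVER']        # -> 'http://127.0.0.1:22000/ws'
#   conf['ENABLE_REGISTER']  # -> True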
| {
"content_hash": "b2b2270d4f193ddeff17b62064306fc3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 24.95,
"alnum_prop": 0.5751503006012024,
"repo_name": "h2020-westlife-eu/VRE",
"id": "19f983920051eb52296d693243cfba1089e880f0",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pype/settings/local_sqlite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46730"
},
{
"name": "HTML",
"bytes": "167326"
},
{
"name": "JavaScript",
"bytes": "22031"
},
{
"name": "Python",
"bytes": "310068"
},
{
"name": "Shell",
"bytes": "863"
}
],
"symlink_target": ""
} |
import asyncio
import discord
import random
from discord.ext import commands
from Cogs import DisplayName
from Cogs import Nullify
class Eat:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def eat(self, ctx, *, member : str = None):
"""Eat like a boss."""
authorName = DisplayName.name(ctx.message.author)
# Check if we're eating nothing
if member == None:
nothingList = [ 'you sit quietly and eat *nothing*...',
'you\'re *sure* there was something to eat, so you just chew on nothingness...',
'there comes a time when you need to realize that you\'re just chewing nothing for the sake of chewing. That time is now.']
randnum = random.randint(0, len(nothingList)-1)
msg = '*{}*, {}'.format(authorName, nothingList[randnum])
msg = Nullify.clean(msg)
await self.bot.send_message(ctx.message.channel, msg)
return
# Check if we're eating a member
memberCheck = DisplayName.memberForName(member, ctx.message.server)
if memberCheck:
# We're eating a member - let's do a bot-check
if memberCheck.id == self.bot.user.id:
# It's me!
memberList = [ 'you try to eat *me* - but unfortunately, I saw it coming - your jaw hangs open as I deftly sidestep.',
'your mouth hangs open for a brief second before you realize that *I\'m* eating *you*.',
'I\'m a bot. You can\'t eat me.',
'your jaw clamps down on... wait... on nothing, because I\'m *digital!*.',
'what kind of bot would I be if I let you eat me?']
elif memberCheck.id == ctx.message.author.id:
# We're eating... ourselves?
memberList = [ 'you clamp down on your own forearm - not surprisingly, it hurts.',
'you place a finger into your mouth, but *just can\'t* force yourself to bite down.',
'you happily munch away, but can now only wave with your left hand.',
'wait - you\'re not a sandwich!',
'you might not be the smartest...']
else:
memName = DisplayName.name(memberCheck)
memberList = [ 'you unhinge your jaw and consume *{}* in one bite.'.format(memName),
'you try to eat *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...'.format(memName),
'you take a quick bite out of *{}*. They probably didn\'t even notice.'.format(memName),
'you sink your teeth into *{}\'s* shoulder - they turn to face you, eyes wide as you try your best to scurry away and hide.'.format(memName),
'your jaw clamps down on *{}* - a satisfying *crunch* emanates as you finish your newest meal.'.format(memName)]
randnum = random.randint(0, len(memberList)-1)
msg = '*{}*, {}'.format(authorName, memberList[randnum])
msg = Nullify.clean(msg)
await self.bot.send_message(ctx.message.channel, msg)
return
# Assume we're eating something else
itemList = [ 'you take a big chunk out of *{}*. *Delicious.*'.format(member),
'your teeth sink into *{}* - it tastes satisfying.'.format(member),
'you rip hungrily into *{}*, tearing it to bits!'.format(member),
'you just can\'t bring yourself to eat *{}* - so you just hold it for awhile...'.format(member),
'you attempt to bite into *{}*, but you\'re clumsier than you remember - and fail...'.format(member),]
randnum = random.randint(0, len(itemList)-1)
msg = '*{}*, {}'.format(authorName, itemList[randnum])
msg = Nullify.clean(msg)
await self.bot.send_message(ctx.message.channel, msg)
return | {
"content_hash": "fda6e2d8d215b9cca4377d6dd3af87a1",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 149,
"avg_line_length": 48.5,
"alnum_prop": 0.6606297018668152,
"repo_name": "Mercurial/CorpBot.py",
"id": "72c540713581cb963483ad3220bb210fa857f65e",
"size": "3589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cogs/Eat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2708"
},
{
"name": "Python",
"bytes": "575793"
},
{
"name": "Shell",
"bytes": "2717"
}
],
"symlink_target": ""
} |
""" Deep Residual Network.
Applying a Deep Residual Network to CIFAR-10 Dataset classification task.
References:
- K. He, X. Zhang, S. Ren, and J. Sun. Deep Residual Learning for Image
Recognition, 2015.
- Learning Multiple Layers of Features from Tiny Images, A. Krizhevsky, 2009.
Links:
- [Deep Residual Network](http://arxiv.org/pdf/1512.03385.pdf)
- [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html)
"""
from __future__ import division, print_function, absolute_import
import tflearn
# Residual blocks
# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
n = 5
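# (total depth = 6*n + 2: three stages of n residual blocks with two conv
# layers each, plus the initial conv and the final fully connected layer)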
datapath = '../../data/cifar-10-batches-py'
# Data loading
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data(dirname=datapath)
Y = tflearn.data_utils.to_categorical(Y, 10)
testY = tflearn.data_utils.to_categorical(testY, 10)
# Real-time data preprocessing
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center(per_channel=True)
# Real-time data augmentation
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop([32, 32], padding=4)
# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3],
data_preprocessing=img_prep,
data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='rnc-ckpt/model_resnet_cifar10',
max_checkpoints=3, tensorboard_verbose=0,
clip_gradients=0.)
model.fit(X, Y, n_epoch=2, validation_set=(testX, testY),
snapshot_epoch=True, snapshot_step=500,
show_metric=True, batch_size=128, shuffle=True,
run_id='resnet_cifar10')
| {
"content_hash": "deca7d36d74c52974ee0d7946884e5f0",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 81,
"avg_line_length": 35.73134328358209,
"alnum_prop": 0.6904761904761905,
"repo_name": "hashware/tflearn-learn",
"id": "df90f58b5d3e157ce9817532beb3737960cf92ce",
"size": "2419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/images/residual_network_cifar10.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from suma.api.serializers.link import serialize_create_link, serialize_get_link
| {
"content_hash": "0f56d6d7f71bfe282dfef43bd692c93f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 79,
"avg_line_length": 80,
"alnum_prop": 0.8375,
"repo_name": "rach/suma",
"id": "4bdc79d23a7b81ccfdde495124d29ca765a21f65",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "suma/api/serializers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "63211"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
from py_privatekonomi.core.models.base_model import BaseModel
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Numeric, ForeignKeyConstraint
from collections import OrderedDict
class SecurityProvider(BaseModel):
    def __init__(self, context, customizations=None):
        # avoid the shared-mutable-default-argument pitfall
        if customizations is None:
            customizations = {}
        pre_cols = OrderedDict([
            ('id', Column('id', Integer, primary_key = True)),
            ('name', Column('name', String(255), nullable=False)),
            ('security_type', Column('security_type', String(64), nullable=True))
        ])
        # apply customizations
        for key in customizations:
            custom = customizations[key]
            pre_cols[key] = custom
cols = list(pre_cols.values())
super(SecurityProvider, self).__init__(
Table('security_provider', context.metadata, *cols), context)
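# A usage sketch (hypothetical column override; `context` is assumed to carry
# the populated metadata that BaseModel expects):
#
#   custom = {'security_type': Column('security_type', String(128), nullable=False)}
#   provider = SecurityProvider(context, customizations=custom)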
| {
"content_hash": "7f5722f3ebd1bffb4757d5ab513197e8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 106,
"avg_line_length": 46.55,
"alnum_prop": 0.6541353383458647,
"repo_name": "nilsFK/py-privatekonomi",
"id": "244d7efad1a6783b06b004b4610492cdb1370ec6",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_privatekonomi/core/models/security_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189907"
}
],
"symlink_target": ""
} |
import click
from tabulate import tabulate
from ..util import format_money, format_percent
def _get_total_payments(connection):
command = 'SELECT cast(sum(amount) AS REAL) / 100 FROM payments'
total_payments = connection.execute(command).fetchone()[0]
return total_payments if total_payments else 0.0
def _get_total_rewards(connection):
command = 'SELECT cast(sum(value) AS REAL) / 100 FROM rewards'
total_rewards = connection.execute(command).fetchone()[0]
return total_rewards if total_rewards else 0.0
def list_stats(connection):
total_payments = _get_total_payments(connection)
total_rewards = _get_total_rewards(connection)
try:
reward_rate = round(total_rewards / total_payments * 100, 2)
except ZeroDivisionError:
reward_rate = None
stats = [
['Total Payments', format_money(total_payments)],
['Total Rewards', format_money(total_rewards)],
        ['Reward Rate',
         format_percent(reward_rate) if reward_rate is not None else 'N/A']
]
headers = ['Statistic', 'Value']
click.echo_via_pager(tabulate(stats, headers, 'fancy_grid'))
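# A self-contained sketch of the cents-to-dollars math these helpers rely on
# (assumption: the real schema has more columns than shown here):
#
#   import sqlite3
#   conn = sqlite3.connect(':memory:')
#   conn.execute('CREATE TABLE payments (amount INTEGER)')
#   conn.execute('CREATE TABLE rewards (value INTEGER)')
#   conn.execute('INSERT INTO payments VALUES (12500)')  # $125.00 in cents
#   conn.execute('INSERT INTO rewards VALUES (250)')     # $2.50 in cents
#   assert _get_total_payments(conn) == 125.0
#   assert _get_total_rewards(conn) == 2.5  # reward rate: 2.5/125*100 = 2.0%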
| {
"content_hash": "a1e3d1226722cf7f661a47109db22fd0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 34.15151515151515,
"alnum_prop": 0.6832298136645962,
"repo_name": "dguo/churn",
"id": "dbec374a8328a056ca4a3d19e34ec962b9a24ec4",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/subcommands/stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "175"
},
{
"name": "Python",
"bytes": "48959"
}
],
"symlink_target": ""
} |
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
"python-dateutil==2.7.3",
"dateutils==0.6.6",
"chardet==3.0.4",
"notify2==0.3.1",
]
test_requirements = [
"bumpversion",
"flake8",
"tox",
"coverage",
"sphinx",
"nose",
"pipenv",
]
setup(
name='bureaucrate',
version='0.3.8',
description="A maildir-based executer of rules, destined to sort and "
"automate mail",
long_description=readme + '\n\n' + history,
author="Paul Ollivier",
author_email='contact@paulollivier.fr',
url='https://github.com/paulollivier/bureaucrate',
packages=[
'bureaucrate',
],
package_dir={
'bureaucrate':
'bureaucrate'
},
entry_points={
'console_scripts': [
'bureaucrate=bureaucrate.__main__:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='bureaucrate',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Environment :: Console',
'Topic :: Communications :: Email :: Filters',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
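# Note: with the entry_points declaration above, installing the package
# (e.g. `pip install .`) exposes a `bureaucrate` console command that calls
# bureaucrate.__main__:main.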
| {
"content_hash": "605bf067954799d95678f87cedf14187",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 74,
"avg_line_length": 25.2,
"alnum_prop": 0.5818070818070818,
"repo_name": "paulollivier/bureaucrate",
"id": "95f0ab177db6e6648c2325fe123f354df66347b0",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2311"
},
{
"name": "Python",
"bytes": "27138"
}
],
"symlink_target": ""
} |
from sphinxql import indexes, fields
from sphinxql.manager import IndexManager
from .models import Entity, Contract, Tender
class EntityIndex(indexes.Index):
name = fields.Text('name')
class Meta:
model = Entity
class Manager(IndexManager):
def get_queryset(self):
return super(Manager, self).get_queryset()\
.extra(select={'signing_date_is_null': 'signing_date IS NULL'},
order_by=['signing_date_is_null', '-signing_date'])
class ContractIndex(indexes.Index):
name = fields.Text('description')
description = fields.Text('contract_description')
signing_date = fields.Date('signing_date')
category_id = fields.Integer('category')
category = fields.Text('category__description_pt')
district = fields.Text('district__name')
council = fields.Text('council__name')
objects = Manager()
class Meta:
model = Contract
query = Contract.default_objects.all()
class TenderIndex(indexes.Index):
description = fields.Text('description')
category = fields.Text('category__description_pt')
publication_date = fields.Date('publication_date')
class Meta:
model = Tender
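# A query sketch (assumption: the manager exposes django-sphinxql's documented
# `search()` lookup against the generated Sphinx index):
#
#   for contract in ContractIndex.objects.search('@description roads'):
#       print(contract.signing_date, contract.district)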
| {
"content_hash": "d636ada1581a7d347bf48a8ae76bdb31",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 24.612244897959183,
"alnum_prop": 0.6674958540630183,
"repo_name": "jorgecarleitao/public-contracts",
"id": "226cefe0f1d9594dcbe5ed61cbf31f7f62372eab",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contracts/indexes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "858"
},
{
"name": "HTML",
"bytes": "144711"
},
{
"name": "Python",
"bytes": "263813"
},
{
"name": "Shell",
"bytes": "617"
}
],
"symlink_target": ""
} |
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urllib, urlparse, json, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import cache
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['moviexk.com', 'moviexk.org']
self.base_link = 'https://moviexk.org'
self.search_link = '/search/%s'
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def searchMovie(self, title, year, aliases):
try:
url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
url = client.request(url, output='geturl')
if url == None:
t = cleantitle.get(title)
q = '%s %s' % (title, year)
q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(q))
r = client.request(q)
r = client.parseDOM(r, 'div', attrs={'class': 'inner'})
r = client.parseDOM(r, 'div', attrs={'class': 'info'})
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
r = [(i[0], re.findall('(?:^Watch Movie |^Watch movies |^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
url = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
if url == None: raise Exception()
return url
except:
return
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = self.searchMovie(title, year, aliases)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
q = '%s' % tvshowtitle
q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(q))
r = client.request(q)
r = client.parseDOM(r, 'div', attrs={'class': 'inner'})
r = client.parseDOM(r, 'div', attrs={'class': 'info'})
r = zip(client.parseDOM(r, 'a', ret='href'),client.parseDOM(r, 'a', ret='title'))
r = [(i[0], re.findall('(?:^Watch Movie |^Watch movies |^Watch |)(.+?)\((.+?)', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
r = [(i[0], i[1].rsplit('TV Series')[0].strip('(')) for i in r if i[1]]
r = [(urllib.unquote_plus(i[0]), i[1]) for i in r]
r = [(urlparse.urlparse(i[0]).path, i[1]) for i in r]
r = [i for i in r if self.matchAlias(i[1], aliases)]
r = urlparse.urljoin(self.base_link, r[0][0].strip())
if '/watch-movie-' in r: r = re.sub('/watch-movie-|-\d+$', '/', r)
y = re.findall('(\d{4})', r)
if y:
y = y[0]
else:
y = client.request(r)
y = re.findall('(?:D|d)ate\s*:\s*(\d{4})', y)[0]
if not year == y: raise Exception()
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = '%s?season=%01d&episode=%01d' % (url, int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
f = urlparse.urljoin(self.base_link, url)
url = f.rsplit('?', 1)[0]
r = client.request(url, mobile=True)
p = client.parseDOM(r, 'div', attrs = {'id': 'servers'})
if not p:
p = client.parseDOM(r, 'div', attrs = {'class': 'btn-groups.+?'})
p = client.parseDOM(p, 'a', ret='href')[0]
p = client.request(p, mobile=True)
p = client.parseDOM(p, 'div', attrs = {'id': 'servers'})
r = client.parseDOM(p, 'li')
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
try:
s = urlparse.parse_qs(urlparse.urlparse(f).query)['season'][0]
e = urlparse.parse_qs(urlparse.urlparse(f).query)['episode'][0]
r = [(i[0], re.findall('(\d+)', i[1])) for i in r]
r = [(i[0], '%01d' % int(i[1][0]), '%01d' % int(i[1][1])) for i in r if len(i[1]) > 1]
r = [i[0] for i in r if s == i[1] and e == i[2]]
except:
r = [i[0] for i in r]
for u in r:
try:
headers = {'Referer': u}
url = client.request(u, headers=headers)
url = client.parseDOM(url, 'source', ret='src')
for i in url:
rd = client.request(i, headers=headers, output='geturl')
if '.google' in rd:
sources.append({'source': 'gvideo', 'quality': directstream.googletag(rd)[0]['quality'], 'language': 'en', 'url': rd, 'direct': True, 'debridonly': False})
except:
pass
try:
url = client.request(u, mobile=True)
url = client.parseDOM(url, 'source', ret='src')
if '../moviexk.php' in url[0]:
url[0] = url[0].replace('..','')
url[0] = urlparse.urljoin(self.base_link, url[0])
url[0] = client.request(url[0], mobile=True, output='geturl')
else:
url = [i.strip().split()[0] for i in url]
for i in url:
try:
sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
| {
"content_hash": "9980038aade5eb9f210e79b1bf4c0552",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 183,
"avg_line_length": 38.02040816326531,
"alnum_prop": 0.4918142780461621,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "efc039a2b1d4e79d805cd6bf97fc427773ad4c0d",
"size": "7493",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "script.module.exodus/lib/resources/lib/sources/en/to_be_fixed/sitedown/moviexk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import os
import sys
import copy
import time
import tarfile
import base64
sys.path.append('../../')
from externals.simple_oss import SimpleOss
from batchcompute import (
Client, JobDescription, TaskDag, TaskDescription, ResourceDescription
)
import config as cfg
INSTANCE_COUNT = 4
oss_clnt = SimpleOss(cfg.OSS_HOST, cfg.ID, cfg.KEY)
def upload_worker(bucket, local_dir, oss_path):
'''
A function to help upload worker package to oss.
'''
local_tarfile = 'worker.tar.gz'
if os.path.exists(local_tarfile): os.remove(local_tarfile)
def do_tar(worker_dir, tar_file):
'''
A function to tar worker package.
'''
tar = tarfile.open(tar_file, 'w:gz')
cwd = os.getcwd()
os.chdir(worker_dir)
for root,dir,files in os.walk('.'):
for file in files:
tar.add(os.path.join(root, file))
os.chdir(cwd)
tar.close()
do_tar(local_dir, local_tarfile)
oss_clnt.upload(bucket, local_tarfile, oss_path)
def get_job_desc(package_path, verbose=True):
job_desc = JobDescription()
find_task= TaskDescription()
# find task description map.
find_task.PackageUri = package_path
find_task.ProgramName = 'find_prime_multi_instance_worker.py'
find_task.ProgramType = 'python'
find_task.ImageId = cfg.IMAGE_ID
find_task.InstanceCount = INSTANCE_COUNT
find_task.EnvironmentVariables = {}
find_task.StdoutRedirectPath = cfg.LOG_PATH
find_task.StderrRedirectPath = cfg.LOG_PATH
# Create task dag.
task_dag = TaskDag()
task_dag.add_task(task_name='Find', task=find_task)
# find prime job description.
job_desc.TaskDag = task_dag
job_desc.JobName = 'find-prime'
job_desc.Priority = 1
return job_desc
def main():
upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)
# Submit job to batch compute.
clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
job_desc = get_job_desc(cfg.FULL_PACKAGE)
job = clnt.create_job(job_desc)
t = 10
print('Sleep %s second, please wait.'%t)
time.sleep(t)
# Wait for jobs terminated.
    while True:
s = clnt.get_job(job)
if s.State in ['Waiting', 'Running']:
print('Job %s is now %s'%(job, s.State))
time.sleep(3)
continue
else:
# 'Failed', 'Stopped', 'Finished'
print('Job %s is now %s'%(job, s.State))
if s.State == 'Finished':
result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.OUTPUT_PATH)
# Print out all prime numbers from 0 to 10000.
                print(result.splitlines())
break
clnt.delete_job(job)
if __name__ == '__main__':
main()
| {
"content_hash": "d7e8d4b24476b5c2f1343d2935c34737",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 28.926315789473684,
"alnum_prop": 0.6171761280931587,
"repo_name": "luzhijun/Optimization",
"id": "38576dc76271af343b5e9be779e673ba9644e352",
"size": "2748",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "cma-es/batchcompute_python_sdk/examples/find_prime_multi_instance.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11592"
},
{
"name": "C++",
"bytes": "41273"
},
{
"name": "CSS",
"bytes": "8912"
},
{
"name": "HTML",
"bytes": "845295"
},
{
"name": "JavaScript",
"bytes": "185036"
},
{
"name": "Jupyter Notebook",
"bytes": "1680887"
},
{
"name": "Makefile",
"bytes": "166"
},
{
"name": "Matlab",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "1912745"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
} |
length = 12
width = 10
height = 6
face1 = length * width
face2 = length * height
face3 = width * height
area = (2 * face1) + (2 * face2) + (2 * face3)
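# worked check: face1 = 120, face2 = 72, face3 = 60,
# so area = 240 + 144 + 120 = 504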
print "The surface area of a rectangular solid with"
print "length", length
print "width", width
print "height", height
print "is", area, "square units" | {
"content_hash": "d17d2dafc86cc94ca0e35eb2ea0db99b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 25.25,
"alnum_prop": 0.6798679867986799,
"repo_name": "guvjose/ComputerScience1",
"id": "0e12e62122053719481c6de9a2443ec77b4de8e9",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Labs/Lab_1/lab1_area.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9072"
}
],
"symlink_target": ""
} |
import os
import tempfile
import warnings
from subprocess import PIPE
from nltk.internals import (
_java_options,
config_java,
find_jar_iter,
find_jars_within_path,
java,
)
from nltk.parse.api import ParserI
from nltk.parse.dependencygraph import DependencyGraph
from nltk.tree import Tree
_stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml"
class GenericStanfordParser(ParserI):
"""Interface to the Stanford Parser"""
_MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar"
_JAR = r"stanford-parser\.jar"
_MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser"
_USE_STDIN = False
_DOUBLE_SPACED_OUTPUT = False
def __init__(
self,
path_to_jar=None,
path_to_models_jar=None,
model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
encoding="utf8",
verbose=False,
java_options="-mx4g",
corenlp_options="",
):
# find the most recent code and model jar
stanford_jar = max(
find_jar_iter(
self._JAR,
path_to_jar,
env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"),
searchpath=(),
url=_stanford_url,
verbose=verbose,
is_regex=True,
),
key=lambda model_path: os.path.dirname(model_path),
)
model_jar = max(
find_jar_iter(
self._MODEL_JAR_PATTERN,
path_to_models_jar,
env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"),
searchpath=(),
url=_stanford_url,
verbose=verbose,
is_regex=True,
),
key=lambda model_path: os.path.dirname(model_path),
)
# self._classpath = (stanford_jar, model_jar)
# Adding logging jar files to classpath
stanford_dir = os.path.split(stanford_jar)[0]
self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir))
self.model_path = model_path
self._encoding = encoding
self.corenlp_options = corenlp_options
self.java_options = java_options
def _parse_trees_output(self, output_):
res = []
cur_lines = []
cur_trees = []
blank = False
for line in output_.splitlines(False):
if line == "":
if blank:
res.append(iter(cur_trees))
cur_trees = []
blank = False
elif self._DOUBLE_SPACED_OUTPUT:
cur_trees.append(self._make_tree("\n".join(cur_lines)))
cur_lines = []
blank = True
else:
res.append(iter([self._make_tree("\n".join(cur_lines))]))
cur_lines = []
else:
cur_lines.append(line)
blank = False
return iter(res)
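    # Output-shape note for _parse_trees_output (illustrative): in the default
    # mode each blank line in the raw parser output terminates one tree; when
    # _DOUBLE_SPACED_OUTPUT is set, trees within a sentence block are separated
    # by a single blank line and the block itself ends at two consecutive
    # blank lines.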
def parse_sents(self, sentences, verbose=False):
"""
Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
list where each sentence is a list of words.
Each sentence will be automatically tagged with this StanfordParser instance's
tagger.
If whitespaces exists inside a token, then the token will be treated as
separate tokens.
:param sentences: Input sentences to parse
:type sentences: list(list(str))
:rtype: iter(iter(Tree))
"""
cmd = [
self._MAIN_CLASS,
"-model",
self.model_path,
"-sentences",
"newline",
"-outputFormat",
self._OUTPUT_FORMAT,
"-tokenized",
"-escaper",
"edu.stanford.nlp.process.PTBEscapingProcessor",
]
return self._parse_trees_output(
self._execute(
cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose
)
)
def raw_parse(self, sentence, verbose=False):
"""
Use StanfordParser to parse a sentence. Takes a sentence as a string;
before parsing, it will be automatically tokenized and tagged by
the Stanford Parser.
:param sentence: Input sentence to parse
:type sentence: str
:rtype: iter(Tree)
"""
return next(self.raw_parse_sents([sentence], verbose))
def raw_parse_sents(self, sentences, verbose=False):
"""
Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
list of strings.
Each sentence will be automatically tokenized and tagged by the Stanford Parser.
:param sentences: Input sentences to parse
:type sentences: list(str)
:rtype: iter(iter(Tree))
"""
cmd = [
self._MAIN_CLASS,
"-model",
self.model_path,
"-sentences",
"newline",
"-outputFormat",
self._OUTPUT_FORMAT,
]
return self._parse_trees_output(
self._execute(cmd, "\n".join(sentences), verbose)
)
def tagged_parse(self, sentence, verbose=False):
"""
Use StanfordParser to parse a sentence. Takes a sentence as a list of
(word, tag) tuples; the sentence must have already been tokenized and
tagged.
:param sentence: Input sentence to parse
:type sentence: list(tuple(str, str))
:rtype: iter(Tree)
"""
return next(self.tagged_parse_sents([sentence], verbose))
def tagged_parse_sents(self, sentences, verbose=False):
"""
Use StanfordParser to parse multiple sentences. Takes multiple sentences
where each sentence is a list of (word, tag) tuples.
The sentences must have already been tokenized and tagged.
:param sentences: Input sentences to parse
:type sentences: list(list(tuple(str, str)))
:rtype: iter(iter(Tree))
"""
tag_separator = "/"
cmd = [
self._MAIN_CLASS,
"-model",
self.model_path,
"-sentences",
"newline",
"-outputFormat",
self._OUTPUT_FORMAT,
"-tokenized",
"-tagSeparator",
tag_separator,
"-tokenizerFactory",
"edu.stanford.nlp.process.WhitespaceTokenizer",
"-tokenizerMethod",
"newCoreLabelTokenizerFactory",
]
# We don't need to escape slashes as "splitting is done on the last instance of the character in the token"
return self._parse_trees_output(
self._execute(
cmd,
"\n".join(
" ".join(tag_separator.join(tagged) for tagged in sentence)
for sentence in sentences
),
verbose,
)
)
def _execute(self, cmd, input_, verbose=False):
encoding = self._encoding
cmd.extend(["-encoding", encoding])
if self.corenlp_options:
cmd.extend(self.corenlp_options.split())
default_options = " ".join(_java_options)
# Configure java.
config_java(options=self.java_options, verbose=verbose)
# Windows is incompatible with NamedTemporaryFile() without passing in delete=False.
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file:
# Write the actual sentences to the temporary input file
if isinstance(input_, str) and encoding:
input_ = input_.encode(encoding)
input_file.write(input_)
input_file.flush()
# Run the tagger and get the output.
if self._USE_STDIN:
input_file.seek(0)
stdout, stderr = java(
cmd,
classpath=self._classpath,
stdin=input_file,
stdout=PIPE,
stderr=PIPE,
)
else:
cmd.append(input_file.name)
stdout, stderr = java(
cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE
)
stdout = stdout.replace(b"\xc2\xa0", b" ")
stdout = stdout.replace(b"\x00\xa0", b" ")
stdout = stdout.decode(encoding)
os.unlink(input_file.name)
# Return java configurations to their default values.
config_java(options=default_options, verbose=False)
return stdout
class StanfordParser(GenericStanfordParser):
"""
>>> parser=StanfordParser(
... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
... ) # doctest: +SKIP
>>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])]
>>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents((
... "the quick brown fox jumps over the lazy dog",
... "the quick grey wolf jumps over the lazy fox"
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP',
[Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP',
[Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']),
Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])]
>>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents((
... "I 'm a dog".split(),
... "This is my friends ' cat ( the tabby )".split(),
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]),
Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP',
[Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']),
Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []),
Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])]
>>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents((
... (
... ("The", "DT"),
... ("quick", "JJ"),
... ("brown", "JJ"),
... ("fox", "NN"),
... ("jumped", "VBD"),
... ("over", "IN"),
... ("the", "DT"),
... ("lazy", "JJ"),
... ("dog", "NN"),
... (".", "."),
... ),
... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP',
[Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])]
"""
_OUTPUT_FORMAT = "penn"
def __init__(self, *args, **kwargs):
warnings.warn(
"The StanfordParser will be deprecated\n"
"Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def _make_tree(self, result):
return Tree.fromstring(result)
class StanfordDependencyParser(GenericStanfordParser):
"""
>>> dep_parser=StanfordDependencyParser(
... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
... ) # doctest: +SKIP
>>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])]
>>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
[[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
... "The quick brown fox jumps over the lazy dog.",
... "The quick grey wolf jumps over the lazy fox."
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]),
Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])]
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
... "I 'm a dog".split(),
... "This is my friends ' cat ( the tabby )".split(),
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])]
>>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents((
... (
... ("The", "DT"),
... ("quick", "JJ"),
... ("brown", "JJ"),
... ("fox", "NN"),
... ("jumped", "VBD"),
... ("over", "IN"),
... ("the", "DT"),
... ("lazy", "JJ"),
... ("dog", "NN"),
... (".", "."),
... ),
... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
[[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
"""
_OUTPUT_FORMAT = "conll2007"
def __init__(self, *args, **kwargs):
warnings.warn(
"The StanfordDependencyParser will be deprecated\n"
"Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def _make_tree(self, result):
return DependencyGraph(result, top_relation_label="root")
class StanfordNeuralDependencyParser(GenericStanfordParser):
"""
>>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP
>>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP
>>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])]
>>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
[[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det',
(u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'),
u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')),
((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det',
(u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'),
u'punct', (u'.', u'.'))]]
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
... "The quick brown fox jumps over the lazy dog.",
... "The quick grey wolf jumps over the lazy fox."
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over',
'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']),
Tree('fox', ['over', 'the', 'lazy']), '.'])]
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
... "I 'm a dog".split(),
... "This is my friends ' cat ( the tabby )".split(),
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
[Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends',
['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])]
"""
_OUTPUT_FORMAT = "conll"
_MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP"
_JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar"
_MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar"
_USE_STDIN = True
_DOUBLE_SPACED_OUTPUT = True
def __init__(self, *args, **kwargs):
warnings.warn(
"The StanfordNeuralDependencyParser will be deprecated\n"
"Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse"
def tagged_parse_sents(self, sentences, verbose=False):
"""
Currently unimplemented because the neural dependency parser (and
the StanfordCoreNLP pipeline class) doesn't support passing in pre-
tagged tokens.
"""
raise NotImplementedError(
"tagged_parse[_sents] is not supported by "
"StanfordNeuralDependencyParser; use "
"parse[_sents] or raw_parse[_sents] instead."
)
def _make_tree(self, result):
return DependencyGraph(result, top_relation_label="ROOT")
| {
"content_hash": "327c54b44767c164d0c50114c13fc111",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 152,
"avg_line_length": 40.29004329004329,
"alnum_prop": 0.5256258729988181,
"repo_name": "nltk/nltk",
"id": "03db9378253404cda806784ed4225f2c0c354935",
"size": "18842",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nltk/parse/stanford.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "24786"
},
{
"name": "Jupyter Notebook",
"bytes": "55608"
},
{
"name": "Makefile",
"bytes": "7983"
},
{
"name": "Python",
"bytes": "4831858"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
} |
import unittest
import sympy as sp
import numpy as np
import sys
import os
sys.path.append('.')
import stats.methods as methods
from stats.utils import *
class TestBasicSearch(unittest.TestCase):
def setUp(self):
self.num_vals = 20 # number of source values
def test_quadratic(self):
sym_x, sym_y = sp.symbols('x y')
sym_a, sym_b, sym_c = sp.symbols('a b c')
sym_expr = sp.sympify('a*(x**2) + b*x + c')
sym_expr_delta = sp.sympify('y - (a*(x**2) + b*x + c)')
min_x = 1
max_x = 20
real_a = 2 # real 'a' value of source distribution
real_b = 3 # real 'b' value of source distiribution
real_c = 5 # real 'c' value of source distiribution
# real X values without errors
x = np.linspace(min_x, max_x,
self.num_vals, dtype=np.float)
# real Y values without errors
y = np.vectorize(
sp.lambdify(
sym_x, sym_expr.subs(
{sym_a: real_a,
sym_b: real_b,
sym_c: real_c}
),
'numpy'
)
)(x)
third_len = self.num_vals // 3
# get base values as half-distant pairs of values
base_values_dist = {
sym_x: (x[0], x[third_len], x[third_len * 2]),
sym_y: (y[0], y[third_len], y[third_len * 2])
}
# find params with basic method
basic_a, basic_b, basic_c = methods.search_basic(
delta_expression=sym_expr_delta,
parameters=(sym_a, sym_b, sym_c),
values=base_values_dist
)
self.assertAlmostEqual(real_a, basic_a, places=5)
self.assertAlmostEqual(real_b, basic_b, places=5)
self.assertAlmostEqual(real_c, basic_c, places=5)
| {
"content_hash": "e0ab41929cffdd3f813b3ef64d89159e",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 70,
"avg_line_length": 29.359375,
"alnum_prop": 0.5141032464076637,
"repo_name": "budnyjj/NLRA",
"id": "688027061137a05b1e85dd7ad750b6aa5a197ff0",
"size": "1879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_three_dim_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109987"
}
],
"symlink_target": ""
} |
import os
import time
import stat
import json
import random
import ctypes
import inspect
import requests
import traceback
import threading
from collections import Counter
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT
from common.params import Params
from common.api import api_get
fake_upload = os.getenv("FAKEUPLOAD") is not None
def raise_on_thread(t, exctype):
  '''Raises an exception in the thread t'''
  for ctid, tobj in threading._active.items():
    if tobj is t:
      tid = ctid
      break
  else:
    raise Exception("Could not find thread")
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def listdir_with_creation_date(d):
lst = os.listdir(d)
for fn in lst:
try:
st = os.stat(os.path.join(d, fn))
ctime = st[stat.ST_CTIME]
yield (ctime, fn)
except OSError:
cloudlog.exception("listdir_with_creation_date: stat failed?")
yield (None, fn)
def listdir_by_creation_date(d):
times_and_paths = list(listdir_with_creation_date(d))
return [path for _, path in sorted(times_and_paths)]
def clear_locks(root):
for logname in os.listdir(root):
path = os.path.join(root, logname)
try:
for fname in os.listdir(path):
if fname.endswith(".lock"):
os.unlink(os.path.join(path, fname))
except OSError:
cloudlog.exception("clear_locks failed")
class Uploader(object):
def __init__(self, dongle_id, access_token, root):
self.dongle_id = dongle_id
self.access_token = access_token
self.root = root
self.upload_thread = None
self.last_resp = None
self.last_exc = None
def clean_dirs(self):
try:
for logname in os.listdir(self.root):
path = os.path.join(self.root, logname)
# remove empty directories
if not os.listdir(path):
os.rmdir(path)
except OSError:
cloudlog.exception("clean_dirs failed")
def gen_upload_files(self):
if not os.path.isdir(self.root):
return
for logname in listdir_by_creation_date(self.root):
path = os.path.join(self.root, logname)
names = os.listdir(path)
if any(name.endswith(".lock") for name in names):
continue
for name in names:
key = os.path.join(logname, name)
fn = os.path.join(path, name)
yield (name, key, fn)
def get_data_stats(self):
name_counts = Counter()
total_size = 0
for name, key, fn in self.gen_upload_files():
name_counts[name] += 1
total_size += os.stat(fn).st_size
return dict(name_counts), total_size
def next_file_to_upload(self):
# try to upload log files first
for name, key, fn in self.gen_upload_files():
if name in ["rlog", "rlog.bz2"]:
return (key, fn, 0)
    # then upload the camera files
for name, key, fn in self.gen_upload_files():
if not name.endswith('.lock') and not name.endswith(".tmp"):
return (key, fn, 1)
return None
def do_upload(self, key, fn):
try:
url_resp = api_get("v1.1/"+self.dongle_id+"/upload_url/", timeout=2, path=key, access_token=self.access_token)
url_resp_json = json.loads(url_resp.text)
url = url_resp_json['url']
headers = url_resp_json['headers']
cloudlog.info({"upload_url v1.1", url, str(headers)})
if fake_upload:
print "*** WARNING, THIS IS A FAKE UPLOAD TO %s ***" % url
class FakeResponse(object):
def __init__(self):
self.status_code = 200
self.last_resp = FakeResponse()
else:
with open(fn, "rb") as f:
self.last_resp = requests.put(url, data=f, headers=headers)
except Exception as e:
self.last_exc = (e, traceback.format_exc())
raise
def normal_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
try:
self.do_upload(key, fn)
except Exception:
pass
return self.last_resp
def killable_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
self.upload_thread = threading.Thread(target=lambda: self.do_upload(key, fn))
self.upload_thread.start()
self.upload_thread.join()
self.upload_thread = None
return self.last_resp
def abort_upload(self):
thread = self.upload_thread
if thread is None:
return
if not thread.is_alive():
return
raise_on_thread(thread, SystemExit)
thread.join()
def upload(self, key, fn):
# write out the bz2 compress
if fn.endswith("log"):
ext = ".bz2"
cloudlog.info("compressing %r to %r", fn, fn+ext)
if os.system("nice -n 19 bzip2 -c %s > %s.tmp && mv %s.tmp %s%s && rm %s" % (fn, fn, fn, fn, ext, fn)) != 0:
cloudlog.exception("upload: bzip2 compression failed")
return False
# assuming file is named properly
key += ext
fn += ext
try:
sz = os.path.getsize(fn)
except OSError:
cloudlog.exception("upload: getsize failed")
return False
cloudlog.event("upload", key=key, fn=fn, sz=sz)
cloudlog.info("checking %r with size %r", key, sz)
if sz == 0:
# can't upload files of 0 size
os.unlink(fn) # delete the file
success = True
else:
cloudlog.info("uploading %r", fn)
# stat = self.killable_upload(key, fn)
stat = self.normal_upload(key, fn)
if stat is not None and stat.status_code == 200:
cloudlog.event("upload_success", key=key, fn=fn, sz=sz)
os.unlink(fn) # delete the file
success = True
else:
cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz)
success = False
self.clean_dirs()
return success
def uploader_fn(exit_event):
cloudlog.info("uploader_fn")
params = Params()
dongle_id, access_token = params.get("DongleId"), params.get("AccessToken")
if dongle_id is None or access_token is None:
cloudlog.info("uploader MISSING DONGLE_ID or ACCESS_TOKEN")
raise Exception("uploader can't start without dongle id and access token")
uploader = Uploader(dongle_id, access_token, ROOT)
while True:
    backoff = 0.1  # initial retry delay in seconds; on failure the uploader sleeps backoff plus jitter, then doubles it
while True:
if exit_event.is_set():
return
d = uploader.next_file_to_upload()
if d is None:
break
key, fn, _ = d
cloudlog.info("to upload %r", d)
success = uploader.upload(key, fn)
if success:
backoff = 0.1
else:
cloudlog.info("backoff %r", backoff)
time.sleep(backoff + random.uniform(0, backoff))
backoff *= 2
cloudlog.info("upload done, success=%r", success)
time.sleep(5)
def main(gctx=None):
uploader_fn(threading.Event())
if __name__ == "__main__":
main()
| {
"content_hash": "45cc44a50649e126e55f0d9c61d3b384",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 116,
"avg_line_length": 27.353383458646615,
"alnum_prop": 0.6227322704782847,
"repo_name": "heidecjj/openpilot",
"id": "ccaf63252861c45421673def3b919c2fe0aa7ec4",
"size": "7298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfdrive/loggerd/uploader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654130"
},
{
"name": "C++",
"bytes": "24497"
},
{
"name": "Cap'n Proto",
"bytes": "27780"
},
{
"name": "Makefile",
"bytes": "7859"
},
{
"name": "Python",
"bytes": "207584"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
} |
import copy
import unittest
from mahjong import flow
from mahjong.patterns import MatchResult
from mahjong.types import Hand, GameContext, GameSettings, Tile, TileGroup, Wall
class TestHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.handler = flow.Handler()
def test_default_implementation(self):
with self.assertRaises(NotImplementedError):
self.handler.handle(self.context)
class TestIllegalState(unittest.TestCase):
def test_illegal_state(self):
c = GameContext()
c.state = 'no-such-state'
with self.assertRaises(ValueError):
flow.next(c)
class TestStartHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.wall = Wall()
def test_wall_shuffled(self):
# get first ten tiles
orig_tiles = [self.context.wall.tiles[i] for i in xrange(10)]
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'wall-built')
# test if it's shuffled
new_tiles = [self.context.wall.tiles[i] for i in xrange(10)]
self.assertNotEqual(orig_tiles, new_tiles)
def test_wall_complete(self):
# test if wall gets reset
self.context.wall.draw()
self.assertEqual(self.context.wall.num_tiles(), 143)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.wall.num_tiles(), 144)
def test_illegal_case(self):
self.context.wall = None
context2 = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, context2)
class TestWallBuiltHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext(settings=GameSettings())
self.context.state = 'wall-built'
# build wall
self.context.wall = Wall()
self.context.wall.shuffle()
def test_deal_16_tiles(self):
self.context.settings.num_hand_tiles = 16
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'dealt')
self.assert_num_free_tiles(self.context, 16)
def test_deal_13_tiles(self):
self.context.settings.num_hand_tiles = 13
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'dealt')
self.assert_num_free_tiles(self.context, 13)
def test_illegal_case(self):
self.context.wall.draw()
context2 = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, context2)
def assert_num_free_tiles(self, context, num_tiles):
self.assertEqual(len(context.players[0].hand.free_tiles), num_tiles)
self.assertEqual(len(context.players[1].hand.free_tiles), num_tiles)
self.assertEqual(len(context.players[2].hand.free_tiles), num_tiles)
self.assertEqual(len(context.players[3].hand.free_tiles), num_tiles)
self.assertEqual(context.players[0].hand.fixed_groups, [])
self.assertEqual(context.players[1].hand.fixed_groups, [])
self.assertEqual(context.players[2].hand.fixed_groups, [])
self.assertEqual(context.players[3].hand.fixed_groups, [])
self.assertEqual(context.players[0].hand.flowers, [])
self.assertEqual(context.players[1].hand.flowers, [])
self.assertEqual(context.players[2].hand.flowers, [])
self.assertEqual(context.players[3].hand.flowers, [])
self.assertIsNone(context.players[0].hand.last_tile)
self.assertIsNone(context.players[1].hand.last_tile)
self.assertIsNone(context.players[2].hand.last_tile)
self.assertIsNone(context.players[3].hand.last_tile)
class TestDealtHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext(settings=GameSettings())
self.context.state = 'dealt'
# build wall
self.context.wall = Wall()
self.context.wall.shuffle()
# deal some flowers to players
self.context.players[0].hand.add_free_tile(Tile.PLUM)
self.context.players[0].hand.add_free_tile(Tile.ORCHID)
self.context.players[1].hand.add_free_tile(Tile.BAMBOO)
self.context.players[1].hand.add_free_tile(Tile.CHRYSANTH)
self.context.players[3].hand.add_free_tile(Tile.SPRING)
self.context.players[3].hand.add_free_tile(Tile.SUMMER)
self.context.players[3].hand.add_free_tile(Tile.AUTUMN)
self.context.players[3].hand.add_free_tile(Tile.WINTER)
# remove flowers in wall
for flower in Tile.FLOWERS:
self.context.wall.tiles.remove(flower)
# add more tiles until each hand has 16 tiles
for player in self.context.players:
hand = player.hand
while len(hand.free_tiles) < 16:
tile = self.context.wall.draw()
hand.add_free_tile(tile)
def test_replace_flowers(self):
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.players[0].hand.flowers, [Tile.PLUM, Tile.ORCHID])
self.assertEqual(self.context.players[1].hand.flowers, [Tile.BAMBOO, Tile.CHRYSANTH])
self.assertEqual(self.context.players[2].hand.flowers, [])
self.assertEqual(self.context.players[3].hand.flowers, [Tile.SPRING, Tile.SUMMER, Tile.AUTUMN, Tile.WINTER])
        # free_tiles should still have 16 tiles and contain no flowers
self.assertEqual(len(self.context.players[0].hand.free_tiles), 16)
self.assertEqual(len(self.context.players[1].hand.free_tiles), 16)
self.assertEqual(len(self.context.players[2].hand.free_tiles), 16)
self.assertEqual(len(self.context.players[3].hand.free_tiles), 16)
        self.assertEqual([t for t in self.context.players[0].hand.free_tiles if t.is_general_flower()], [])
        self.assertEqual([t for t in self.context.players[1].hand.free_tiles if t.is_general_flower()], [])
        self.assertEqual([t for t in self.context.players[2].hand.free_tiles if t.is_general_flower()], [])
        self.assertEqual([t for t in self.context.players[3].hand.free_tiles if t.is_general_flower()], [])
def test_illegal_case(self):
self.context.wall.draw()
context2 = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, context2)
class TestDrawingHandler(unittest.TestCase):
def setUp(self):
gs = GameSettings()
gs.tie_wall = 16
gs.tie_wall_per_kong = 2
self.context = GameContext(settings=gs)
self.context.state = 'drawing'
# build wall
self.context.wall = Wall()
self.context.wall.shuffle()
hands = [player.hand for player in self.context.players]
hands[0].free_tiles = [Tile.CHAR1]
hands[1].free_tiles = [Tile.CHAR2]
hands[2].free_tiles = [Tile.CHAR3]
hands[3].free_tiles = [Tile.CHAR4]
        for __ in range(4):
self.context.wall.draw()
def test_initial_drawing(self):
hand = self.context.player().hand
# initial condition assertion
self.assertEqual(self.context.cur_player_idx, 0)
self.assertIsNone(self.context.last_player_idx)
self.assertIsNone(hand.last_tile)
orig_num_tiles = self.context.wall.num_tiles()
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.wall.num_tiles(), orig_num_tiles - 1)
self.assertTrue(hand.last_tile)
self.assertEqual(self.context.cur_player_idx, 0)
self.assertIsNone(self.context.last_player_idx)
self.assertEqual(self.context.state, 'drawn')
def test_middle_game(self):
self.context.cur_player_idx = 2
self.context.last_player_idx = 0
hand = self.context.player().hand
orig_num_tiles = self.context.wall.num_tiles()
self.assertIsNone(hand.last_tile)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.wall.num_tiles(), orig_num_tiles - 1)
self.assertTrue(hand.last_tile)
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.last_player_idx, 0)
self.assertEqual(self.context.state, 'drawn')
def test_seven_flowers_1(self):
        # Scenario: player 3 already has seven flowers, and player 1 draws
        # another flower.
# let player 3 have seven flowers
self.context.players[3].hand.flowers = Tile.FLOWERS[0:7]
# player 1 draws a flower -> player 3 can win
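        # draw_for_player is a helper defined elsewhere in this module (not
        # shown here); judging from its call sites it sets cur_player_idx /
        # last_player_idx and gives the named player `tile` as hand.last_tile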
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.WINTER)
self.context.player().hand.move_flowers()
self.context.player().extra['flowered'] = True
        # player 3 hasn't made a decision yet
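        # flow.next() returns a falsy result object while it is blocked on
        # player input; result.viable_decisions lists each player's options
        # (None for players with nothing to decide)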
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, None, ['win', 'skip']])
# player 3 decides to win
self.context.players[3].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
self.assertEqual(self.context.cur_player_idx, 3)
self.assertEqual(self.context.last_player_idx, 1)
self.assertTrue(self.context.player().hand.last_tile)
self.assertEqual(self.context.extra.get('flower_chucker'), 1)
self.assertEqual(len(self.context.player().hand.flowers), 8)
# check if intermediate data is cleaned up
self.assertEqual(len(self.context.extra), 1)
self.assertEqual(self.context.players[1].extra, {})
self.assertEqual(self.context.players[3].extra, { 'flowered': True })
def test_seven_flowers_2(self):
        # Scenario: player 2 already has six flowers and player 3 has one
        # flower; player 2 then draws another flower.
# player 2 has six flowers, player 3 has one flower
self.context.players[2].hand.flowers = Tile.FLOWERS[0:6]
self.context.players[3].hand.add_flower(Tile.FLOWERS[6])
# player 2 draws a flower -> player 2 can rob player 3's flower
draw_for_player(self.context, 2, last_player_idx=1, tile=Tile.FLOWERS[7])
self.context.player().hand.move_flowers()
self.context.player().extra['flowered'] = True
        # player 2 hasn't made a decision yet
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, ['win', 'skip'], None])
# player 2 decides to win
self.context.players[2].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.last_player_idx, 1)
self.assertTrue(self.context.player().hand.last_tile)
self.assertEqual(self.context.extra.get('flower_chucker'), 3)
self.assertEqual(len(self.context.player().hand.flowers), 8)
# check if intermediate data is cleaned up
self.assertEqual(len(self.context.extra), 1)
self.assertEqual(self.context.players[2].extra, { 'flowered': True })
self.assertEqual(self.context.players[3].extra, {})
def test_seven_flowers_no_one_can_win(self):
        # Scenario: player 3 has six flowers and player 1 draws a flower.
        # Nothing special should happen.
# let player 3 have six flowers
self.context.players[3].hand.flowers = Tile.FLOWERS[0:6]
# player 1 draws a flower
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.WINTER)
self.context.player().hand.move_flowers()
self.context.player().extra['flowered'] = True
num_tiles_before = self.context.wall.num_tiles()
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertNotEqual(self.context.player().hand.last_tile, Tile.WINTER)
self.assertEqual(self.context.wall.num_tiles(), num_tiles_before - 1)
# check if intermediate data is cleaned up
self.assertEqual(self.context.extra, {})
self.assertEqual(self.context.player().extra, { 'flowered': True })
def test_seven_flowers_declared_ready(self):
# let player 3 have seven flowers and declare ready
self.context.players[3].hand.flowers = Tile.FLOWERS[0:7]
self.context.players[3].extra['declared_ready'] = True
# player 2 draws a flower -> player 3 can win
draw_for_player(self.context, 2, last_player_idx=3, tile=Tile.WINTER)
self.context.player().hand.move_flowers()
self.context.player().extra['flowered'] = True
# player 3 wins without making a decision since he declared ready
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
self.assertEqual(self.context.cur_player_idx, 3)
self.assertEqual(self.context.last_player_idx, 2)
self.assertTrue(self.context.player().hand.last_tile)
self.assertEqual(self.context.extra.get('flower_chucker'), 2)
# check if intermediate data is cleaned up
self.assertEqual(len(self.context.extra), 1)
self.assertEqual(self.context.players[2].extra, {})
def test_seven_flowers_bot(self):
        # let player 3 have seven flowers and mark him as a bot
self.context.players[3].hand.flowers = Tile.FLOWERS[0:7]
self.context.players[3].extra['bot'] = True
# player 2 draws a flower -> player 3 can win
draw_for_player(self.context, 2, last_player_idx=3, tile=Tile.WINTER)
self.context.player().hand.move_flowers()
self.context.player().extra['flowered'] = True
# bot makes decision for player 3
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
self.assertEqual(self.context.cur_player_idx, 3)
self.assertEqual(self.context.last_player_idx, 2)
self.assertTrue(self.context.player().hand.last_tile)
self.assertEqual(self.context.extra.get('flower_chucker'), 2)
# check if intermediate data is cleaned up
self.assertEqual(len(self.context.extra), 1)
self.assertEqual(self.context.players[2].extra, {})
def test_wall_tie(self):
self.context.cur_player_idx = 3
self.context.last_player_idx = 2
hand = self.context.player().hand
# give player 3 two fake kongs
hand.fixed_groups.append(TileGroup([Tile.RED] * 4, TileGroup.KONG_CONCEALED))
hand.fixed_groups.append(TileGroup([Tile.GREEN] * 4, TileGroup.KONG_EXPOSED))
while self.context.wall.num_tiles() != 21:
self.context.wall.draw()
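        # tie threshold = tie_wall + kongs * tie_wall_per_kong = 16 + 2*2 = 20;
        # the draw below brings the wall from 21 down to exactly 20 tiles, so
        # the drawing attempt after it should end the game in a wall tie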
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
self.assertTrue(hand.last_tile)
self.context.state = 'drawing'
hand.last_tile = None
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertFalse(self.context.winners)
self.assertEqual(self.context.extra.get('tie_type'), 'wall')
def test_bad_context(self):
self.context.player().hand.last_tile = Tile.RED
context2 = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, context2)
self.context = context2
self.context.cur_player_idx = 2
self.context.player().extra['flowered'] = True
self.assertFalse(flow.next(self.context))
class TestDrawnHandler(unittest.TestCase):
def setUp(self):
gs = GameSettings()
gs.tie_wall = 16
gs.tie_wall_per_kong = 1
gs.tie_on_4_kongs = True
self.context = GameContext(settings=gs)
self.context.state = 'drawn'
# build wall
self.context.wall = Wall()
self.context.wall.shuffle()
hands = [p.hand for p in self.context.players]
# lousy hand for player 0
hands[0].add_free_tiles([Tile.CHAR2, Tile.CHAR4, Tile.CHAR9,
Tile.CIRCLE9, Tile.BAMBOO1, Tile.BAMBOO4,
Tile.EAST, Tile.SOUTH, Tile.WEST,
Tile.NORTH, Tile.RED, Tile.GREEN, Tile.GREEN])
# ready hand for player 1 - waiting for CHAR6 and CHAR9
hands[1].add_free_tiles([Tile.CHAR2, Tile.CHAR2, Tile.CHAR7,
Tile.CHAR8, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.SOUTH, Tile.SOUTH,
Tile.SOUTH, Tile.WHITE, Tile.WHITE, Tile.WHITE])
# lousy hand for player 2
hands[2].add_free_tiles([Tile.CIRCLE1, Tile.CIRCLE2, Tile.CIRCLE3,
Tile.BAMBOO5, Tile.BAMBOO6, Tile.BAMBOO7,
Tile.BAMBOO8, Tile.BAMBOO9, Tile.BAMBOO9,
Tile.EAST, Tile.EAST, Tile.WEST, Tile.NORTH])
# ready hand for player 3 - waiting for CHAR5 and CIRCLE3
hands[3].add_free_tiles([Tile.CHAR5, Tile.CHAR5, Tile.CHAR6,
Tile.CHAR7, Tile.CHAR8, Tile.CIRCLE3,
Tile.CIRCLE3, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.BAMBOO6, Tile.BAMBOO7, Tile.BAMBOO8])
        # remove the same number of tiles as were dealt (4 hands x 13 tiles)
        for __ in range(52):
self.context.wall.draw()
def test_4_kong_concealed_win(self):
self.context.settings.patterns_win.append('four-kongs')
        # player 2 already has 3 kongs, and drawing another EAST lets him
        # make the 4th kong
self.context.players[2].hand.fixed_groups = [
TileGroup([Tile.CIRCLE1] * 4, TileGroup.KONG_EXPOSED),
TileGroup([Tile.CIRCLE3] * 4, TileGroup.KONG_CONCEALED),
TileGroup([Tile.CIRCLE5] * 4, TileGroup.KONG_EXPOSED)
]
self.context.players[2].hand.add_free_tile(Tile.EAST)
draw_for_player(self.context, 2, last_player_idx=3, tile=Tile.EAST)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, ['win', 'skip'], None])
self.context.players[2].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [2])
self.assertEqual(self.context.players[2].extra.get('win_type'), 'self-picked')
def test_4_kong_exposed_win(self):
self.context.settings.patterns_win.append('four-kongs')
        # player 2 already has 3 kongs and 1 pong, and drawing another WHITE
        # lets him make an appended kong
self.context.players[2].hand.fixed_groups = [
TileGroup([Tile.CIRCLE1] * 4, TileGroup.KONG_EXPOSED),
TileGroup([Tile.CIRCLE3] * 4, TileGroup.KONG_CONCEALED),
TileGroup([Tile.WHITE] * 3, TileGroup.PONG),
TileGroup([Tile.CIRCLE5] * 4, TileGroup.KONG_EXPOSED)
]
draw_for_player(self.context, 2, last_player_idx=3, tile=Tile.WHITE)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, ['win', 'skip'], None])
# player 2 can win with an appended kong, but it's possible that
# somebody can rob the kong, so the state is switched to 'self-konging'
# instead of 'end'
self.context.players[2].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'self-konging')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.player().hand.last_tile, Tile.WHITE)
def test_eight_flowers(self):
# let player 1 have seven flowers
self.context.players[1].hand.flowers = Tile.FLOWERS[0:7]
# player 1 draws the 8th flower
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.CHRYSANTH)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(len(self.context.player().hand.flowers), 8)
self.assertFalse(self.context.player().hand.last_tile)
self.assertTrue(self.context.player().extra.get('flowered'))
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawn')
# assume player draws a RED which he cannot win with
self.context.player().hand.last_tile = Tile.RED
context2 = self.context.clone()
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'skip'], None, None])
self.context.players[1].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'flower-won')
# test it again, this time player has a winning hand
self.context = context2
self.context.player().hand.last_tile = Tile.CHAR6
self.context.players[1].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'self-picked')
def test_seven_flowers(self):
# player 3 has eight flowers, one of them was robbed from player 0
self.context.players[3].hand.flowers = copy.copy(Tile.FLOWERS)
self.context.extra['flower_chucker'] = 0
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.CIRCLE4)
context2 = self.context.clone()
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, None, ['win', 'skip']])
self.context.players[3].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [3])
self.assertEqual(self.context.players[3].extra.get('win_type'), 'flower-won')
self.assertEqual(self.context.extra.get('flower_chucker'), 0)
# test it again, this time player has a winning hand
self.context = context2
self.context.player().hand.last_tile = Tile.CHAR5
self.context.player().decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [3])
self.assertEqual(self.context.players[3].extra.get('win_type'), 'self-picked')
self.assertEqual(self.context.extra.get('flower_chucker'), 0)
def test_seven_flowers_plus_self_pick(self):
# player 1 has eight flowers, one of them was robbed from player 3
# and player 1 also gets a winning hand by self-picking
self.context.players[1].hand.flowers = copy.copy(Tile.FLOWERS)
self.context.extra['flower_chucker'] = 3
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.CHAR9)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'skip'], None, None])
self.context.players[1].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'self-picked')
self.assertIsNone(self.context.players[1].extra.get('chucker'))
self.assertEqual(self.context.extra.get('flower_chucker'), 3)
def test_flower(self):
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.SUMMER)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertIsNone(self.context.player().hand.last_tile)
self.assertIn(Tile.SUMMER, self.context.player().hand.flowers)
def test_no_choices_but_skip(self):
draw_for_player(self.context, 2, last_player_idx=1, tile=Tile.CIRCLE9)
self.context.player().decision = 'doesnt-matter'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.player().hand.last_tile, Tile.CIRCLE9)
def test_self_pick(self):
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.CIRCLE3)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, None, ['win', 'skip']])
self.context.player().decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [3])
self.assertEqual(self.context.players[3].extra.get('win_type'), 'self-picked')
def test_appended_kong(self):
self.context.players[2].hand.pong(Tile.EAST)
draw_for_player(self.context, 2, last_player_idx=1, tile=Tile.EAST)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, ['kong', 'skip'], None])
self.context.player().decision = 'kong'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'self-konging')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.players[2].hand.last_tile, Tile.EAST)
def test_concealed_kong(self):
self.context.players[0].hand.free_tiles[10] = Tile.GREEN
draw_for_player(self.context, 0, last_player_idx=1, tile=Tile.GREEN)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [['kong', 'skip'], None, None, None])
self.context.player().decision = 'kong'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'self-konging')
self.assertEqual(self.context.cur_player_idx, 0)
self.assertEqual(self.context.players[0].hand.last_tile, Tile.GREEN)
def test_skip_self_pick(self):
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.CHAR6)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'skip'], None, None])
self.context.player().decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.player().hand.last_tile, Tile.CHAR6)
def test_skip_kong(self):
self.context.players[3].hand.pong(Tile.CHAR5)
draw_for_player(self.context, 3, last_player_idx=1, tile=Tile.CHAR5)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, None, ['kong', 'skip']])
self.context.player().decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 3)
self.assertEqual(self.context.player().hand.last_tile, Tile.CHAR5)
def test_declared_ready_win(self):
self.context.players[0].extra['declared_ready'] = True
self.context.players[0].extra['waiting_tiles'] = [Tile.RED, Tile.GREEN]
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.RED)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [0])
self.assertEqual(self.context.players[0].extra.get('win_type'), 'self-picked')
def test_declared_ready_skip(self):
self.context.players[1].extra['declared_ready'] = True
self.context.players[1].extra['waiting_tiles'] = [Tile.RED, Tile.GREEN]
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.WHITE)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.player().hand.last_tile, Tile.WHITE)
def test_bot_always_chooses_to_win(self):
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.CIRCLE3)
self.context.players[3].extra['bot'] = True
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [3])
self.assertEqual(self.context.players[3].extra.get('win_type'), 'self-picked')
def test_bot_doesnt_kong(self):
self.context.players[3].hand.pong(Tile.CHAR5)
draw_for_player(self.context, 3, last_player_idx=1, tile=Tile.CHAR5)
self.context.players[3].extra['bot'] = True
        self.assertTrue(flow.next(self.context))
        self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 3)
self.assertEqual(self.context.player().hand.last_tile, Tile.CHAR5)
def test_bad_context(self):
self.assertFalse(flow.next(self.context))
class TestSelfKongingHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'self-konging'
self.context.num_hand_tiles = 13
# build wall
self.context.wall = Wall()
hands = [p.hand for p in self.context.players]
# player 0 almost finishes four kongs
hands[0].add_free_tiles([Tile.NORTH, Tile.NORTH, Tile.NORTH, Tile.EAST])
hands[0].fixed_groups += [
TileGroup([Tile.RED] * 4, TileGroup.KONG_EXPOSED),
TileGroup([Tile.GREEN] * 4, TileGroup.KONG_EXPOSED),
TileGroup([Tile.CHAR6] * 3, TileGroup.PONG)
]
# player 1 - waiting for CHAR6 and CHAR9
hands[1].add_free_tiles([Tile.CHAR2, Tile.CHAR2, Tile.CHAR7,
Tile.CHAR8, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.SOUTH, Tile.SOUTH,
Tile.SOUTH, Tile.WHITE, Tile.WHITE, Tile.WHITE])
# player 2 - lousy hand
hands[2].add_free_tiles([Tile.CIRCLE1, Tile.CIRCLE2, Tile.CIRCLE3,
Tile.BAMBOO5, Tile.BAMBOO6, Tile.BAMBOO7,
Tile.BAMBOO8, Tile.BAMBOO9, Tile.BAMBOO9,
Tile.EAST, Tile.EAST, Tile.WEST, Tile.NORTH])
# player 3 - waiting for CHAR3 and CHAR6
hands[3].add_free_tiles([Tile.CHAR5, Tile.CHAR5, Tile.CHAR4,
Tile.CHAR5, Tile.CIRCLE3, Tile.CIRCLE3,
Tile.CIRCLE3, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.BAMBOO6, Tile.BAMBOO7, Tile.BAMBOO8])
def test_no_one_can_rob(self):
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.NORTH)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 0)
self.assertIsNone(self.context.player().hand.last_tile)
self.assertIn(TileGroup([Tile.NORTH] * 4, TileGroup.KONG_CONCEALED),
self.context.player().hand.fixed_groups)
def test_one_rob(self):
# change player 0's CHAR6 triplet to CHAR3 triplet
self.context.players[0].hand.fixed_groups[2].tiles = [Tile.CHAR3] * 3
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR3)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, None, None, ['win', 'skip']])
self.context.players[3].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [3])
self.assertEqual(self.context.players[3].extra.get('win_type'), 'robbed')
self.assertEqual(self.context.players[3].extra.get('chucker'), 0)
def test_multi_robs(self):
self.context.settings.multi_winners = True
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR6)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'skip'], None, ['win', 'skip']])
self.context.players[1].decision = 'win'
self.context.players[3].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1, 3])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'robbed')
self.assertEqual(self.context.players[1].extra.get('chucker'), 0)
self.assertEqual(self.context.players[3].extra.get('win_type'), 'robbed')
self.assertEqual(self.context.players[3].extra.get('chucker'), 0)
def test_multi_robs_but_only_one_allowed(self):
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR6)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'skip'], None, ['win', 'skip']])
self.context.players[1].decision = 'win'
self.context.players[3].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'robbed')
self.assertEqual(self.context.players[1].extra.get('chucker'), 0)
def test_4_kong_tie(self):
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR6)
self.context.players[0].hand.kong_from_self()
self.context.settings.tie_on_4_kongs = False
orig_context = self.context.clone()
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.WHITE)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertIsNone(self.context.player().hand.last_tile)
# use the same context again but with tie_on_4_kongs turned on
self.context = orig_context
self.context.settings.tie_on_4_kongs = True
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.WHITE)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertFalse(self.context.winners)
self.assertEqual(self.context.extra.get('tie_type'), '4-kong')
def test_4_kong_win(self):
self.context.settings.patterns_win.append('four-kongs')
# player 0 makes the 3rd kong
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.NORTH)
self.context.players[0].hand.kong_from_self()
# player 0 has a CHAR6 pong, and he draws another CHAR6
# player 1 and 3 also wait for CHAR6
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR6)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'skip'], None, ['win', 'skip']])
# player 1 and 3 skip
self.context.players[1].decision = 'skip'
self.context.players[3].decision = 'skip'
# player 0 wins with 4 kongs
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [0])
self.assertEqual(self.context.players[0].extra.get('win_type'), 'self-picked')
def test_bad_context(self):
context2 = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, context2)
self.context.player().hand.last_tile = Tile.SOUTH
context2 = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, context2)
class TestDiscardingHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'discarding'
self.context.wall = Wall()
hands = [p.hand for p in self.context.players]
# lousy hand for player 0
hands[0].add_free_tiles([Tile.CHAR2, Tile.CHAR4, Tile.CHAR9,
Tile.CIRCLE9, Tile.BAMBOO1, Tile.BAMBOO4,
Tile.EAST, Tile.SOUTH, Tile.WEST,
Tile.NORTH, Tile.RED, Tile.GREEN, Tile.GREEN])
# ready hand for player 1 - waiting for CHAR6 and CHAR9
hands[1].add_free_tiles([Tile.CHAR2, Tile.CHAR2, Tile.CHAR7,
Tile.CHAR8, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.SOUTH, Tile.SOUTH,
Tile.SOUTH, Tile.WHITE, Tile.WHITE, Tile.WHITE])
# lousy hand for player 2
hands[2].add_free_tiles([Tile.CIRCLE1, Tile.CIRCLE2, Tile.CIRCLE3,
Tile.BAMBOO5, Tile.BAMBOO6, Tile.BAMBOO7,
Tile.BAMBOO8, Tile.BAMBOO9, Tile.BAMBOO9,
Tile.EAST, Tile.EAST, Tile.WEST, Tile.NORTH])
# ready hand for player 3 - waiting for CHAR5 and CIRCLE3
hands[3].add_free_tiles([Tile.CHAR5, Tile.CHAR5, Tile.CHAR6,
Tile.CHAR7, Tile.CHAR8, Tile.CIRCLE3,
Tile.CIRCLE3, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.BAMBOO6, Tile.BAMBOO7, Tile.BAMBOO8])
def test_declared_ready_discard(self):
self.context.players[0].extra['declared_ready'] = True
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.WHITE)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertIsNone(self.context.player().hand.last_tile)
self.assertEqual(self.context.player().discarded, [Tile.WHITE])
self.assertEqual(self.context.last_discarded(), Tile.WHITE)
def test_discarding_hints(self):
# player 1 draws and discards CHAR8
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.CHAR8)
orig_context = self.context.clone()
hand = self.context.player().hand
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions,
[None, set(hand.free_tiles + [hand.last_tile]), None, None])
self.assertEqual(self.context, orig_context)
def test_discard_tiles(self):
# player 1 draws and discards CHAR8
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.CHAR8)
self.context.player().decision = Tile.CHAR8
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertIsNone(self.context.player().hand.last_tile)
self.assertEqual(self.context.players[0].discarded, [])
self.assertEqual(self.context.players[1].discarded, [Tile.CHAR8])
self.assertEqual(self.context.players[2].discarded, [])
self.assertEqual(self.context.players[3].discarded, [])
self.assertEqual(self.context.discarded_pool, [Tile.CHAR8])
self.assertEqual(self.context.player().hand.free_tiles,
[Tile.CHAR2, Tile.CHAR2, Tile.CHAR7,
Tile.CHAR8, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.SOUTH, Tile.SOUTH,
Tile.SOUTH, Tile.WHITE, Tile.WHITE, Tile.WHITE])
self.assertIsNone(self.context.player().decision)
        # if the player hasn't made a decision, flow.next() won't change the context
self.context.state = 'discarding'
draw_for_player(self.context, 2, last_player_idx=1, tile=Tile.CHAR9)
self.context.player().decision = None
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
# player 2 discards EAST
self.context.player().decision = Tile.EAST
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertEqual(self.context.players[0].discarded, [])
self.assertEqual(self.context.players[1].discarded, [Tile.CHAR8])
self.assertEqual(self.context.players[2].discarded, [Tile.EAST])
self.assertEqual(self.context.players[3].discarded, [])
self.assertEqual(self.context.discarded_pool, [Tile.CHAR8, Tile.EAST])
self.assertIsNone(self.context.player().hand.last_tile)
self.assertEqual(self.context.player().hand.free_tiles,
[Tile.CHAR9, Tile.CIRCLE1, Tile.CIRCLE2, Tile.CIRCLE3,
Tile.BAMBOO5, Tile.BAMBOO6, Tile.BAMBOO7,
Tile.BAMBOO8, Tile.BAMBOO9, Tile.BAMBOO9,
Tile.EAST, Tile.WEST, Tile.NORTH])
self.assertIsNone(self.context.player().decision)
# player 2 draws a CHAR7 and discards BAMBOO7
self.context.state = 'discarding'
draw_for_player(self.context, 2, last_player_idx=1, tile=Tile.CHAR7)
self.context.player().decision = Tile.BAMBOO7
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertEqual(self.context.players[0].discarded, [])
self.assertEqual(self.context.players[1].discarded, [Tile.CHAR8])
self.assertEqual(self.context.players[2].discarded, [Tile.EAST, Tile.BAMBOO7])
self.assertEqual(self.context.players[3].discarded, [])
self.assertEqual(self.context.discarded_pool, [Tile.CHAR8, Tile.EAST, Tile.BAMBOO7])
self.assertIsNone(self.context.player().hand.last_tile)
self.assertEqual(self.context.player().hand.free_tiles,
[Tile.CHAR7, Tile.CHAR9, Tile.CIRCLE1,
Tile.CIRCLE2, Tile.CIRCLE3, Tile.BAMBOO5,
Tile.BAMBOO6, Tile.BAMBOO8, Tile.BAMBOO9,
Tile.BAMBOO9, Tile.EAST, Tile.WEST, Tile.NORTH])
self.assertIsNone(self.context.player().decision)
# test bot
self.context.players[3].extra['bot'] = True
self.context.state = 'discarding'
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.SOUTH)
self.context.player().decision = 'doesnt-matter-for-bot'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertEqual(self.context.players[0].discarded, [])
self.assertEqual(self.context.players[1].discarded, [Tile.CHAR8])
self.assertEqual(self.context.players[2].discarded, [Tile.EAST, Tile.BAMBOO7])
self.assertEqual(self.context.players[3].discarded, [Tile.SOUTH])
self.assertEqual(self.context.discarded_pool, [Tile.CHAR8, Tile.EAST, Tile.BAMBOO7, Tile.SOUTH])
self.assertIsNone(self.context.player().hand.last_tile)
self.assertEqual(self.context.player().hand.free_tiles,
[Tile.CHAR5, Tile.CHAR5, Tile.CHAR6,
Tile.CHAR7, Tile.CHAR8, Tile.CIRCLE3,
Tile.CIRCLE3, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.BAMBOO6, Tile.BAMBOO7, Tile.BAMBOO8])
self.assertIsNone(self.context.player().decision)
def test_illegal_type(self):
# test the case where player's decision is not a tile
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.GREEN)
self.context.player().decision = 'invalid-input'
orig_context = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, orig_context)
def test_illegal_tile(self):
# test the case where player tries to discard a tile that he doesn't own
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.GREEN)
self.context.player().decision = Tile.WEST
orig_context = self.context.clone()
self.assertFalse(flow.next(self.context))
self.assertEqual(self.context, orig_context)
class TestDiscardedHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'discarded'
self.context.wall = Wall()
hands = [p.hand for p in self.context.players]
hands[0].add_free_tiles([Tile.CHAR1])
hands[1].add_free_tiles([Tile.CHAR2, Tile.CHAR4, Tile.CIRCLE1, Tile.CIRCLE1])
hands[2].add_free_tiles([Tile.CHAR3, Tile.CHAR3, Tile.CHAR3, Tile.CHAR2])
hands[3].add_free_tiles([Tile.CHAR4, Tile.CIRCLE4])
    def test_4_kong_viable_decisions(self):
self.context.settings.patterns_win.append('four-kongs')
self.context.players[0].hand.free_tiles = [Tile.CHAR1, Tile.CHAR1, Tile.CHAR1, Tile.RED]
self.context.players[0].hand.fixed_groups = [
TileGroup([Tile.CHAR2] * 4, TileGroup.KONG_EXPOSED),
TileGroup([Tile.CHAR3] * 4, TileGroup.KONG_EXPOSED),
TileGroup([Tile.CHAR4] * 4, TileGroup.KONG_CONCEALED)
]
self.context.players[2].hand.free_tiles = [Tile.GREEN, Tile.WHITE]
draw_for_player(self.context, 2, tile=Tile.CHAR1)
self.context.discard(Tile.CHAR1)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'melding')
self.assertEqual(self.context.players[0].extra.get('viable_decisions'),
['win', 'pong', 'skip'])
def test_4_wind_tie(self):
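        # four-wind tie: every player's first discard is the same wind tile
        # (EAST below), which should end the hand in a '4-wind' tie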
draw_for_player(self.context, 0, tile=Tile.EAST)
draw_for_player(self.context, 1, tile=Tile.EAST)
draw_for_player(self.context, 2, tile=Tile.EAST)
self.context.discard(Tile.EAST, 0)
self.context.discard(Tile.EAST, 1)
self.context.discard(Tile.EAST, 2)
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.EAST)
self.context.discard(Tile.EAST, 3)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertFalse(self.context.winners)
self.assertEqual(self.context.extra.get('tie_type'), '4-wind')
def test_4_waiting_tie(self):
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.EAST)
self.context.discard(Tile.EAST)
for player in self.context.players:
player.extra['declared_ready'] = True
orig_context = self.context.clone()
# tie_on_4_waiting is off, should go to 'drawing'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.context = orig_context
self.context.settings.tie_on_4_waiting = True
# tie_on_4_waiting is on
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertFalse(self.context.winners)
self.assertEqual(self.context.extra.get('tie_type'), '4-waiting')
def test_declarable_off(self):
self.context.settings.declarable = False
draw_for_player(self.context, 0, last_player_idx=2, tile=Tile.CHAR3)
self.context.discard(Tile.CHAR3)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'melding')
self.assertEqual(self.context.cur_player_idx, 0)
self.assertEqual(self.context.last_player_idx, 2)
def test_declare_ready(self):
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.EAST)
self.context.discard(Tile.EAST)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [['declare', 'skip'], None, None, None])
self.assertEqual(self.context.cur_player_idx, 0)
self.assertEqual(self.context.last_player_idx, 3)
self.assertFalse(self.context.player().extra.get('declared_ready'))
self.context.players[0].decision = 'declare'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertEqual(self.context.cur_player_idx, 0)
self.assertEqual(self.context.last_player_idx, 3)
self.assertTrue(self.context.player().extra.get('declared_ready'))
self.assertEqual(self.context.player().extra.get('waiting_tiles'),
[Tile.CHAR1])
self.assertIsNone(self.context.players[0].decision)
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.EAST)
self.context.discard(Tile.EAST)
self.context.players[1].decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.last_player_idx, 1)
self.assertFalse(self.context.players[1].extra.get('declared_ready'))
self.assertIsNone(self.context.players[1].decision)
def test_someone_can_meld(self):
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR3)
self.context.discard(Tile.CHAR3)
self.context.player().decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarded')
self.assertIsNone(self.context.player().decision)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'melding')
viable_decisions = [player.extra.get('viable_decisions') for player in self.context.players]
self.assertEqual(viable_decisions, [None, ['win', 'chow', 'skip'], ['kong', 'pong', 'skip'], None])
def test_bad_context(self):
self.assertFalse(flow.next(self.context))
class TestMeldingHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'melding'
self.context.wall = Wall()
players = [p for p in self.context.players]
hands = [p.hand for p in players]
# lousy hand for player 0
hands[0].add_free_tiles([Tile.CHAR2, Tile.CHAR4, Tile.CHAR9,
Tile.CIRCLE9, Tile.BAMBOO1, Tile.BAMBOO4,
Tile.EAST, Tile.SOUTH, Tile.WEST,
Tile.NORTH, Tile.RED, Tile.GREEN, Tile.GREEN])
# ready hand for player 1 - waiting for CHAR6 and CHAR9
hands[1].add_free_tiles([Tile.CHAR2, Tile.CHAR2, Tile.CHAR7,
Tile.CHAR8, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.SOUTH, Tile.SOUTH,
Tile.SOUTH, Tile.WHITE, Tile.WHITE, Tile.WHITE])
# lousy hand for player 2
hands[2].add_free_tiles([Tile.CIRCLE1, Tile.CHAR6, Tile.CHAR6,
Tile.BAMBOO5, Tile.BAMBOO6, Tile.BAMBOO7,
Tile.BAMBOO8, Tile.BAMBOO9, Tile.BAMBOO9,
Tile.EAST, Tile.EAST, Tile.WEST, Tile.NORTH])
# ready hand for player 3 - waiting for CHAR5 and CIRCLE3
hands[3].add_free_tiles([Tile.CHAR5, Tile.CHAR5, Tile.CHAR6,
Tile.CHAR7, Tile.CHAR8, Tile.CIRCLE3,
Tile.CIRCLE3, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.BAMBOO6, Tile.BAMBOO7, Tile.BAMBOO8])
draw_for_player(self.context, 0, tile=Tile.CHAR6)
self.context.discard(Tile.CHAR6)
players[1].extra['viable_decisions'] = ['win', 'chow', 'skip']
players[2].extra['viable_decisions'] = ['pong', 'skip']
def test_skip(self):
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'chow', 'skip'], ['pong', 'skip'], None])
self.context.players[1].decision = 'skip'
result = flow.next(self.context)
self.assertFalse(result)
self.context.players[2].decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.last_player_idx, 0)
self.assertEqual(self.context.discarded_pool, [Tile.CHAR6])
self.assertEqual(self.context.extra, {})
self.assertEqual(self.context.players[0].extra, {})
self.assertEqual(self.context.players[1].extra, { 'water': [Tile.CHAR6, Tile.CHAR9] })
self.assertEqual(self.context.players[2].extra, {})
self.assertEqual(self.context.players[3].extra, {})
self.assertIsNone(self.context.players[0].decision)
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
self.assertIsNone(self.context.players[3].decision)
def test_tie(self):
self.context.settings.tie_on_4_kongs = True
# already 3 kongs on the table
kong_group = TileGroup([Tile.RED] * 4, TileGroup.KONG_EXPOSED)
self.context.players[0].hand.fixed_groups.append(kong_group)
self.context.players[1].hand.fixed_groups.append(kong_group)
self.context.players[2].hand.fixed_groups.append(kong_group)
# player 3 draws and discards a WHITE
draw_for_player(self.context, 3, tile=Tile.WHITE)
self.context.discard(Tile.WHITE)
self.context.players[1].extra['viable_decisions'] = ['kong', 'pong', 'skip']
del self.context.players[2].extra['viable_decisions']
        # cannot go to the next state because player 1 hasn't made a decision
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions,
[None, ['kong', 'pong', 'skip'], None, None])
# player 1 decides to kong -> 4-kong tie
self.context.players[1].decision = 'kong'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertFalse(self.context.winners)
self.assertEqual(self.context.extra.get('tie_type'), '4-kong')
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
def test_single_win(self):
self.context.players[1].decision = 'win'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'melded')
self.assertEqual(self.context.players[1].extra.get('chucker'), 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
def test_win_bot(self):
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions, [None, ['win', 'chow', 'skip'], ['pong', 'skip'], None])
self.context.players[1].extra['bot'] = True
self.context.players[2].extra['bot'] = True
# bot chooses to win for player 1
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1])
self.assertEqual(self.context.players[1].extra.get('chucker'), 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
    def test_single_win_priority(self):
# player 1~3 all wait for CHAR8
self.context.players[1].hand.free_tiles = [Tile.CHAR8]
self.context.players[2].hand.free_tiles = [Tile.CHAR8]
self.context.players[3].hand.free_tiles = [Tile.CHAR8]
self.context.players[1].extra['viable_decisions'] = ['win', 'skip']
self.context.players[2].extra['viable_decisions'] = ['win', 'skip']
self.context.players[3].extra['viable_decisions'] = ['win', 'skip']
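        # with multi_winners off (the default), only one claimant can win;
        # from the assertions below the tie-break appears to go to the
        # claimant closest to the discarder in turn order (player 2 beats
        # player 3 here, since player 0 discarded)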
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions,
[None, ['win', 'skip'], ['win', 'skip'], ['win', 'skip']])
# player 3 decides to win
self.context.players[3].decision = 'win'
# still have to wait for player 1
self.assertFalse(flow.next(self.context))
# player 1 decides to skip
self.context.players[1].decision = 'skip'
# still have to wait for player 2
self.assertFalse(flow.next(self.context))
# player 2 decides to win
self.context.players[2].decision = 'win'
# final winner: player 2
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [2])
self.assertEqual(self.context.players[2].extra.get('win_type'), 'melded')
self.assertEqual(self.context.players[2].extra.get('chucker'), 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
self.assertFalse(self.context.players[3].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
self.assertIsNone(self.context.players[3].decision)
def test_multi_wins(self):
self.context.settings.multi_winners = True
# player 1~3 all wait for CHAR8
self.context.players[1].hand.free_tiles = [Tile.CHAR8]
self.context.players[2].hand.free_tiles = [Tile.CHAR8]
self.context.players[3].hand.free_tiles = [Tile.CHAR8]
self.context.players[1].extra['viable_decisions'] = ['win', 'skip']
self.context.players[2].extra['viable_decisions'] = ['win', 'skip']
self.context.players[3].extra['viable_decisions'] = ['win', 'skip']
# player 0 discards a CHAR8
draw_for_player(self.context, 0, last_player_idx=3, tile=Tile.CHAR8)
self.context.discard(Tile.CHAR8)
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions,
[None, ['win', 'skip'], ['win', 'skip'], ['win', 'skip']])
# player 1 and 3 decide to win
self.context.players[1].decision = 'win'
self.context.players[3].decision = 'win'
# still have to wait for player 2
result = flow.next(self.context)
self.assertFalse(result)
# player 1 and 3 decide to win, player 2 skips
self.context.players[2].decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1, 3])
self.assertEqual(self.context.players[1].extra.get('win_type'), 'melded')
self.assertEqual(self.context.players[1].extra.get('chucker'), 0)
self.assertEqual(self.context.players[3].extra.get('win_type'), 'melded')
self.assertEqual(self.context.players[3].extra.get('chucker'), 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
self.assertFalse(self.context.players[3].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
self.assertIsNone(self.context.players[3].decision)
def test_kong(self):
draw_for_player(self.context, 3, tile=Tile.WHITE)
self.context.discard(Tile.WHITE)
self.context.players[1].extra['viable_decisions'] = ['kong', 'pong', 'skip']
del self.context.players[2].extra['viable_decisions']
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions,
[None, ['kong', 'pong', 'skip'], None, None])
self.context.players[1].decision = 'kong'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.last_player_idx, 3)
self.assertEqual(self.context.players[1].hand.fixed_groups,
[TileGroup([Tile.WHITE] * 4, TileGroup.KONG_EXPOSED)])
self.assertEqual(self.context.players[1].hand.free_tiles,
[Tile.CHAR2, Tile.CHAR2, Tile.CHAR7, Tile.CHAR8,
Tile.BAMBOO2, Tile.BAMBOO3, Tile.BAMBOO4, Tile.SOUTH,
Tile.SOUTH, Tile.SOUTH])
self.assert_cleaned(self.context)
def test_pong(self):
self.context.players[1].decision = 'chow'
self.context.players[2].decision = 'pong'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.last_player_idx, 0)
self.assertEqual(self.context.players[2].hand.fixed_groups,
[TileGroup([Tile.CHAR6] * 3, TileGroup.PONG)])
self.assertEqual(self.context.players[2].hand.free_tiles,
[Tile.CIRCLE1, Tile.BAMBOO5, Tile.BAMBOO6, Tile.BAMBOO7,
Tile.BAMBOO8, Tile.BAMBOO9, Tile.BAMBOO9, Tile.EAST,
Tile.EAST, Tile.WEST, Tile.NORTH])
self.assertEqual(self.context.extra, {})
self.assertEqual(self.context.players[0].extra, {})
self.assertEqual(self.context.players[1].extra, { 'water': [Tile.CHAR6, Tile.CHAR9] })
self.assertEqual(self.context.players[2].extra, {})
self.assertEqual(self.context.players[3].extra, {})
self.assertIsNone(self.context.players[0].decision)
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
self.assertIsNone(self.context.players[3].decision)
def test_bot_skips_melding(self):
        # a dumb bot never kongs, pongs, or chows
self.context.players[1].extra['bot'] = True
self.context.players[2].extra['bot'] = True
self.context.players[1].extra['viable_decisions'] = ['chow', 'skip']
self.context.players[2].extra['viable_decisions'] = ['kong', 'pong', 'skip']
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.last_player_idx, 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
def test_declared_ready_melding(self):
        # after a player has declared ready, he doesn't kong, pong, or chow
self.context.players[1].extra['declared_ready'] = True
self.context.players[2].extra['declared_ready'] = True
self.context.players[1].extra['viable_decisions'] = ['chow', 'skip']
self.context.players[2].extra['viable_decisions'] = ['kong', 'pong', 'skip']
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'drawing')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.last_player_idx, 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
def test_declared_ready_winning(self):
        # after a player has declared ready, he can only win
self.context.settings.multi_winners = True
self.context.players[1].extra['declared_ready'] = True
self.context.players[2].extra['declared_ready'] = True
self.context.players[1].extra['waiting_tiles'] = [Tile.CHAR6]
self.context.players[2].extra['waiting_tiles'] = [Tile.CHAR6]
self.context.players[1].extra['viable_decisions'] = ['win', 'chow', 'skip']
self.context.players[2].extra['viable_decisions'] = ['win', 'pong', 'skip']
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'end')
self.assertEqual(self.context.winners, [1, 2])
self.assertEqual(self.context.players[1].extra.get('chucker'), 0)
self.assertEqual(self.context.players[2].extra.get('chucker'), 0)
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
def test_single_chow(self):
self.context.players[1].decision = 'chow'
self.context.players[2].decision = 'skip'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 1)
self.assertEqual(self.context.last_player_idx, 0)
self.assertEqual(self.context.discarded_pool, [])
self.assertEqual(self.context.players[1].hand.fixed_groups,
[TileGroup([Tile.CHAR6, Tile.CHAR7, Tile.CHAR8], TileGroup.CHOW)])
self.assertEqual(self.context.players[1].hand.free_tiles,
[Tile.CHAR2, Tile.CHAR2, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.SOUTH, Tile.SOUTH, Tile.SOUTH,
Tile.WHITE, Tile.WHITE, Tile.WHITE])
self.assertEqual(self.context.extra, {})
self.assertEqual(self.context.players[0].extra, {})
self.assertEqual(self.context.players[1].extra, { 'water': [Tile.CHAR6, Tile.CHAR9] })
self.assertEqual(self.context.players[2].extra, {})
self.assertEqual(self.context.players[3].extra, {})
self.assertIsNone(self.context.players[0].decision)
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
self.assertIsNone(self.context.players[3].decision)
def test_multi_chow(self):
draw_for_player(self.context, 1, last_player_idx=0, tile=Tile.BAMBOO6)
self.context.discard(Tile.BAMBOO6)
del self.context.players[1].extra['viable_decisions']
self.context.players[2].extra['viable_decisions'] = ['chow', 'skip']
self.context.players[2].decision = 'chow'
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'chowing')
self.assertEqual(self.context.cur_player_idx, 2)
self.assertEqual(self.context.last_player_idx, 1)
self.assertEqual(self.context.player().extra.get('chow_combs'),
[(Tile.BAMBOO5, Tile.BAMBOO7), (Tile.BAMBOO7, Tile.BAMBOO8)])
# viable_decisions in player.extra should be cleaned up afterward
self.assertFalse(self.context.players[1].extra.get('viable_decisions'))
self.assertFalse(self.context.players[2].extra.get('viable_decisions'))
self.assertIsNone(self.context.players[1].decision)
self.assertIsNone(self.context.players[2].decision)
def test_bad_context(self):
del self.context.discarded_pool[:]
self.assertFalse(flow.next(self.context))
self.context.discarded_pool.append(Tile.RED)
self.assertFalse(flow.next(self.context))
self.context.discarded_pool[0] = Tile.CHAR6
del self.context.players[1].extra['viable_decisions']
del self.context.players[2].extra['viable_decisions']
self.assertFalse(flow.next(self.context))
    def assert_cleaned(self, context):
        # check that the given context is cleaned up
        self.assertEqual(context.extra, {})
        self.assertEqual(context.players[0].extra, {})
        self.assertEqual(context.players[1].extra, {})
        self.assertEqual(context.players[2].extra, {})
        self.assertEqual(context.players[3].extra, {})
        self.assertIsNone(context.players[0].decision)
        self.assertIsNone(context.players[1].decision)
        self.assertIsNone(context.players[2].decision)
        self.assertIsNone(context.players[3].decision)
class TestChowingHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'melding'
self.context.wall = Wall()
hands = [player.hand for player in self.context.players]
hands[0].add_free_tiles([Tile.CHAR1, Tile.CHAR2, Tile.CHAR3,
Tile.CHAR1, Tile.CHAR2, Tile.CHAR3,
Tile.CHAR4, Tile.CHAR5, Tile.CHAR6])
hands[1].add_free_tiles([Tile.CIRCLE1, Tile.CIRCLE2, Tile.CIRCLE3,
Tile.CIRCLE4, Tile.CIRCLE5, Tile.CIRCLE6])
hands[2].add_free_tiles([Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.BAMBOO4, Tile.BAMBOO5, Tile.BAMBOO6])
hands[3].add_free_tiles([Tile.CHAR4, Tile.CHAR5, Tile.CHAR6,
Tile.CHAR7, Tile.CHAR8, Tile.CHAR9])
draw_for_player(self.context, 3, last_player_idx=2, tile=Tile.CHAR3)
self.context.discard(Tile.CHAR3)
self.context.players[0].extra['viable_decisions'] = ['pong', 'chow', 'skip']
self.context.players[0].decision = 'chow'
flow.next(self.context)
def test_multi_chow(self):
self.assertEqual(self.context.state, 'chowing')
result = flow.next(self.context)
self.assertFalse(result)
self.assertEqual(result.viable_decisions,
[[(Tile.CHAR1, Tile.CHAR2), (Tile.CHAR2, Tile.CHAR4), (Tile.CHAR4, Tile.CHAR5)],
None, None, None])
# invalid decision
self.context.players[0].decision = (Tile.CHAR2, Tile.CHAR3)
self.assertFalse(flow.next(self.context))
# chow CHAR2 CHAR3 CHAR4
self.context.players[0].decision = (Tile.CHAR2, Tile.CHAR4)
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'discarding')
self.assertEqual(self.context.cur_player_idx, 0)
self.assertEqual(self.context.last_player_idx, 3)
self.assertEqual(self.context.player().hand.fixed_groups,
[TileGroup([Tile.CHAR2, Tile.CHAR3, Tile.CHAR4], TileGroup.CHOW)])
self.assertEqual(self.context.player().hand.free_tiles,
[Tile.CHAR1, Tile.CHAR1, Tile.CHAR2, Tile.CHAR3,
Tile.CHAR3, Tile.CHAR5, Tile.CHAR6])
def test_bad_context(self):
orig_context = self.context.clone()
del self.context.discarded_pool[:]
self.assertFalse(flow.next(self.context))
self.context = orig_context.clone()
del self.context.player().extra['chow_combs']
self.assertFalse(flow.next(self.context))
self.context = orig_context
self.context.player(-1).discarded[0] = Tile.RED
self.assertFalse(flow.next(self.context))
class TestEndHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'end'
self.context.wall = Wall()
hands = [player.hand for player in self.context.players]
hands[0].add_free_tiles([Tile.CHAR1])
hands[1].add_free_tiles([Tile.CHAR2])
hands[2].add_free_tiles([Tile.CHAR3])
hands[3].add_free_tiles([Tile.CHAR4])
draw_for_player(self.context, 0, last_player_idx=2, tile=Tile.CHAR1)
self.context.winners = [0]
self.context.players[0].extra['win_type'] = 'self-picked'
def test_score(self):
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'scored')
match_result = MatchResult((1, 2, 3), multiplier=1)
self.assertEqual(self.context.players[0].extra.get('patterns_matched'), {
'all-pongs': match_result,
'heaven-win': match_result,
'same-suit': match_result,
'waiting-for-one': MatchResult((1, 2, 3), multiplier=1, extra='eye')
})
# TODO: more tests...
def test_bad_context(self):
orig_context = self.context.clone()
del self.context.players[0].extra['win_type']
self.assertFalse(flow.next(self.context))
self.context = orig_context
self.context.winners = None
self.assertFalse(flow.next(self.context))
class TestScoredHandler(unittest.TestCase):
def setUp(self):
self.context = GameContext()
self.context.state = 'scored'
self.context.wall = Wall()
# deal some tiles
for player in self.context.players:
hand = player.hand
for __ in xrange(13):
tile = self.context.wall.draw()
hand.add_free_tile(tile)
def test_round0_match0_dealer0_winner0(self):
self.context.round = 0
self.context.match = 0
self.context.dealer = 0
self.context.winners = [0]
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 0)
self.assertEqual(self.context.match, 1)
self.assertEqual(self.context.dealer, 0)
self.assertEqual(self.context.dealer_defended, 1)
def test_round1_match3_dealer0_winner2(self):
self.context.round = 1
self.context.match = 3
self.context.dealer = 0
self.context.winners = [2]
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 1)
self.assertEqual(self.context.match, 4)
self.assertEqual(self.context.dealer, 1)
self.assertEqual(self.context.dealer_defended, 0)
def test_round2_match4_dealer0_no_winner(self):
self.context.round = 2
self.context.match = 4
self.context.dealer = 0
self.context.winners = None
self.context.dealer_defended = 2
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 2)
self.assertEqual(self.context.match, 5)
self.assertEqual(self.context.dealer, 0)
self.assertEqual(self.context.dealer_defended, 3)
def test_round1_match5_dealer3_winner1_2(self):
self.context.round = 1
self.context.match = 5
self.context.dealer = 3
self.context.winners = [1, 2]
self.context.dealer_defended = 1
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 2)
self.assertEqual(self.context.match, 0)
self.assertEqual(self.context.dealer, 0)
self.assertEqual(self.context.dealer_defended, 0)
def test_round0_match8_dealer3_no_winner(self):
self.context.round = 0
self.context.match = 8
self.context.dealer = 3
self.context.winners = None
self.context.dealer_defended = 3
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 0)
self.assertEqual(self.context.match, 9)
self.assertEqual(self.context.dealer, 3)
self.assertEqual(self.context.dealer_defended, 4)
def test_max_dealer_defended(self):
self.context.settings.max_dealer_defended = 3
self.context.round = 1
self.context.match = 2
self.context.dealer = 2
self.context.winners = None
self.context.dealer_defended = 3
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 1)
self.assertEqual(self.context.match, 3)
self.assertEqual(self.context.dealer, 3)
self.assertEqual(self.context.dealer_defended, 0)
def test_max_dealer_defended2(self):
self.context.settings.max_dealer_defended = 3
self.context.round = 1
self.context.match = 2
self.context.dealer = 2
self.context.winners = [2]
self.context.dealer_defended = 2
self.assertTrue(flow.next(self.context))
self.assertEqual(self.context.state, 'start')
self.assert_empty_hands(self.context)
self.assertEqual(self.context.round, 1)
self.assertEqual(self.context.match, 3)
self.assertEqual(self.context.dealer, 2)
self.assertEqual(self.context.dealer_defended, 3)
def assert_empty_hands(self, context):
empty_hand = Hand()
self.assertEqual(context.players[0].hand, empty_hand)
self.assertEqual(context.players[1].hand, empty_hand)
self.assertEqual(context.players[2].hand, empty_hand)
self.assertEqual(context.players[3].hand, empty_hand)
self.assertEqual(context.players[0].discarded, [])
self.assertEqual(context.players[1].discarded, [])
self.assertEqual(context.players[2].discarded, [])
self.assertEqual(context.players[3].discarded, [])
self.assertEqual(context.players[0].extra, {})
self.assertEqual(context.players[1].extra, {})
self.assertEqual(context.players[2].extra, {})
self.assertEqual(context.players[3].extra, {})
self.assertEqual(context.cur_player_idx, 0)
self.assertIsNone(context.last_player_idx)
self.assertIsNone(context.last_discarded())
self.assertFalse(context.winners)
self.assertIsNone(context.player().decision)
self.assertEqual(context.extra, {})
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def draw_for_player(context, player_idx, last_player_idx=None, tile=None):
context.last_player_idx = last_player_idx
context.cur_player_idx = player_idx
hand = context.player().hand
tile_from_wall = context.wall.draw()
hand.last_tile = tile or tile_from_wall
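# Usage sketch (mirrors the setUp methods above): force a deterministic "draw"
# for player 3 following player 2, then discard the drawn tile, e.g.:
#   draw_for_player(context, 3, last_player_idx=2, tile=Tile.CHAR3)
#   context.discard(Tile.CHAR3)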
| {
"content_hash": "f0e6ac001bce83ae5d15024cc1c04245",
"timestamp": "",
"source": "github",
"line_count": 1775,
"max_line_length": 116,
"avg_line_length": 45.36394366197183,
"alnum_prop": 0.6361570273593224,
"repo_name": "eliangcs/mahjong",
"id": "a588851760744fd7b001ff4bc8b7c0fc4986aef2",
"size": "80521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "351485"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference.rules import (
ImportOwnerStatus,
PythonImportDependenciesInferenceFieldSet,
import_rules,
)
from pants.backend.python.goals import debug_goals
from pants.backend.python.goals.debug_goals import PythonSourceAnalysis
from pants.backend.python.macros import python_requirements
from pants.backend.python.macros.python_requirements import PythonRequirementsTargetGenerator
from pants.backend.python.target_types import (
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
)
from pants.build_graph.address import Address
from pants.core.target_types import FileTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.internals.parametrize import Parametrize
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def imports_rule_runner() -> RuleRunner:
resolves = {"python-default": "", "other": ""}
rule_runner = RuleRunner(
rules=[
*import_rules(),
*target_types_rules.rules(),
*core_target_types_rules(),
*python_requirements.rules(),
*debug_goals.rules(),
QueryRule(PythonSourceAnalysis, [PythonImportDependenciesInferenceFieldSet]),
],
target_types=[
PythonSourceTarget,
PythonSourcesGeneratorTarget,
PythonRequirementTarget,
PythonRequirementsTargetGenerator,
FileTarget,
],
objects={"parametrize": Parametrize},
)
rule_runner.set_options(
[
"--python-infer-assets",
"--python-enable-resolves",
f"--python-resolves={resolves}",
],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
return rule_runner
def test_debug_goals(imports_rule_runner: RuleRunner):
filedir = "project"
filename = "t.py"
imports_rule_runner.write_files(
{
str(Path(filedir, filename)): dedent(
f"""\
import json # unownable, root level
import os.path # unownable, not root level
import stuff # dependency missing
import watchdog # dependency included in other resolve
import yaml # dependency included
try:
import weakimport # weakimport missing
except ImportError:
...
open("missing.json")
# missing asset
open("{filedir}/config.json")
# asset
"""
),
str(Path(filedir, "BUILD")): dedent(
f"""\
python_source(
name="t",
source="t.py",
dependencies=["//{filedir}:config"],
resolve="python-default",
)
file(
name="config",
source="config.json",
)
python_requirement(
name="imported",
requirements=["pyyaml"],
)
python_requirement(
name="other",
requirements=["watchdog"],
resolve="other",
)
"""
),
str(Path(filedir, "config.json")): "",
}
)
tgt = imports_rule_runner.get_target(Address(filedir, target_name="t"))
result = imports_rule_runner.request(
PythonSourceAnalysis, (PythonImportDependenciesInferenceFieldSet.create(tgt),)
)
assert result
assert len(result.identified.imports) == 6
assert (
len([i for i in result.identified.imports.values() if i.weak]) == 1
), "did not find the weak import"
assert len(result.identified.assets) == 1
assert (
result.resolved.assets[str(Path(filedir, "config.json"))].status
== ImportOwnerStatus.unambiguous
)
# possible owners
assert result.resolved.resolve_results["watchdog"].status == ImportOwnerStatus.unowned
assert result.possible_owners.value["watchdog"]
| {
"content_hash": "9aa8a83d7f04927364023367f13900c8",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 93,
"avg_line_length": 31.644927536231883,
"alnum_prop": 0.5804900389283261,
"repo_name": "pantsbuild/pants",
"id": "c149377dca12403a31b070c5d78c58c0abe78f21",
"size": "4499",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/goals/debug_goals_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
"""Utilities related to model visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.util.tf_export import keras_export
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def _check_pydot():
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
except Exception:
# pydot raises a generic Exception here,
# so no specific class can be caught.
raise ImportError('Failed to import pydot. You must install pydot'
' and graphviz for `pydotprint` to work.')
def model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='TB'):
"""Convert a Keras model to dot format.
Arguments:
model: A Keras model instance.
show_shapes: whether to display shape information.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
Returns:
A `pydot.Dot` instance representing the Keras model.
"""
from tensorflow.python.keras.layers.wrappers import Wrapper
from tensorflow.python.keras.models import Sequential
from tensorflow.python.util import nest
_check_pydot()
dot = pydot.Dot()
dot.set('rankdir', rankdir)
dot.set('concentrate', True)
dot.set_node_defaults(shape='record')
if isinstance(model, Sequential):
if not model.built:
model.build()
layers = model._layers
# Create graph nodes.
for layer in layers:
layer_id = str(id(layer))
# Append a wrapped layer's label to node's label, if it exists.
layer_name = layer.name
class_name = layer.__class__.__name__
if isinstance(layer, Wrapper):
layer_name = '{}({})'.format(layer_name, layer.layer.name)
child_class_name = layer.layer.__class__.__name__
class_name = '{}({})'.format(class_name, child_class_name)
# Create node's label.
if show_layer_names:
label = '{}: {}'.format(layer_name, class_name)
else:
label = class_name
# Rebuild the label as a table including input/output shapes.
if show_shapes:
try:
outputlabels = str(layer.output_shape)
except AttributeError:
outputlabels = 'multiple'
if hasattr(layer, 'input_shape'):
inputlabels = str(layer.input_shape)
elif hasattr(layer, 'input_shapes'):
inputlabels = ', '.join([str(ishape) for ishape in layer.input_shapes])
else:
inputlabels = 'multiple'
label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels,
outputlabels)
node = pydot.Node(layer_id, label=label)
dot.add_node(node)
# Connect nodes with edges.
for layer in layers:
layer_id = str(id(layer))
for i, node in enumerate(layer._inbound_nodes):
node_key = layer.name + '_ib-' + str(i)
if node_key in model._network_nodes: # pylint: disable=protected-access
for inbound_layer in nest.flatten(node.inbound_layers):
inbound_layer_id = str(id(inbound_layer))
layer_id = str(id(layer))
dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
return dot
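# Usage sketch (assumes a built Keras `model`; the rankdir and file name are
# illustrative):
#   dot = model_to_dot(model, show_shapes=True, rankdir='LR')
#   dot.write('model.dot')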
@keras_export('keras.utils.plot_model')
def plot_model(model,
to_file='model.png',
show_shapes=False,
show_layer_names=True,
rankdir='TB'):
"""Converts a Keras model to dot format and save to a file.
Arguments:
model: A Keras model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
dot = model_to_dot(model, show_shapes, show_layer_names, rankdir)
_, extension = os.path.splitext(to_file)
if not extension:
extension = 'png'
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
try:
from IPython import display
return display.Image(filename=to_file)
except ImportError:
pass
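# Usage sketch (file name is illustrative; requires pydot and graphviz):
#   plot_model(model, to_file='model.png', show_shapes=True)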
| {
"content_hash": "123490bc410b15cf2b44dcf3cba65e2e",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 80,
"avg_line_length": 33.203947368421055,
"alnum_prop": 0.6532593619972261,
"repo_name": "theflofly/tensorflow",
"id": "d396851a629b80114496b4a978768d73730aecd2",
"size": "5809",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/utils/vis_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644154"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59546729"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1507157"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46310564"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481712"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
class FlowAction(object):
# property key
TYPE = "type"
def __init__(self, type_):
self._body = {
self.TYPE: type_
}
@property
def type(self):
return self._body[self.TYPE]
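# Usage sketch (the type string is illustrative; concrete subclasses pass in
# their own ODENOS action type):
#   action = FlowAction("FlowActionOutput")
#   assert action.type == "FlowActionOutput"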
| {
"content_hash": "49960249562609087b081313828a276f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 36,
"avg_line_length": 19.083333333333332,
"alnum_prop": 0.5065502183406113,
"repo_name": "haizawa/odenos",
"id": "a1a1ab59d4d544af1fbbd703f8994afe414c3d60",
"size": "1256",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "src/main/python/org/o3project/odenos/core/component/network/flow/basic/flow_action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2905"
},
{
"name": "Java",
"bytes": "4236219"
},
{
"name": "Python",
"bytes": "1502005"
},
{
"name": "Ruby",
"bytes": "789773"
},
{
"name": "Shell",
"bytes": "241400"
}
],
"symlink_target": ""
} |
import os
import time
import h5py
import torch
import torch.nn as nn
import torch.optim as optim
from device import device
import argparse
import numpy as np  # np is used directly below; don't rely on the wildcard import
from doom_instance import *
from aac_lstm import BaseModelLSTM
def data_generator(args, screens, variables, labels, episodes):
# remove short episodes
episode_min_size = args.episode_size*args.skiprate
episodes = episodes[episodes[:, 1]-episodes[:, 0] > episode_min_size]
episodes_num = len(episodes)
#
batch_size = args.batch_size
step_idx = episodes[:, 0].copy() + np.random.randint(args.skiprate, size=episodes_num)
step_screens = np.ndarray(shape=(batch_size, *screens.shape[1:]), dtype=np.float32)
step_variables = np.ndarray(shape=(batch_size, *variables.shape[1:]), dtype=np.float32)
step_labels = np.ndarray(shape=(batch_size,), dtype=np.int)
step_terminals = np.ones(shape=(batch_size, 1), dtype=np.float32)
# select episodes for the initial batch
batch_episodes = np.random.randint(episodes_num, size=batch_size)
while True:
for i in range(batch_size):
idx = batch_episodes[i]
step_screens[i, :] = screens[step_idx[idx]] / 127.5 - 1.0
step_variables[i, :] = variables[step_idx[idx]] / 100
step_labels[i] = labels[step_idx[idx]]
step_idx[idx] += args.skiprate
if step_idx[idx] > episodes[idx][1]:
step_idx[idx] = episodes[idx][0] + np.random.randint(args.skiprate)
step_terminals[i] = 0
# reached terminal state, select a new episode
batch_episodes[i] = np.random.randint(episodes_num)
else:
step_terminals[i] = 1
yield torch.from_numpy(step_screens), \
torch.from_numpy(step_variables), \
torch.from_numpy(step_labels), \
torch.from_numpy(step_terminals)
def train(args):
data_file = h5py.File(args.h5_path, 'r')
screens = data_file['screens']
variables = data_file['variables']
labels = data_file['action_labels']
print('Dataset size =', len(screens))
action_sets = data_file['action_sets'][:]
episodes = data_file['episodes'][:]
input_shape = screens[0].shape
train_generator = data_generator(args, screens, variables, labels, episodes)
model = BaseModelLSTM(input_shape[0], len(action_sets), variables.shape[1]).to(device)
#source_model = torch.load('imitation_model_lstm_bn0.pth')
#model.load_state_dict(source_model.state_dict())
#del source_model
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=5e-4)
optimizer.zero_grad()
running_loss = 0
running_accuracy = 0
batch_time = time.time()
cp = 0
for batch, (screens, variables, labels, terminals) in enumerate(train_generator):
screens, variables, labels = screens.to(device), variables.to(device), labels.to(device)
outputs = model(screens, variables)
loss = criterion(outputs, labels)
model.set_terminal(terminals)
running_loss += loss.item()
_, pred = outputs.max(1)
accuracy = (pred == labels).float().mean()
running_accuracy += accuracy
if batch % args.episode_size == args.episode_size - 1:
loss.backward()
optimizer.step()
model.reset()
optimizer.zero_grad()
running_loss /= args.episode_size
running_accuracy /= args.episode_size
print(
'[{:d}] loss: {:.3f}, accuracy: {:.3f}, time: {:.6f}'.format(
batch + 1, running_loss, running_accuracy, time.time()-batch_time
)
)
running_loss = 0
running_accuracy = 0
batch_time = time.time()
if batch % args.checkpoint_rate == args.checkpoint_rate - 1:
cp += 1
torch.save(model, args.checkpoint_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Imitation Model Trainer')
parser.add_argument('--batch_size', type=int, default=100, help='number of game instances running in parallel')
parser.add_argument('--load', default=None, help='path to model file')
parser.add_argument('--h5_path', default=os.path.expanduser('~') + '/test/datasets/vizdoom/cig_map01/flat.h5',
help='hd5 file path')
parser.add_argument('--skiprate', type=int, default=4, help='number of skipped frames')
parser.add_argument('--frame_num', type=int, default=1, help='number of frames per input')
parser.add_argument('--checkpoint_file', default=None, help='check point file name')
    parser.add_argument('--checkpoint_rate', type=int, default=5000, help='number of batches per checkpoint')
parser.add_argument('--episode_size', type=int, default=20, help='episode length')
args = parser.parse_args()
train(args)
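# Usage sketch (paths are illustrative; flag names match the argparse setup above):
#   python imitation_lstm.py --h5_path ~/datasets/vizdoom/flat.h5 \
#       --checkpoint_file imitation_model.pth --batch_size 100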
| {
"content_hash": "846abb44ad8a09bd33b43582c3cfc169",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 115,
"avg_line_length": 40.05691056910569,
"alnum_prop": 0.6226912928759895,
"repo_name": "akolishchak/doom-net-pytorch",
"id": "353cf5c8f1f978dd2183f25bd5911425b5589467",
"size": "5007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/imitation_lstm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303488"
},
{
"name": "Shell",
"bytes": "38833"
}
],
"symlink_target": ""
} |
from xml.etree import ElementTree
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.tests import delete_all_cases, delete_all_xforms
from casexml.apps.case.xml import V2
from casexml.apps.stock.models import StockReport, StockTransaction
from casexml.apps.stock.const import COMMTRACK_REPORT_XMLNS
from corehq.apps.commtrack import const
from corehq.apps.groups.models import Group
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.locations.models import Location
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.domain.models import Domain
from corehq.apps.commtrack.util import get_default_requisition_config
from corehq.apps.commtrack.models import CommTrackUser, SupplyPointCase, CommtrackConfig, ConsumptionConfig
from corehq.apps.sms.backend import test
from corehq.apps.commtrack.helpers import make_supply_point
from corehq.apps.commtrack.models import Product
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import get_safe_write_kwargs
from casexml.apps.phone.restore import generate_restore_payload
from lxml import etree
TEST_DOMAIN = 'commtrack-test'
TEST_LOCATION_TYPE = 'location'
TEST_USER = 'commtrack-user'
TEST_NUMBER = '5551234'
TEST_PASSWORD = 'secret'
TEST_BACKEND = 'test-backend'
ROAMING_USER = {
'username': TEST_USER + '-roaming',
'phone_number': TEST_NUMBER,
'first_name': 'roaming',
'last_name': 'reporter',
'user_data': {
const.UserRequisitionRoles.REQUESTER: True,
const.UserRequisitionRoles.RECEIVER: True,
},
}
FIXED_USER = {
'username': TEST_USER + '-fixed',
'phone_number': str(int(TEST_NUMBER) + 1),
'first_name': 'fixed',
'last_name': 'reporter',
'user_data': {
const.UserRequisitionRoles.REQUESTER: True,
const.UserRequisitionRoles.RECEIVER: True,
},
'home_loc': 'loc1',
}
APPROVER_USER = {
'username': 'test-approver',
'phone_number': '5550000',
'first_name': 'approver',
'last_name': 'user',
'user_data': {
const.UserRequisitionRoles.APPROVER: True,
},
}
PACKER_USER = {
'username': 'test-packer',
'phone_number': '5550001',
'first_name': 'packer',
'last_name': 'user',
'user_data': {
const.UserRequisitionRoles.SUPPLIER: True,
},
}
def bootstrap_domain(domain_name=TEST_DOMAIN):
# little test utility that makes a commtrack-enabled domain with
# a default config and a location
domain_obj = create_domain(domain_name)
domain_obj.commtrack_enabled = True
domain_obj.save(**get_safe_write_kwargs())
return domain_obj
def bootstrap_user(setup, username=TEST_USER, domain=TEST_DOMAIN,
phone_number=TEST_NUMBER, password=TEST_PASSWORD,
backend=TEST_BACKEND, first_name='', last_name='',
home_loc=None, user_data=None,
):
user_data = user_data or {}
user = CommTrackUser.create(
domain,
username,
password,
        phone_numbers=[phone_number],
user_data=user_data,
first_name=first_name,
last_name=last_name
)
if home_loc == setup.loc.site_code:
if not SupplyPointCase.get_by_location(setup.loc):
make_supply_point(domain, setup.loc)
user.add_location(setup.loc)
user.save()
user.save_verified_number(domain, phone_number, verified=True, backend_id=backend)
return CommTrackUser.wrap(user.to_json())
def make_loc(code, name=None, domain=TEST_DOMAIN, type=TEST_LOCATION_TYPE, parent=None):
name = name or code
loc = Location(site_code=code, name=name, domain=domain, location_type=type, parent=parent)
loc.save()
return loc
class CommTrackTest(TestCase):
requisitions_enabled = False # can be overridden
user_definitions = []
def setUp(self):
# might as well clean house before doing anything
delete_all_xforms()
delete_all_cases()
StockReport.objects.all().delete()
StockTransaction.objects.all().delete()
self.backend = test.bootstrap(TEST_BACKEND, to_console=True)
self.domain = bootstrap_domain()
self.ct_settings = CommtrackConfig.for_domain(self.domain.name)
self.ct_settings.consumption_config = ConsumptionConfig(
min_transactions=0,
min_window=0,
optimal_window=60,
min_periods=0,
)
if self.requisitions_enabled:
self.ct_settings.requisition_config = get_default_requisition_config()
self.ct_settings.save()
self.domain = Domain.get(self.domain._id)
self.loc = make_loc('loc1')
self.sp = make_supply_point(self.domain.name, self.loc)
self.users = [bootstrap_user(self, **user_def) for user_def in self.user_definitions]
if False:
# bootstrap additional users for requisitions
# needs to get reinserted for requisition stuff later
self.approver = bootstrap_user(self, **APPROVER_USER)
self.packer = bootstrap_user(self, **PACKER_USER)
self.users += [self.approver, self.packer]
# everyone should be in a group.
self.group = Group(domain=TEST_DOMAIN, name='commtrack-folks',
users=[u._id for u in self.users],
case_sharing=True)
self.group.save()
self.sp.owner_id = self.group._id
self.sp.save()
self.products = sorted(Product.by_domain(self.domain.name), key=lambda p: p._id)
self.assertEqual(3, len(self.products))
def tearDown(self):
self.backend.delete()
for u in self.users:
u.delete()
self.domain.delete() # domain delete cascades to everything else
def get_commtrack_forms(self, domain):
return XFormInstance.view('reports_forms/all_forms',
startkey=['submission xmlns', domain, COMMTRACK_REPORT_XMLNS],
endkey=['submission xmlns', domain, COMMTRACK_REPORT_XMLNS, {}],
reduce=False,
include_docs=True
)
def get_ota_balance_xml(user):
xml = generate_restore_payload(user.to_casexml_user(), version=V2)
return extract_balance_xml(xml)
def extract_balance_xml(xml_payload):
balance_blocks = etree.fromstring(xml_payload).findall('{http://commcarehq.org/ledger/v1}balance')
if balance_blocks:
return [etree.tostring(bb) for bb in balance_blocks]
return []
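# Usage sketch (e.g. from inside a CommTrackTest): pull the ledger <balance>
# blocks out of a user's OTA restore payload.
#   balance_blocks = get_ota_balance_xml(self.users[0])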
| {
"content_hash": "c7a013b774efc78035c8c6c64f4bd02e",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 107,
"avg_line_length": 35.52173913043478,
"alnum_prop": 0.6623317013463892,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "31df3209bb74ef2b8330c41fd4c537c53ea2da39",
"size": "6536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/commtrack/tests/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import shutil
import sys
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestRemoveThenAdd(WatchmanTestCase.WatchmanTestCase):
def checkOSApplicability(self):
        # os.name is never "linux" (it is "posix" there); check the platform instead
        if sys.platform.startswith("linux") and os.getenv("TRAVIS"):
self.skipTest("openvz and inotify unlinks == bad time")
def test_remove_then_add(self):
root = self.mkdtemp()
os.mkdir(os.path.join(root, "foo"))
self.watchmanCommand("watch", root)
self.touchRelative(root, "foo", "222")
os.mkdir(os.path.join(root, "foo", "bar"))
self.assertFileList(root, files=["foo", "foo/bar", "foo/222"])
shutil.rmtree(os.path.join(root, "foo", "bar"))
self.removeRelative(root, "foo", "222")
shutil.rmtree(os.path.join(root, "foo"))
self.assertFileList(root, files=[])
os.mkdir(os.path.join(root, "foo"))
self.touchRelative(root, "foo", "222")
self.assertFileList(root, files=["foo", "foo/222"])
| {
"content_hash": "7bac5ab15638cf862b55a39cd21e5658",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 29.914285714285715,
"alnum_prop": 0.6341929321872015,
"repo_name": "wez/watchman",
"id": "3413153c1cf15d1021ede36dadfa603d1f0de5d3",
"size": "1179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_remove_then_add.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68354"
},
{
"name": "C++",
"bytes": "1017051"
},
{
"name": "CMake",
"bytes": "33772"
},
{
"name": "CSS",
"bytes": "42513"
},
{
"name": "HTML",
"bytes": "36593"
},
{
"name": "Java",
"bytes": "165025"
},
{
"name": "JavaScript",
"bytes": "35291"
},
{
"name": "Python",
"bytes": "677902"
},
{
"name": "Ruby",
"bytes": "21741"
},
{
"name": "Rust",
"bytes": "69015"
},
{
"name": "Shell",
"bytes": "13265"
},
{
"name": "Thrift",
"bytes": "32316"
}
],
"symlink_target": ""
} |
"""
Invoker that runs all calls on a single core (CPU).
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.concurrent.invoker.invoker import Invoker
from metaopt.core.call.call import call
from metaopt.core.stoppable.stoppable import stoppable
class SingleProcessInvoker(Invoker):
"""Invoker that does the work on its own."""
def __init__(self):
super(SingleProcessInvoker, self).__init__()
@stoppable
def invoke(self, caller, fargs, **kwargs):
"""Calls back to self._caller.on_result() for call(f, fargs)."""
self._caller = caller
del caller
try:
value = call(self.f, fargs)
self._caller.on_result(value=value, fargs=fargs, **kwargs)
except Exception as value:
self._caller.on_error(value=value, fargs=fargs, **kwargs)
def wait(self):
"""Blocks till all invoke, on_error or on_result calls are done."""
pass
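# Usage sketch (assumes a caller object implementing on_result()/on_error(),
# and that the objective function `f` is supplied the way the Invoker base
# class expects):
#   invoker = SingleProcessInvoker()
#   invoker.f = f
#   invoker.invoke(caller, fargs)  # result is delivered via caller.on_result()
#   invoker.wait()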
| {
"content_hash": "162eb8cf97edd49211806468a9900d3c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 31.21212121212121,
"alnum_prop": 0.6533980582524271,
"repo_name": "cigroup-ol/metaopt",
"id": "6668c46fa17165dcdeb8402ff68bda80f13b577b",
"size": "1054",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "metaopt/concurrent/invoker/singleprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4967"
},
{
"name": "JavaScript",
"bytes": "3271"
},
{
"name": "Makefile",
"bytes": "4738"
},
{
"name": "Python",
"bytes": "226232"
}
],
"symlink_target": ""
} |
import json
import sublime
from ..modules import session
class SessionEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, session.Session):
return {
"name": obj.name,
"windows": [
WindowEncoder.default(self, w) for w in obj.windows
]
}
return json.JSONEncoder.default(self, obj)
class WindowEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, session.Window):
return {
"project": obj.project,
"project_path": obj.project_path,
"views": [
ViewEncoder.default(self, v) for v in obj.views
]
}
return json.JSONEncoder.default(self, obj)
class ViewEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, session.View):
return {
"file_path": obj.file_path,
"active": obj.active,
"sel_regions": [
RegionEncoder.default(self, r) for r in obj.sel_regions
],
"visible_region": RegionEncoder.default(self, obj.visible_region)
}
return json.JSONEncoder.default(self, obj)
class RegionEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, sublime.Region):
return (obj.a, obj.b)
return json.JSONEncoder.default(self, obj)
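# Usage sketch (assuming a populated session.Session instance named `sess`):
#   payload = json.dumps(sess, cls=SessionEncoder)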
| {
"content_hash": "5c490487171c823b31dde07cd3ba789d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 81,
"avg_line_length": 27.833333333333332,
"alnum_prop": 0.5409181636726547,
"repo_name": "Zeeker/sublime-SessionManager",
"id": "2ac750a8c19fea6647493ce75105a9d5a204878d",
"size": "1503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "json/encoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15080"
}
],
"symlink_target": ""
} |
import subprocess
def func_exec_stdout(app, *args):
cmd = app
if args:
cmd += ' ' + ' '.join(args)
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return p.stdout
def func_exec_run(app, *args):
return func_exec_stdout(app, *args).decode('utf-8')
def func_exec(app, *args):
cmd = app
if args:
cmd += ' ' + ' '.join(args)
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False)
    return p.stdout.read()
| {
"content_hash": "3026346a688dbfb3949a2eb4bfe0f0bc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 131,
"avg_line_length": 29.68421052631579,
"alnum_prop": 0.6418439716312057,
"repo_name": "mainulhossain/phenowl",
"id": "2da57df87ef516010e261f50e26a6397c5ff4e33",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exechelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "47342"
},
{
"name": "Python",
"bytes": "307820"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
from alertaclient.api import Client
client = Client()
try:
id, alert, message = client.send_alert(
resource='web-server-01',
event='HttpError',
correlate=['HttpOK'],
group='Web',
environment='Production',
service=['theguardian.com'],
severity='major',
value='Bad Gateway (502)',
text='Web server error.',
tags=['web', 'dc1', 'london'],
attributes={'customer': 'The Guardian'}
)
print(alert)
except Exception as e:
print(e)
| {
"content_hash": "8855c3c74c9a6a4f9f5e14a62861f888",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 47,
"avg_line_length": 25.19047619047619,
"alnum_prop": 0.5689981096408318,
"repo_name": "alerta/python-alerta",
"id": "6d7a010e04e375c4b35cddbe27d08c3b4501d387",
"size": "552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/send.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106941"
}
],
"symlink_target": ""
} |
"""
I am a support module for making SOCKSv4 servers with twistd.
"""
from __future__ import print_function
from twisted.protocols import socks
from twisted.python import usage
from twisted.application import internet
class Options(usage.Options):
synopsis = "[-i <interface>] [-p <port>] [-l <file>]"
optParameters = [["interface", "i", "127.0.0.1", "local interface to which we listen"],
["port", "p", 1080, "Port on which to listen"],
["log", "l", None, "file to log connection data to"]]
compData = usage.Completions(
optActions={"log": usage.CompleteFiles("*.log"),
"interface": usage.CompleteNetInterfaces()}
)
longdesc = "Makes a SOCKSv4 server."
def makeService(config):
if config["interface"] != "127.0.0.1":
print()
print("WARNING:")
print(" You have chosen to listen on a non-local interface.")
print(" This may allow intruders to access your local network")
print(" if you run this on a firewall.")
print()
t = socks.SOCKSv4Factory(config['log'])
portno = int(config['port'])
return internet.TCPServer(portno, t, interface=config['interface'])
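# Usage sketch: this module backs the `socks` twistd subcommand, e.g.
#   twistd -n socks -p 1080 -l socks.log
# (flag names come from the Options declaration above)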
| {
"content_hash": "d46db5162fd3f0431fa5c51d1b4af57b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 91,
"avg_line_length": 35.794117647058826,
"alnum_prop": 0.6211996713229252,
"repo_name": "ntuecon/server",
"id": "59ea85c855150204efb892db43426c31ba999d0e",
"size": "1292",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/twisted/tap/socks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
} |
import tkinter
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import EGSE.IF
import UI.TKI
#############
# constants #
#############
COLOR_BUTTON_FG = "#FFFFFF"
COLOR_BUTTON_BG = "#808080"
COLOR_INITIALISED = "#FFFF00"
COLOR_CONNECTED = "#00FF00"
COLOR_ON_OK = "#00FF00"
COLOR_ON_NOK = "#FF0000"
###########
# classes #
###########
# =============================================================================
class GUIview(UI.TKI.GUItabView):
"""Implementation of the SCOE EGSE GUI layer"""
# ---------------------------------------------------------------------------
def __init__(self, master):
"""Initialise all GUI elements"""
UI.TKI.GUItabView.__init__(self, master, "EGSE", "EGSE interface to CCS")
# checkbuttons
self.checkButtons = UI.TKI.Checkbuttons(self,
[["ACK1", self.ack1Callback, True, COLOR_ON_OK],
["NAK1", self.nak1Callback, False, COLOR_ON_NOK],
["ACK2", self.ack2Callback, True, COLOR_ON_OK],
["NAK2", self.nak2Callback, False, COLOR_ON_NOK]])
self.appGrid(self.checkButtons,
row=0,
columnspan=2,
rowweight=0,
columnweight=0,
sticky=tkinter.W)
# EGSE protocol
self.egseProtocolField = UI.TKI.ValueField(self, row=1, label="EGSE protocol:")
self.egseProtocolField.set(EGSE.IF.s_serverConfiguration.egseProtocol)
# CCS interface status
self.ccsStatusField = UI.TKI.ValueField(self, row=2, label="CCS interface status:")
self.ccsStatusField.set("INIT")
self.ccsStatusField.setBackground(COLOR_INITIALISED)
# CCS interface port
self.ccsPortField = UI.TKI.ValueField(self, row=3, label="CCS interface port:")
self.ccsPortField.set(EGSE.IF.s_serverConfiguration.ccsPort)
# CCS interface status 2
self.ccsStatusField2 = UI.TKI.ValueField(self, row=4, label="CCS interface status 2:")
self.ccsStatusField2.set("INIT")
self.ccsStatusField2.setBackground(COLOR_INITIALISED)
# CCS interface port 2
self.ccsPortField2 = UI.TKI.ValueField(self, row=5, label="CCS interface port 2:")
self.ccsPortField2.set(EGSE.IF.s_serverConfiguration.ccsPort2)
# log messages (default logger)
self.messageLogger = UI.TKI.MessageLogger(self)
self.appGrid(self.messageLogger, row=6, columnspan=2)
# message line
self.messageline = tkinter.Message(self, relief=tkinter.GROOVE)
self.appGrid(self.messageline,
row=7,
columnspan=2,
rowweight=0,
columnweight=0,
sticky=tkinter.EW)
self.grid(row=0, column=0, sticky=tkinter.EW+tkinter.NS)
self.master.rowconfigure(0, weight=1)
self.master.columnconfigure(0, weight=1)
# ---------------------------------------------------------------------------
def fillCommandMenuItems(self):
"""
fill the command menu bar,
implementation of UI.TKI.GUItabView.fillCommandMenuItems
"""
self.addCommandMenuItem(label="EGSEenableAck1", command=self.egseEnableAck1Callback, enabled=False)
self.addCommandMenuItem(label="EGSEenableNak1", command=self.egseEnableNak1Callback)
self.addCommandMenuItem(label="EGSEdisableAck1", command=self.egseDisableAck1Callback)
self.addCommandMenuItem(label="EGSEenableAck2", command=self.egseEnableAck2Callback, enabled=False)
self.addCommandMenuItem(label="EGSEenableNak2", command=self.egseEnableNak2Callback)
self.addCommandMenuItem(label="EGSEdisableAck2", command=self.egseDisableAck2Callback)
# ---------------------------------------------------------------------------
def egseEnableAck1Callback(self):
"""Called when the EGSEenableAck1 menu entry is selected"""
self.notifyModelTask(["EGSEENABLEACK1"])
def egseEnableNak1Callback(self):
"""Called when the EGSEenableNak1 menu entry is selected"""
self.notifyModelTask(["EGSEENABLENAK1"])
def egseDisableAck1Callback(self):
"""Called when the EGSEdisableAck1 menu entry is selected"""
self.notifyModelTask(["EGSEDISABLEACK1"])
def ack1Callback(self):
"""Called when the ACK1 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK1"):
self.notifyModelTask(["EGSEENABLEACK1"])
else:
self.notifyModelTask(["EGSEDISABLEACK1"])
def nak1Callback(self):
"""Called when the NAK1 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK1"):
self.notifyModelTask(["EGSEENABLENAK1"])
else:
self.notifyModelTask(["EGSEDISABLEACK1"])
# ---------------------------------------------------------------------------
def egseEnableAck2Callback(self):
"""Called when the EGSEenableAck2 menu entry is selected"""
self.notifyModelTask(["EGSEENABLEACK2"])
def egseEnableNak2Callback(self):
"""Called when the EGSEenableNak2 menu entry is selected"""
self.notifyModelTask(["EGSEENABLENAK2"])
def egseDisableAck2Callback(self):
"""Called when the EGSEdisableAck2 menu entry is selected"""
self.notifyModelTask(["EGSEDISABLEACK2"])
def ack2Callback(self):
"""Called when the ACK2 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK2"):
self.notifyModelTask(["EGSEENABLEACK2"])
else:
self.notifyModelTask(["EGSEDISABLEACK2"])
def nak2Callback(self):
"""Called when the NAK2 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK2"):
self.notifyModelTask(["EGSEENABLENAK2"])
else:
self.notifyModelTask(["EGSEDISABLEACK2"])
# ---------------------------------------------------------------------------
def notifyStatus(self, status):
"""Generic callback when something changes in the model"""
if status == "CCS_CONNECTED":
self.ccsConnectedNotify()
elif status == "CCS_DISCONNECTED":
self.ccsDisconnectedNotify()
elif status == "CCS_CONNECTED2":
self.ccsConnected2Notify()
elif status == "CCS_DISCONNECTED2":
self.ccsDisconnected2Notify()
elif status == "EGSE_ENABLED_ACK1":
self.egseEnabledAck1Notify()
elif status == "EGSE_ENABLED_NAK1":
self.egseEnabledNak1Notify()
elif status == "EGSE_DISABLED_ACK1":
self.egseDisabledAck1Notify()
elif status == "EGSE_ENABLED_ACK2":
self.egseEnabledAck2Notify()
elif status == "EGSE_ENABLED_NAK2":
self.egseEnabledNak2Notify()
elif status == "EGSE_DISABLED_ACK2":
self.egseDisabledAck2Notify()
# ---------------------------------------------------------------------------
def ccsConnectedNotify(self):
"""Called when the CCS connect function is successfully processed"""
self.ccsStatusField.set("CONNECTED")
self.ccsStatusField.setBackground(COLOR_CONNECTED)
# ---------------------------------------------------------------------------
def ccsDisconnectedNotify(self):
"""Called when the CCS disconnect function is successfully processed"""
self.ccsStatusField.set("DISCONNECTED")
self.ccsStatusField.setBackground(COLOR_INITIALISED)
# ---------------------------------------------------------------------------
def ccsConnected2Notify(self):
"""Called when the CCS 2nd connect function is successfully processed"""
self.ccsStatusField2.set("CONNECTED")
self.ccsStatusField2.setBackground(COLOR_CONNECTED)
# ---------------------------------------------------------------------------
def ccsDisconnected2Notify(self):
"""Called when the CCS 2nd disconnect function is successfully processed"""
self.ccsStatusField2.set("DISCONNECTED")
self.ccsStatusField2.setBackground(COLOR_INITIALISED)
# ---------------------------------------------------------------------------
def egseEnabledAck1Notify(self):
"""Called when the egseEnabledAck1 function is successfully processed"""
self.disableCommandMenuItem("EGSEenableAck1")
self.enableCommandMenuItem("EGSEenableNak1")
self.enableCommandMenuItem("EGSEdisableAck1")
self.checkButtons.setButtonPressed("ACK1", True)
self.checkButtons.setButtonPressed("NAK1", False)
def egseEnabledNak1Notify(self):
"""Called when the egseEnabledNak1 function is successfully processed"""
self.enableCommandMenuItem("EGSEenableAck1")
self.disableCommandMenuItem("EGSEenableNak1")
self.enableCommandMenuItem("EGSEdisableAck1")
self.checkButtons.setButtonPressed("ACK1", False)
self.checkButtons.setButtonPressed("NAK1", True)
def egseDisabledAck1Notify(self):
"""Called when the egseDisabledAck1 function is successfully processed"""
self.enableCommandMenuItem("EGSEenableAck1")
self.enableCommandMenuItem("EGSEenableNak1")
self.disableCommandMenuItem("EGSEdisableAck1")
self.checkButtons.setButtonPressed("ACK1", False)
self.checkButtons.setButtonPressed("NAK1", False)
# ---------------------------------------------------------------------------
def egseEnabledAck2Notify(self):
"""Called when the egseEnabledAck2 function is successfully processed"""
self.disableCommandMenuItem("EGSEenableAck2")
self.enableCommandMenuItem("EGSEenableNak1")
self.enableCommandMenuItem("EGSEdisableAck2")
self.checkButtons.setButtonPressed("ACK2", True)
self.checkButtons.setButtonPressed("NAK2", False)
def egseEnabledNak2Notify(self):
"""Called when the egseEnabledNak2 function is successfully processed"""
self.enableCommandMenuItem("EGSEenableAck2")
self.disableCommandMenuItem("EGSEenableNak2")
self.enableCommandMenuItem("EGSEdisableAck2")
self.checkButtons.setButtonPressed("ACK2", False)
self.checkButtons.setButtonPressed("NAK2", True)
def egseDisabledAck2Notify(self):
"""Called when the egseDisabledAck2 function is successfully processed"""
self.enableCommandMenuItem("EGSEenableAck2")
self.enableCommandMenuItem("EGSEenableNak2")
self.disableCommandMenuItem("EGSEdisableAck2")
self.checkButtons.setButtonPressed("ACK2", False)
self.checkButtons.setButtonPressed("NAK2", False)
| {
"content_hash": "3d54793bb83df57a68065665c7448367",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 103,
"avg_line_length": 48.27014218009479,
"alnum_prop": 0.6402552773686794,
"repo_name": "Stefan-Korner/SpacePyLibrary",
"id": "b38c3d1fbca0f583b36c9d236fc75ff9baf4648e",
"size": "11319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SCOE/EGSEgui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11666"
},
{
"name": "Python",
"bytes": "1264766"
},
{
"name": "Shell",
"bytes": "23787"
}
],
"symlink_target": ""
} |
from application.settings import get_option
import logging
import requests
import traceback
import requests.exceptions
def send_notifications(entries):
"""
    We don't expect any exceptions from this function - all exceptions inside
    this function must be caught and logged.
"""
notify_telegram(entries)
def notify_telegram(entries):
bot_token = get_option("notifications.telegram.bot_token", "")
if not bot_token:
return
chat_id = get_option("notifications.telegram.chat_id", "")
if not chat_id:
return
for entry in entries:
text = "[{}]({}) ({})".format(entry.title, entry.url, entry.subscription.source)
data = {
'chat_id': chat_id,
'text': text,
'parse_mode': 'Markdown'
}
try:
requests.post(
'https://api.telegram.org/bot{}/sendMessage'.format(bot_token),
data=data,
timeout=3
)
except requests.exceptions.RequestException:
log = logging.getLogger("app")
log.error('Error sending to Telegram: %s' % (traceback.format_exc()))
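# Usage sketch (hypothetical values): the two options read above live in the
# application settings, e.g.
#   notifications.telegram.bot_token = "123456:ABC-hypothetical-token"
#   notifications.telegram.chat_id = "-1001234567890"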
| {
"content_hash": "f8f73180211e90a12376d8fc78bebf65",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 88,
"avg_line_length": 27,
"alnum_prop": 0.5925925925925926,
"repo_name": "FZambia/reborn",
"id": "79bd8626b1f505a7858867352b3a6885b8723855",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/core/notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18399"
},
{
"name": "HTML",
"bytes": "546"
},
{
"name": "JavaScript",
"bytes": "51938"
},
{
"name": "Makefile",
"bytes": "531"
},
{
"name": "Python",
"bytes": "36652"
}
],
"symlink_target": ""
} |
"""Minimal example to call the GLOP solver."""
# [START program]
# [START import]
from ortools.linear_solver import pywraplp
# [END import]
def main():
# [START solver]
# Create the linear solver with the GLOP backend.
solver = pywraplp.Solver.CreateSolver('GLOP')
if not solver:
return
# [END solver]
# [START variables]
infinity = solver.infinity()
# Create the variables x and y.
x = solver.NumVar(0.0, infinity, 'x')
y = solver.NumVar(0.0, infinity, 'y')
print('Number of variables =', solver.NumVariables())
# [END variables]
# [START constraints]
# x + 7 * y <= 17.5.
solver.Add(x + 7 * y <= 17.5)
# x <= 3.5.
solver.Add(x <= 3.5)
print('Number of constraints =', solver.NumConstraints())
# [END constraints]
# [START objective]
# Maximize x + 10 * y.
solver.Maximize(x + 10 * y)
# [END objective]
# [START solve]
status = solver.Solve()
# [END solve]
# [START print_solution]
if status == pywraplp.Solver.OPTIMAL:
print('Solution:')
print('Objective value =', solver.Objective().Value())
print('x =', x.solution_value())
print('y =', y.solution_value())
else:
print('The problem does not have an optimal solution.')
# [END print_solution]
# [START advanced]
print('\nAdvanced usage:')
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Problem solved in %d iterations' % solver.iterations())
# [END advanced]
if __name__ == '__main__':
main()
# [END program]
| {
"content_hash": "49770f269c10e6b7b1c11e8758f33d2e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 67,
"avg_line_length": 25.253968253968253,
"alnum_prop": 0.5933375235700817,
"repo_name": "google/or-tools",
"id": "4aae1e27b5a9997875377c425bbb4b629e5b3a52",
"size": "2191",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "ortools/linear_solver/samples/simple_lp_program.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18599"
},
{
"name": "C",
"bytes": "11382"
},
{
"name": "C#",
"bytes": "498888"
},
{
"name": "C++",
"bytes": "14071164"
},
{
"name": "CMake",
"bytes": "219723"
},
{
"name": "Dockerfile",
"bytes": "149476"
},
{
"name": "Java",
"bytes": "459136"
},
{
"name": "Lex",
"bytes": "2271"
},
{
"name": "Makefile",
"bytes": "207007"
},
{
"name": "Python",
"bytes": "629275"
},
{
"name": "SWIG",
"bytes": "414259"
},
{
"name": "Shell",
"bytes": "83555"
},
{
"name": "Starlark",
"bytes": "235950"
},
{
"name": "Yacc",
"bytes": "26027"
},
{
"name": "sed",
"bytes": "45"
}
],
"symlink_target": ""
} |
def extractWhoistarantellaWordpressCom(item):
'''
Parser for 'whoistarantella.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| {
"content_hash": "e41d1f813e9daa301aa64d0a163825d0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.095238095238095,
"alnum_prop": 0.6414762741652021,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b7c6e22905221bdd509cc73ccf918fce3b3c75a7",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWhoistarantellaWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import annotations # Python < 3.10
from typing import Any, List, NamedTuple, Optional, Union
__all__ = ["Path"]
class Path(NamedTuple):
"""A generic path of string or integer indices"""
prev: Any # Optional['Path'] (python/mypy/issues/731)
"""path with the previous indices"""
key: Union[str, int]
"""current index in the path (string or integer)"""
typename: Optional[str]
"""name of the parent type to avoid path ambiguity"""
def add_key(self, key: Union[str, int], typename: Optional[str] = None) -> Path:
"""Return a new Path containing the given key."""
return Path(self, key, typename)
def as_list(self) -> List[Union[str, int]]:
"""Return a list of the path keys."""
flattened: List[Union[str, int]] = []
append = flattened.append
curr: Path = self
while curr:
append(curr.key)
curr = curr.prev
return flattened[::-1]
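# Usage sketch: paths grow immutably via add_key() and flatten with as_list().
#   path = Path(None, "user", "Query").add_key(0).add_key("name")
#   assert path.as_list() == ["user", 0, "name"]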
| {
"content_hash": "464e80ce93a21d5e250c58410f32bcfa",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 31.451612903225808,
"alnum_prop": 0.6041025641025641,
"repo_name": "graphql-python/graphql-core",
"id": "19dd79ba4ef2af6f68edaa2d62b1e4fa8686e27f",
"size": "975",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/graphql/pyutils/path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2235538"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import random
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOW
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces space character (' ') with a random blank character from a
valid set of alternate characters
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass several web application firewalls
>>> random.seed(0)
>>> tamper('SELECT id FROM users')
'SELECT%0Did%0DFROM%0Ausers'
"""
# ASCII table:
# TAB 09 horizontal TAB
# LF 0A new line
# FF 0C new page
# CR 0D carriage return
blanks = ("%09", "%0A", "%0C", "%0D")
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += random.choice(blanks)
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == ' ' and not doublequote and not quote:
retVal += random.choice(blanks)
continue
retVal += payload[i]
return retVal
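The quote/doublequote toggles above are what keep SQL string literals intact. A standalone sketch of just that part (simplified: the firstspace special case is dropped, and this is not sqlmap code):
import random

def space2randomblank_demo(payload):
    # Same idea as tamper() above: swap bare spaces for random blanks,
    # but leave spaces inside '...' or "..." literals untouched.
    blanks = ("%09", "%0A", "%0C", "%0D")
    out, quote, doublequote = "", False, False
    for ch in payload:
        if ch == "'":
            quote = not quote
        elif ch == '"':
            doublequote = not doublequote
        elif ch == " " and not quote and not doublequote:
            out += random.choice(blanks)
            continue
        out += ch
    return out

random.seed(0)
# The space inside 'a b' survives; the other spaces are replaced.
print(space2randomblank_demo("SELECT 'a b' FROM t"))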
| {
"content_hash": "f97405404d58bdd3840c628d7ccd385e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 71,
"avg_line_length": 24.8,
"alnum_prop": 0.533498759305211,
"repo_name": "JeyZeta/Dangerous",
"id": "98612534a32d091ba3194624b06c4bb1fb175678",
"size": "1635",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Dangerous/sqlmap/tamper/space2randomblank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
from unittest import mock
import pytest
import requests
from settings import TEST_DATA
from suite.utils.policy_resources_utils import create_policy_from_yaml, delete_policy
from suite.utils.resources_utils import create_secret_from_yaml, delete_secret, wait_before_test
from suite.utils.ssl_utils import create_sni_session
from suite.utils.vs_vsr_resources_utils import (
patch_v_s_route_from_yaml,
patch_virtual_server_from_yaml,
read_vs,
read_vsr,
)
std_vs_src = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml"
std_vsr_src = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml"
std_vs_vsr_src = f"{TEST_DATA}/virtual-server-route/standard/virtual-server.yaml"
mtls_sec_valid_src = f"{TEST_DATA}/ingress-mtls/secret/ingress-mtls-secret.yaml"
tls_sec_valid_src = f"{TEST_DATA}/ingress-mtls/secret/tls-secret.yaml"
mtls_pol_valid_src = f"{TEST_DATA}/ingress-mtls/policies/ingress-mtls.yaml"
mtls_pol_invalid_src = f"{TEST_DATA}/ingress-mtls/policies/ingress-mtls-invalid.yaml"
mtls_vs_spec_src = f"{TEST_DATA}/ingress-mtls/spec/virtual-server-mtls.yaml"
mtls_vs_route_src = f"{TEST_DATA}/ingress-mtls/route-subroute/virtual-server-mtls.yaml"
mtls_vsr_subroute_src = f"{TEST_DATA}/ingress-mtls/route-subroute/virtual-server-route-mtls.yaml"
mtls_vs_vsr_src = f"{TEST_DATA}/ingress-mtls/route-subroute/virtual-server-vsr.yaml"
crt = f"{TEST_DATA}/ingress-mtls/client-auth/valid/client-cert.pem"
key = f"{TEST_DATA}/ingress-mtls/client-auth/valid/client-key.pem"
invalid_crt = f"{TEST_DATA}/ingress-mtls/client-auth/invalid/client-cert.pem"
invalid_key = f"{TEST_DATA}/ingress-mtls/client-auth/invalid/client-cert.pem"
def setup_policy(kube_apis, test_namespace, mtls_secret, tls_secret, policy):
print(f"Create ingress-mtls secret")
mtls_secret_name = create_secret_from_yaml(kube_apis.v1, test_namespace, mtls_secret)
print(f"Create ingress-mtls policy")
pol_name = create_policy_from_yaml(kube_apis.custom_objects, policy, test_namespace)
print(f"Create tls secret")
tls_secret_name = create_secret_from_yaml(kube_apis.v1, test_namespace, tls_secret)
return mtls_secret_name, tls_secret_name, pol_name
def teardown_policy(kube_apis, test_namespace, tls_secret, pol_name, mtls_secret):
print("Delete policy and related secrets")
delete_secret(kube_apis.v1, tls_secret, test_namespace)
delete_policy(kube_apis.custom_objects, pol_name, test_namespace)
delete_secret(kube_apis.v1, mtls_secret, test_namespace)
@pytest.mark.policies
@pytest.mark.parametrize(
"crd_ingress_controller, virtual_server_setup",
[
(
{
"type": "complete",
"extra_args": [
f"-enable-leader-election=false",
],
},
{
"example": "virtual-server",
"app_type": "simple",
},
)
],
indirect=True,
)
class TestIngressMtlsPolicyVS:
@pytest.mark.parametrize(
"policy_src, vs_src, expected_code, expected_text, vs_message, vs_state",
[
(
mtls_pol_valid_src,
mtls_vs_spec_src,
200,
"Server address:",
"was added or updated",
"Valid",
),
(
mtls_pol_valid_src,
mtls_vs_route_src,
500,
"Internal Server Error",
"is not allowed in the route context",
"Warning",
),
(
mtls_pol_invalid_src,
mtls_vs_spec_src,
500,
"Internal Server Error",
"is missing or invalid",
"Warning",
),
],
)
@pytest.mark.smoke
def test_ingress_mtls_policy(
self,
kube_apis,
crd_ingress_controller,
virtual_server_setup,
test_namespace,
policy_src,
vs_src,
expected_code,
expected_text,
vs_message,
vs_state,
):
"""
Test ingress-mtls with valid and invalid policy in vs spec and route contexts.
"""
session = create_sni_session()
mtls_secret, tls_secret, pol_name = setup_policy(
kube_apis,
test_namespace,
mtls_sec_valid_src,
tls_sec_valid_src,
policy_src,
)
print(f"Patch vs with policy: {policy_src}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_src,
virtual_server_setup.namespace,
)
wait_before_test()
resp = session.get(
virtual_server_setup.backend_1_url_ssl,
cert=(crt, key),
headers={"host": virtual_server_setup.vs_host},
allow_redirects=False,
verify=False,
)
vs_res = read_vs(kube_apis.custom_objects, test_namespace, virtual_server_setup.vs_name)
teardown_policy(kube_apis, test_namespace, tls_secret, pol_name, mtls_secret)
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
std_vs_src,
virtual_server_setup.namespace,
)
assert (
resp.status_code == expected_code
and expected_text in resp.text
and vs_message in vs_res["status"]["message"]
and vs_res["status"]["state"] == vs_state
)
@pytest.mark.parametrize(
"certificate, expected_code, expected_text, exception",
[
((crt, key), 200, "Server address:", ""),
("", 400, "No required SSL certificate was sent", ""),
((invalid_crt, invalid_key), "None", "None", "Caused by SSLError"),
],
)
def test_ingress_mtls_policy_cert(
self,
kube_apis,
crd_ingress_controller,
virtual_server_setup,
test_namespace,
certificate,
expected_code,
expected_text,
exception,
):
"""
Test ingress-mtls with valid and invalid policy
"""
session = create_sni_session()
mtls_secret, tls_secret, pol_name = setup_policy(
kube_apis,
test_namespace,
mtls_sec_valid_src,
tls_sec_valid_src,
mtls_pol_valid_src,
)
print(f"Patch vs with policy: {mtls_pol_valid_src}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
mtls_vs_spec_src,
virtual_server_setup.namespace,
)
wait_before_test()
ssl_exception = ""
resp = ""
try:
resp = session.get(
virtual_server_setup.backend_1_url_ssl,
cert=certificate,
headers={"host": virtual_server_setup.vs_host},
allow_redirects=False,
verify=False,
)
except requests.exceptions.SSLError as e:
print(f"SSL certificate exception: {e}")
ssl_exception = str(e)
resp = mock.Mock()
resp.status_code = "None"
resp.text = "None"
teardown_policy(kube_apis, test_namespace, tls_secret, pol_name, mtls_secret)
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
std_vs_src,
virtual_server_setup.namespace,
)
assert resp.status_code == expected_code and expected_text in resp.text and exception in ssl_exception
@pytest.mark.policies
@pytest.mark.parametrize(
"crd_ingress_controller, v_s_route_setup",
[
(
{
"type": "complete",
"extra_args": [
f"-enable-leader-election=false",
],
},
{"example": "virtual-server-route"},
)
],
indirect=True,
)
class TestIngressMtlsPolicyVSR:
def test_ingress_mtls_policy_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
v_s_route_setup,
test_namespace,
):
"""
Test ingress-mtls in vsr subroute context.
"""
mtls_secret, tls_secret, pol_name = setup_policy(
kube_apis,
v_s_route_setup.route_m.namespace,
mtls_sec_valid_src,
tls_sec_valid_src,
mtls_pol_valid_src,
)
print(f"Patch vsr with policy: {mtls_vsr_subroute_src} and vs with tls secret: {tls_secret}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.vs_name,
mtls_vs_vsr_src,
v_s_route_setup.namespace,
)
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
mtls_vsr_subroute_src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
vsr_res = read_vsr(
kube_apis.custom_objects,
v_s_route_setup.route_m.namespace,
v_s_route_setup.route_m.name,
)
teardown_policy(kube_apis, v_s_route_setup.route_m.namespace, tls_secret, pol_name, mtls_secret)
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
std_vsr_src,
v_s_route_setup.route_m.namespace,
)
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.vs_name,
std_vs_vsr_src,
v_s_route_setup.namespace,
)
assert (
vsr_res["status"]["state"] == "Warning"
and f"{pol_name} is not allowed in the subroute context" in vsr_res["status"]["message"]
)
| {
"content_hash": "42d372da7233342b812e95e41946fc03",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 110,
"avg_line_length": 33.36,
"alnum_prop": 0.5660471622701838,
"repo_name": "nginxinc/kubernetes-ingress",
"id": "6ebe7922c4ab5a966ff14ced8b14ae4b2a174193",
"size": "10008",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/suite/test_ingress_mtls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "17266"
},
{
"name": "Go",
"bytes": "2018886"
},
{
"name": "JavaScript",
"bytes": "12899"
},
{
"name": "Makefile",
"bytes": "11737"
},
{
"name": "Mustache",
"bytes": "2980"
},
{
"name": "Python",
"bytes": "1037476"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
import json
from django.test import TestCase
from django.urls import reverse
from mapentity.tests.factories import UserFactory
from rest_framework.test import APITestCase
from geotrek.zoning.tests.factories import RestrictedAreaFactory, RestrictedAreaTypeFactory
from geotrek.zoning.templatetags.zoning_tags import all_restricted_areas, restricted_areas_by_type
class LandLayersViewsTest(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory()
def setUp(self):
self.client.force_authenticate(self.user)
def test_views_status(self):
for layer in ['city', 'restrictedarea', 'district']:
url = reverse('zoning:%s_layer' % layer)
response = self.client.get(url)
self.assertEqual(response.status_code, 200, response.json())
class RestrictedAreaViewsTest(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory()
def setUp(self) -> None:
self.client.force_authenticate(self.user)
def test_views_status_is_404_when_type_unknown(self):
url = reverse('zoning:restrictedarea_type_layer', kwargs={'type_pk': 1023})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_views_status_is_200_when_type_known(self):
t = RestrictedAreaTypeFactory()
url = reverse('zoning:restrictedarea_type_layer', kwargs={'type_pk': t.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200, response.json())
class RestrictedAreasSerializationTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.type_1 = RestrictedAreaTypeFactory(name="ABC")
cls.type_2 = RestrictedAreaTypeFactory(name="AAC")
cls.type_3 = RestrictedAreaTypeFactory(name="ABB")
cls.type_4 = RestrictedAreaTypeFactory(name="AAA")
cls.area_1 = RestrictedAreaFactory(area_type=cls.type_1, name="aaa")
cls.area_2 = RestrictedAreaFactory(area_type=cls.type_1, name="aab")
cls.area_3 = RestrictedAreaFactory(area_type=cls.type_2, name="aaa")
cls.area_4 = RestrictedAreaFactory(area_type=cls.type_2, name="aab")
cls.area_5 = RestrictedAreaFactory(area_type=cls.type_3, name="aab")
cls.area_6 = RestrictedAreaFactory(area_type=cls.type_3, name="aaa")
cls.area_7 = RestrictedAreaFactory(area_type=cls.type_4, name="aba")
cls.area_8 = RestrictedAreaFactory(area_type=cls.type_4, name="aca")
cls.area_9 = RestrictedAreaFactory(area_type=cls.type_4, name="aaa")
    def test_restricted_areas_by_type_serialization(self):
""" Test restricted areas are sorted by type and ordered alphabetically within types
"""
with self.assertNumQueries(2):
            serialized = restricted_areas_by_type()
correct_data = json.dumps({
f"{self.type_1.pk}": {"areas": [{f"{self.area_1.pk}": "ABC - aaa"}, {f"{self.area_2.pk}": "ABC - aab"}]},
f"{self.type_2.pk}": {"areas": [{f"{self.area_3.pk}": "AAC - aaa"}, {f"{self.area_4.pk}": "AAC - aab"}]},
f"{self.type_3.pk}": {"areas": [{f"{self.area_6.pk}": "ABB - aaa"}, {f"{self.area_5.pk}": "ABB - aab"}]},
f"{self.type_4.pk}": {"areas": [{f"{self.area_9.pk}": "AAA - aaa"}, {f"{self.area_7.pk}": "AAA - aba"}, {f"{self.area_8.pk}": "AAA - aca"}]}
})
        self.assertJSONEqual(serialized, correct_data)
    def test_all_restricted_areas_serialization(self):
""" Test restricted areas are ordered alphabetically by type name then by area name
"""
        serialized = all_restricted_areas()
correct_data = json.dumps([
{f"{self.area_9.pk}": "AAA - aaa"},
{f"{self.area_7.pk}": "AAA - aba"},
{f"{self.area_8.pk}": "AAA - aca"},
{f"{self.area_3.pk}": "AAC - aaa"},
{f"{self.area_4.pk}": "AAC - aab"},
{f"{self.area_6.pk}": "ABB - aaa"},
{f"{self.area_5.pk}": "ABB - aab"},
{f"{self.area_1.pk}": "ABC - aaa"},
{f"{self.area_2.pk}": "ABC - aab"}
])
        self.assertJSONEqual(serialized, correct_data)
| {
"content_hash": "cf10b5d07833e09bf3bd5f0fd78e7545",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 152,
"avg_line_length": 45.47826086956522,
"alnum_prop": 0.6252390057361377,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "e09fd587202e8bf08d6f04ccaf09ea54e3b64689",
"size": "4184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geotrek/zoning/tests/test_views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from ._version import __version__
__title__ = "geetools"
__summary__ = "A set of useful tools to use with Google Earth Engine Python" \
"API"
__uri__ = "http://geetools.readthedocs.io"
__author__ = "Rodrigo E. Principe"
__email__ = "fitoprincipe82@gmail.com"
__license__ = "MIT"
__copyright__ = "2017 Rodrigo E. Principe"
try:
from . import tools, bitreader, cloud_mask, expressions, decision_tree,\
filters, indices, batch, algorithms, composite,\
manager, utils, collection, oauth, visualization, \
classification
from .tools import array, date, dictionary, ee_list, featurecollection, \
geometry, image, imagecollection, number, string
from .ui import eprint
from .batch import Export, Import, Convert, Download
from .oauth import Initialize
from .utils import evaluate
except ImportError:
pass
| {
"content_hash": "e127a1ec94d7bec40559d4b637a944a0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 35.55555555555556,
"alnum_prop": 0.6447916666666667,
"repo_name": "gee-community/gee_tools",
"id": "ddeb23ff29b0962da898ece6a85e5ff5bdd090cf",
"size": "975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geetools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "254578"
},
{
"name": "Python",
"bytes": "290230"
}
],
"symlink_target": ""
} |
import os
# We want to test the utility functions in the settings
# module, not the settings themselves, so we'll use
# unittest directly (instead of importing django.test)
import unittest
from django.core.exceptions import ImproperlyConfigured
from cotracker.settings import base
class SettingsTests(unittest.TestCase):
def setUp(self):
self.success_key = 'TEST_RANDOM_KEY_EXISTS'
self.failure_key = 'TEST_RANDOM_KEY_MISSING'
os.environ[self.success_key] = self.success_key
def test_get_env_var_success(self):
self.assertEqual(base.get_env_var(self.success_key), self.success_key)
def test_get_env_var_failure(self):
self.assertRaises(ImproperlyConfigured, base.get_env_var, self.failure_key)
def tearDown(self):
os.environ.pop(self.success_key)
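For context, a minimal sketch consistent with the contract these tests pin down (the real implementation lives in cotracker/settings/base.py and may differ):
import os
from django.core.exceptions import ImproperlyConfigured

def get_env_var(name):
    """Return the environment variable's value or fail loudly at startup."""
    try:
        return os.environ[name]
    except KeyError:
        raise ImproperlyConfigured("Set the %s environment variable" % name)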
| {
"content_hash": "4eabbf0ba9b01aecf2b2fcbc1b1c10cc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 32.46153846153846,
"alnum_prop": 0.7037914691943128,
"repo_name": "eallrich/checkniner",
"id": "5fca324d4a3946bad279f153dff4d9ee6d21a110",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cotracker/checkouts/tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3804"
},
{
"name": "HTML",
"bytes": "18395"
},
{
"name": "Procfile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "134565"
},
{
"name": "Shell",
"bytes": "18789"
}
],
"symlink_target": ""
} |
"""Dataclass for a Python package."""
import configparser
from contextlib import contextmanager
import copy
from dataclasses import dataclass, asdict
import io
import json
import os
from pathlib import Path
import pprint
import re
import shutil
from typing import Any, Dict, List, Optional, Iterable
_pretty_format = pprint.PrettyPrinter(indent=1, width=120).pformat
# List of known environment markers supported by pip.
# https://peps.python.org/pep-0508/#environment-markers
_PY_REQUIRE_ENVIRONMENT_MARKER_NAMES = [
'os_name',
'sys_platform',
'platform_machine',
'platform_python_implementation',
'platform_release',
'platform_system',
'platform_version',
'python_version',
'python_full_version',
'implementation_name',
'implementation_version',
'extra',
]
@contextmanager
def change_working_dir(directory: Path):
original_dir = Path.cwd()
try:
os.chdir(directory)
yield directory
finally:
os.chdir(original_dir)
class UnknownPythonPackageName(Exception):
"""Exception thrown when a Python package_name cannot be determined."""
class MissingSetupSources(Exception):
"""Exception thrown when a Python package is missing setup source files.
    For example: setup.cfg and pyproject.toml.
"""
def _sanitize_install_requires(metadata_dict: dict) -> dict:
"""Convert install_requires lists into strings joined with line breaks."""
try:
install_requires = metadata_dict['options']['install_requires']
if isinstance(install_requires, list):
metadata_dict['options']['install_requires'] = (
'\n'.join(install_requires))
except KeyError:
pass
return metadata_dict
@dataclass
class PythonPackage:
"""Class to hold a single Python package's metadata."""
sources: List[Path]
setup_sources: List[Path]
tests: List[Path]
inputs: List[Path]
gn_target_name: str = ''
generate_setup: Optional[Dict] = None
config: Optional[configparser.ConfigParser] = None
@staticmethod
def from_dict(**kwargs) -> 'PythonPackage':
"""Build a PythonPackage instance from a dictionary."""
transformed_kwargs = copy.copy(kwargs)
# Transform string filenames to Paths
for attribute in ['sources', 'tests', 'inputs', 'setup_sources']:
transformed_kwargs[attribute] = [
Path(s) for s in kwargs[attribute]
]
return PythonPackage(**transformed_kwargs)
def __post_init__(self):
# Read the setup.cfg file if possible
if not self.config:
self.config = self._load_config()
@property
def setup_dir(self) -> Optional[Path]:
if not self.setup_sources:
return None
# Assuming all setup_source files live in the same parent directory.
return self.setup_sources[0].parent
@property
def setup_py(self) -> Path:
setup_py = [
setup_file for setup_file in self.setup_sources
if str(setup_file).endswith('setup.py')
]
# setup.py will not exist for GN generated Python packages
assert len(setup_py) == 1
return setup_py[0]
@property
def setup_cfg(self) -> Optional[Path]:
setup_cfg = [
setup_file for setup_file in self.setup_sources
if str(setup_file).endswith('setup.cfg')
]
if len(setup_cfg) < 1:
return None
return setup_cfg[0]
def as_dict(self) -> Dict[Any, Any]:
"""Return a dict representation of this class."""
self_dict = asdict(self)
if self.config:
# Expand self.config into text.
setup_cfg_text = io.StringIO()
self.config.write(setup_cfg_text)
self_dict['config'] = setup_cfg_text.getvalue()
return self_dict
@property
def package_name(self) -> str:
unknown_package_message = (
'Cannot determine the package_name for the Python '
f'library/package: {self.gn_target_name}\n\n'
'This could be due to a missing python dependency in GN for:\n'
f'{self.gn_target_name}\n\n')
if self.config:
try:
name = self.config['metadata']['name']
except KeyError:
raise UnknownPythonPackageName(unknown_package_message +
_pretty_format(self.as_dict()))
return name
top_level_source_dir = self.top_level_source_dir
if top_level_source_dir:
return top_level_source_dir.name
actual_gn_target_name = self.gn_target_name.split(':')
if len(actual_gn_target_name) < 2:
raise UnknownPythonPackageName(unknown_package_message)
return actual_gn_target_name[-1]
@property
def package_dir(self) -> Path:
if self.setup_cfg and self.setup_cfg.is_file():
return self.setup_cfg.parent / self.package_name
root_source_dir = self.top_level_source_dir
if root_source_dir:
return root_source_dir
return self.sources[0].parent
@property
def top_level_source_dir(self) -> Optional[Path]:
        source_dir_paths = sorted(set(
            (len(sfile.parts), sfile.parent) for sfile in self.sources),
            key=lambda s: s[0])  # shallowest (fewest path parts) first
if not source_dir_paths:
return None
top_level_source_dir = source_dir_paths[0][1]
if not top_level_source_dir.is_dir():
return None
return top_level_source_dir
def _load_config(self) -> Optional[configparser.ConfigParser]:
config = configparser.ConfigParser()
# Check for a setup.cfg and load that config.
if self.setup_cfg:
if self.setup_cfg.is_file():
with self.setup_cfg.open() as config_file:
config.read_file(config_file)
return config
if self.setup_cfg.with_suffix('.json').is_file():
return self._load_setup_json_config()
# Fallback on the generate_setup scope from GN
if self.generate_setup:
config.read_dict(_sanitize_install_requires(self.generate_setup))
return config
return None
def _load_setup_json_config(self) -> configparser.ConfigParser:
assert self.setup_cfg
setup_json = self.setup_cfg.with_suffix('.json')
config = configparser.ConfigParser()
with setup_json.open() as json_fp:
json_dict = _sanitize_install_requires(json.load(json_fp))
config.read_dict(json_dict)
return config
def copy_sources_to(self, destination: Path) -> None:
"""Copy this PythonPackage source files to another path."""
new_destination = destination / self.package_dir.name
new_destination.mkdir(parents=True, exist_ok=True)
shutil.copytree(self.package_dir, new_destination, dirs_exist_ok=True)
def install_requires_entries(self) -> List[str]:
"""Convert the install_requires entry into a list of strings."""
this_requires: List[str] = []
# If there's no setup.cfg, do nothing.
if not self.config:
return this_requires
# Requires are delimited by newlines or semicolons.
# Split existing list on either one.
for req in re.split(r' *[\n;] *',
self.config['options']['install_requires']):
# Skip empty lines.
if not req:
continue
            # Get the name part of the dep, ignoring any spaces or
# other characters.
req_name_match = re.match(r'^(?P<name_part>[A-Za-z0-9_-]+)', req)
if not req_name_match:
continue
req_name = req_name_match.groupdict().get('name_part', '')
# Check if this is an environment marker.
if req_name in _PY_REQUIRE_ENVIRONMENT_MARKER_NAMES:
# Append this req as an environment marker for the previous
# requirement.
this_requires[-1] += f';{req}'
continue
# Normal pip requirement, save to this_requires.
this_requires.append(req)
return this_requires
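The marker handling above folds a bare environment marker back onto the requirement before it; a small sketch of that behavior (package values hypothetical):
# Hypothetical package configured via a GN generate_setup scope.
pkg = PythonPackage(
    sources=[], setup_sources=[], tests=[], inputs=[],
    generate_setup={
        'metadata': {'name': 'demo'},
        'options': {'install_requires': [
            'coloredlogs',
            'ipython; python_version > "3.0"',
        ]},
    },
)
# The marker is re-attached to the requirement it qualifies.
assert pkg.install_requires_entries() == [
    'coloredlogs', 'ipython;python_version > "3.0"']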
def load_packages(input_list_files: Iterable[Path],
ignore_missing=False) -> List[PythonPackage]:
"""Load Python package metadata and configs."""
packages = []
for input_path in input_list_files:
if ignore_missing and not input_path.is_file():
continue
with input_path.open() as input_file:
# Each line contains the path to a json file.
for json_file in input_file.readlines():
# Load the json as a dict.
json_file_path = Path(json_file.strip()).resolve()
with json_file_path.open() as json_fp:
json_dict = json.load(json_fp)
packages.append(PythonPackage.from_dict(**json_dict))
return packages
| {
"content_hash": "0c712715b9c5658d6d873953af6bda22",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 78,
"avg_line_length": 34.30451127819549,
"alnum_prop": 0.6021917808219178,
"repo_name": "google/pigweed",
"id": "49376fdcd6d406693a6cc766ef9c1e49134e83da",
"size": "9709",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pw_build/py/pw_build/python_package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8654"
},
{
"name": "C",
"bytes": "487991"
},
{
"name": "C++",
"bytes": "6119052"
},
{
"name": "CMake",
"bytes": "288698"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "Go",
"bytes": "18932"
},
{
"name": "HTML",
"bytes": "1194"
},
{
"name": "Java",
"bytes": "327548"
},
{
"name": "JavaScript",
"bytes": "12482"
},
{
"name": "Jinja",
"bytes": "2467"
},
{
"name": "Python",
"bytes": "3578966"
},
{
"name": "Rust",
"bytes": "645"
},
{
"name": "SCSS",
"bytes": "1382"
},
{
"name": "Shell",
"bytes": "22974"
},
{
"name": "Smarty",
"bytes": "692"
},
{
"name": "Starlark",
"bytes": "489444"
},
{
"name": "TypeScript",
"bytes": "235169"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class UsageRecordList(ListResource):
def __init__(self, version):
"""
Initialize the UsageRecordList
:param Version version: Version that contains the resource
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordList
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordList
"""
super(UsageRecordList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/UsageRecords'.format(**self._solution)
def stream(self, end=values.unset, start=values.unset, granularity=values.unset,
limit=None, page_size=None):
"""
Streams UsageRecordInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.wireless.v1.usage_record.UsageRecordInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(end=end, start=start, granularity=granularity, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, end=values.unset, start=values.unset, granularity=values.unset,
limit=None, page_size=None):
"""
Lists UsageRecordInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.wireless.v1.usage_record.UsageRecordInstance]
"""
return list(self.stream(
end=end,
start=start,
granularity=granularity,
limit=limit,
page_size=page_size,
))
def page(self, end=values.unset, start=values.unset, granularity=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of UsageRecordInstance records from the API.
Request is executed immediately
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
data = values.of({
'End': serialize.iso8601_datetime(end),
'Start': serialize.iso8601_datetime(start),
'Granularity': granularity,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return UsageRecordPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of UsageRecordInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return UsageRecordPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordList>'
class UsageRecordPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the UsageRecordPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordPage
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
super(UsageRecordPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of UsageRecordInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
"""
return UsageRecordInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordPage>'
class UsageRecordInstance(InstanceResource):
class Granularity(object):
HOURLY = "hourly"
DAILY = "daily"
ALL = "all"
def __init__(self, version, payload):
"""
Initialize the UsageRecordInstance
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
"""
super(UsageRecordInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'period': payload.get('period'),
'commands': payload.get('commands'),
'data': payload.get('data'),
}
# Context
self._context = None
self._solution = {}
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def period(self):
"""
:returns: The time period for which usage is reported
:rtype: dict
"""
return self._properties['period']
@property
def commands(self):
"""
:returns: An object that describes the aggregated Commands usage for all SIMs during the specified period
:rtype: dict
"""
return self._properties['commands']
@property
def data(self):
"""
:returns: An object that describes the aggregated Data usage for all SIMs over the period
:rtype: dict
"""
return self._properties['data']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordInstance>'
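A hedged usage sketch of the list/stream split documented above (the account SID and token are placeholders; consult the Twilio docs for exact client setup):
from twilio.rest import Client

client = Client("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "your_auth_token")

# Eager: loads up to `limit` records into memory before returning.
records = client.wireless.usage_records.list(granularity="daily", limit=20)

# Lazy: pages of `page_size` are fetched as the generator is consumed.
for record in client.wireless.usage_records.stream(page_size=50):
    print(record.period)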
| {
"content_hash": "d88caaaf505b1d49ec1ee04fe1efac60",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 114,
"avg_line_length": 36.70281124497992,
"alnum_prop": 0.6279680490206806,
"repo_name": "twilio/twilio-python",
"id": "422eccbfebd25b0bcf371b5b733f35fe2490c1bc",
"size": "9154",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/wireless/v1/usage_record.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
"""This module allows adding and deleting of projects to the local manifest."""
from __future__ import print_function
import logging
import platform
import optparse
import os
import sys
import xml.etree.ElementTree as ElementTree
from chromite.lib import cros_build_lib
from chromite.lib import git
class Manifest(object):
"""Class which provides an abstraction for manipulating the local manifest."""
@classmethod
def FromPath(cls, path, empty_if_missing=False):
if os.path.isfile(path):
with open(path) as f:
return cls(f.read())
    if empty_if_missing:
      return cls()
    cros_build_lib.Die('Manifest file, %r, not found' % path)
def __init__(self, text=None):
self._text = text or '<manifest>\n</manifest>'
self.nodes = ElementTree.fromstring(self._text)
def AddNonWorkonProject(self, name, path, remote=None, revision=None):
"""Add a new nonworkon project element to the manifest tree."""
element = ElementTree.Element('project', name=name, path=path,
remote=remote)
element.attrib['workon'] = 'False'
if revision is not None:
element.attrib['revision'] = revision
self.nodes.append(element)
return element
def GetProject(self, name, path=None):
"""Accessor method for getting a project node from the manifest tree.
Returns:
project element node from ElementTree, otherwise, None
"""
if path is None:
# Use a unique value that can't ever match.
path = object()
for project in self.nodes.findall('project'):
if project.attrib['name'] == name or project.attrib['path'] == path:
return project
return None
def ToString(self):
# Reset the tail for each node, then just do a hacky replace.
project = None
for project in self.nodes.findall('project'):
project.tail = '\n '
if project is not None:
# Tweak the last project to not have the trailing space.
project.tail = '\n'
# Fix manifest tag text and tail.
self.nodes.text = '\n '
self.nodes.tail = '\n'
return ElementTree.tostring(self.nodes)
def GetProjects(self):
return list(self.nodes.findall('project'))
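A quick sketch of how the class above is used (values hypothetical): build a manifest, add a pinned project, and serialize it back out.
manifest = Manifest()
manifest.AddNonWorkonProject(name='chromiumos/demo', path='src/demo',
                             remote='cros', revision='refs/tags/v1.0')
assert manifest.GetProject('chromiumos/demo') is not None
print(manifest.ToString())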
def _AddProjectsToManifestGroups(options, *args):
"""Enable the given manifest groups for the configured repository."""
groups_to_enable = ['name:%s' % x for x in args]
git_config = options.git_config
cmd = ['config', '-f', git_config, '--get', 'manifest.groups']
enabled_groups = git.RunGit('.', cmd, error_code_ok=True).output.split(',')
# Note that ordering actually matters, thus why the following code
# is written this way.
# Per repo behaviour, enforce an appropriate platform group if
# we're converting from a default manifest group to a limited one.
# Finally, note we reprocess the existing groups; this is to allow
# us to cleanup any user screwups, or our own screwups.
requested_groups = (
['minilayout', 'platform-%s' % (platform.system().lower(),)] +
enabled_groups + list(groups_to_enable))
processed_groups = set()
finalized_groups = []
for group in requested_groups:
if group not in processed_groups:
finalized_groups.append(group)
processed_groups.add(group)
cmd = ['config', '-f', git_config, 'manifest.groups',
','.join(finalized_groups)]
git.RunGit('.', cmd)
def _UpgradeMinilayout(options):
"""Convert a repo checkout away from minilayout.xml to default.xml."""
full_tree = Manifest.FromPath(options.default_manifest_path)
local_manifest_exists = os.path.exists(options.local_manifest_path)
new_groups = []
if local_manifest_exists:
local_tree = Manifest.FromPath(options.local_manifest_path)
# Identify which projects need to be transferred across.
projects = local_tree.GetProjects()
new_groups = [x.attrib['name'] for x in projects]
allowed = set(x.attrib['name'] for x in full_tree.GetProjects())
transferred = [x for x in projects if x.attrib['name'] in allowed]
for project in transferred:
# Mangle local_manifest object, removing those projects;
# note we'll still be adding those projects to the default groups,
# including those that didn't intersect the main manifest.
local_tree.nodes.remove(project)
_AddProjectsToManifestGroups(options, *new_groups)
if local_manifest_exists:
# Rewrite the local_manifest now; if there is no settings left in
# the local_manifest, wipe it.
if local_tree.nodes.getchildren():
with open(options.local_manifest_path, 'w') as f:
f.write(local_tree.ToString())
else:
os.unlink(options.local_manifest_path)
# Finally, move the symlink.
os.unlink(options.manifest_sym_path)
os.symlink('manifests/default.xml', options.manifest_sym_path)
logging.info("Converted the checkout to manifest groups based minilayout.")
def main(argv):
parser = optparse.OptionParser(usage='usage: %prog add [options] <name> '
'<--workon | <path> --remote <remote> >')
parser.add_option('-w', '--workon', action='store_true', dest='workon',
default=False, help='Is this a workon package?')
parser.add_option('-r', '--remote', dest='remote',
default=None)
parser.add_option('-v', '--revision', dest='revision',
default=None,
help="Use to override the manifest defined default "
"revision used for a given project.")
parser.add_option('--upgrade-minilayout', default=False, action='store_true',
help="Upgrade a minilayout checkout into a full.xml "
"checkout utilizing manifest groups.")
(options, args) = parser.parse_args(argv)
repo_dir = git.FindRepoDir(os.getcwd())
if not repo_dir:
parser.error("This script must be invoked from within a repository "
"checkout.")
options.git_config = os.path.join(repo_dir, 'manifests.git', 'config')
options.repo_dir = repo_dir
options.local_manifest_path = os.path.join(repo_dir, 'local_manifest.xml')
# This constant is used only when we're doing an upgrade away from
# minilayout.xml to default.xml.
options.default_manifest_path = os.path.join(repo_dir, 'manifests',
'default.xml')
options.manifest_sym_path = os.path.join(repo_dir, 'manifest.xml')
active_manifest = os.path.basename(os.readlink(options.manifest_sym_path))
upgrade_required = active_manifest == 'minilayout.xml'
if options.upgrade_minilayout:
if args:
parser.error("--upgrade-minilayout takes no arguments.")
if not upgrade_required:
print("This repository checkout isn't using minilayout.xml; "
"nothing to do")
else:
_UpgradeMinilayout(options)
return 0
elif upgrade_required:
logging.warn(
"Your repository checkout is using the old minilayout.xml workflow; "
"auto-upgrading it.")
cros_build_lib.RunCommand(
[sys.argv[0], '--upgrade-minilayout'], cwd=os.getcwd(), print_cmd=False)
if not args:
parser.error("No command specified.")
elif args[0] != 'add':
parser.error("Only supported subcommand is add right now.")
elif options.workon:
if len(args) != 2:
parser.error(
"Argument count is wrong for --workon; must be add <project>")
name, path = args[1], None
else:
if options.remote is None:
parser.error('Adding non-workon projects requires a remote.')
elif len(args) != 3:
parser.error(
"Argument count is wrong for non-workon mode; "
"must be add <project> <path> --remote <remote-arg>")
name, path = args[1:]
revision = options.revision
if revision is not None:
if (not git.IsRefsTags(revision) and
not git.IsSHA1(revision)):
revision = git.StripRefsHeads(revision, False)
main_manifest = Manifest.FromPath(options.manifest_sym_path,
empty_if_missing=False)
  local_manifest = Manifest.FromPath(options.local_manifest_path,
                                     empty_if_missing=True)
main_element = main_manifest.GetProject(name, path=path)
if options.workon:
if main_element is None:
parser.error('No project named %r in the default manifest.' % name)
_AddProjectsToManifestGroups(options, main_element.attrib['name'])
elif main_element is not None:
if options.remote is not None:
# Likely this project wasn't meant to be remote, so workon main element
print("Project already exists in manifest. Using that as workon project.")
_AddProjectsToManifestGroups(options, main_element.attrib['name'])
else:
# Conflict will occur; complain.
parser.error("Requested project name=%r path=%r will conflict with "
"your current manifest %s" % (name, path, active_manifest))
elif local_manifest.GetProject(name, path=path) is not None:
parser.error("Requested project name=%r path=%r conflicts with "
"your local_manifest.xml" % (name, path))
else:
element = local_manifest.AddNonWorkonProject(name=name, path=path,
remote=options.remote,
revision=revision)
_AddProjectsToManifestGroups(options, element.attrib['name'])
with open(options.local_manifest_path, 'w') as f:
f.write(local_manifest.ToString())
return 0
| {
"content_hash": "04e91fd2fdd176899c87456060cfb537",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 80,
"avg_line_length": 37.89156626506024,
"alnum_prop": 0.6586115527291998,
"repo_name": "mxOBS/deb-pkg_trusty_chromium-browser",
"id": "30ee8397fb7aae32c624ea485ccc4427896b127c",
"size": "9605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/scripts/loman.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "230130"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "12435900"
},
{
"name": "C++",
"bytes": "264378706"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "795726"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "31783"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "19491230"
},
{
"name": "Java",
"bytes": "7637875"
},
{
"name": "JavaScript",
"bytes": "12723911"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "14392"
},
{
"name": "Makefile",
"bytes": "208315"
},
{
"name": "Objective-C",
"bytes": "1460032"
},
{
"name": "Objective-C++",
"bytes": "7760068"
},
{
"name": "PLpgSQL",
"bytes": "175360"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427212"
},
{
"name": "Python",
"bytes": "11447382"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104846"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1208350"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
from frappe.model.document import Document
class NumberCardLink(Document):
pass
| {
"content_hash": "59dce3a7f57d3beab6b6d041e186ac8d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 20.5,
"alnum_prop": 0.8292682926829268,
"repo_name": "almeidapaulopt/frappe",
"id": "0b55ae6dcdc4458b4f06682f06401f3cd4ee657e",
"size": "210",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/desk/doctype/number_card_link/number_card_link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "245760"
},
{
"name": "JavaScript",
"bytes": "2345089"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3436599"
},
{
"name": "SCSS",
"bytes": "248606"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
from collections import Counter, defaultdict
# import itertools
import sys
import os
import numpy as np
class Riffer():
def __init__(self, dir_in, newline_chars=True):
self.spliton = (' ' if newline_chars else None) # split defaults to whitespace
d_counter = self.parseFiles(dir_in)
# probability dictionary of markov chain -->
# word: ( [list,of,next,words], [prob,of,next,words] )
        self.d = {k: (list(v.keys()),
                      np.array(list(v.values()), dtype=np.float32)
                      / sum(v.values()))
                  for k, v in d_counter.items()}
def parseFiles(self, dir_in):
        d = defaultdict(Counter)
# depth = 1
# dirname, subdirs, files = os.walk(os.path.abspath(dir_in)).next()
for root, dirs, files in os.walk(os.path.abspath(dir_in)):
for filepath in (os.path.join(root, f) for f in files):
d = self.txt2Markov(d, filepath) # update d_counter
return d
def txt2Markov(self, d_counter, file_in):
with open(file_in,'r') as f:
# TODO: multiple whitespace in a row ?
words = f.read().strip().lower().split(self.spliton)
# generalizable n-gram sliding window
# for w1, w2 in zip(*(itertools.islice(words, i, None) for i in range(2))):
for w1, w2 in zip(words, words[1:]):
d_counter[w1].update([w2])
return d_counter
    def freestyle(self, word=None, continue_for=np.inf, accum=None):
        if accum is None:  # avoid sharing a mutable default across calls
            accum = []
        if not word:  # seed randomly from all words
            word = np.random.choice(list(self.d.keys()))
try:
choices, probs = self.d[word]
nxt = np.random.choice(choices, p=probs)
        except KeyError:
            # `word` only appears as the last token of a file, so it has
            # no outgoing transitions in the chain
            print(f'uh oh: {word}')
            import IPython; IPython.embed()
accum.append(nxt)
        if continue_for:
            try:
                self.freestyle(nxt, continue_for - 1, accum)
            except RuntimeError:
                # continue_for=np.inf never counts down; unbounded riffs
                # stop at the recursion limit and return what they have
                pass
return ' '.join(accum)
# def iterWords(file_in):
# for line in file_in:
# for word in line.strip().split():
# yield word
if __name__ == "__main__":
try:
DIR = sys.argv[1]
except(IndexError):
DIR = './kendrick'
x = Riffer(DIR)
print(x.freestyle())
#print(x.freestyle(continue_for=100))
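To make the transition table concrete, a small end-to-end sketch (corpus and paths hypothetical):
import os, tempfile

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'demo.txt'), 'w') as f:
    f.write('the cat sat the cat ran')

riffer = Riffer(tmpdir)
nxt_words, probs = riffer.d['cat']
# 'cat' is followed by 'sat' once and 'ran' once -> 0.5 / 0.5
print(sorted(zip(nxt_words, probs)))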
| {
"content_hash": "702bb4f3a9ef8e9fd9894c5458bea870",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 86,
"avg_line_length": 29.691358024691358,
"alnum_prop": 0.5505197505197506,
"repo_name": "meereeum/markov-freestyle",
"id": "3550f959a96a15adedeaab244dc814bc54e84cd6",
"size": "2405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markov.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5033"
}
],
"symlink_target": ""
} |
"""
Simulation of the string with mass example,
with flatness based state feedback and flatness based state observer
(design + approximation), presented in [RW2018a]_.
References:
.. [RW2018a] Marcus Riesmeier and Frank Woittennek;
Modale Approximation eines verteiltparametrischen Beobachters
für das Modell der Saite mit Last. GMA Fachausschuss 1.40
„Systemtheorie und Regelungstechnik“, Salzburg, Austria,
September 17-20, 2018.
"""
from pyinduct.examples.string_with_mass.control import *
from pyinduct.hyperbolic.feedforward import FlatString
import pyinduct as pi
import pickle
import time
def run(show_plots):
# control mode
control_mode = ["open_loop",
"closed_loop",
"modal_observer",
"fem_observer"][2]
# constant observer initial error
ie = 0.2
# domains
z_end = 1
spatial_discretization = 100
spatial_domain = pi.Domain((0, z_end), spatial_discretization)
spat_domain_cf = pi.Domain((-z_end, z_end), spatial_discretization)
t_end = 30
temporal_discretization = int(30 * t_end)
temporal_domain = pi.Domain((0, t_end), temporal_discretization)
# planning input trajectories
smooth_transition1 = pi.SmoothTransition(
(0, 1), (2, 4), method="poly", differential_order=2)
smooth_transition2 = pi.SmoothTransition(
(0, -1.5), (23, 25), method="poly", differential_order=2)
not_too_smooth_transition = pi.SmoothTransition(
(0, -.5), (14, 14.2), method="poly", differential_order=2)
closed_loop_traj1 = SecondOrderFeedForward(smooth_transition1)
closed_loop_traj2 = SecondOrderFeedForward(smooth_transition2)
disturbance = SecondOrderFeedForward(not_too_smooth_transition)
open_loop_traj = FlatString(
y0=0, y1=1, z0=spatial_domain.bounds[0], z1=spatial_domain.bounds[1],
t0=1, dt=3, params=param)
# set up bases
sys_fem_lbl = "fem_system"
sys_modal_lbl = "modal_system"
obs_fem_lbl = "fem_observer"
obs_modal_lbl = "modal_observer"
n1 = 11
n2 = 11
n_obs_fem = 11
n_obs_modal = 16
build_fem_bases(sys_fem_lbl, n1, n2, obs_fem_lbl, n_obs_fem, sys_modal_lbl)
build_modal_bases(sys_modal_lbl, n_obs_modal, obs_modal_lbl, n_obs_modal)
# controller
controller = build_controller(sys_fem_lbl, sys_modal_lbl)
if control_mode == "open_loop":
input_ = pi.SimulationInputSum([open_loop_traj])
else:
input_ = pi.SimulationInputSum(
[closed_loop_traj1, controller, disturbance, closed_loop_traj2])
# observer error
obs_fem_error, obs_modal_error = init_observer_gain(
sys_fem_lbl, sys_modal_lbl, obs_fem_lbl, obs_modal_lbl)
# input / observer error vector
input_vector = pi.SimulationInputVector([input_, obs_fem_error, obs_modal_error])
control = pi.Input(input_vector, index=0)
yt_fem = pi.Input(input_vector, index=1)
yt_modal = pi.Input(input_vector, index=2)
# system approximation
sys_wf = build_original_weak_formulation(
sys_fem_lbl, spatial_domain, control, sys_fem_lbl)
obs_fem_wf = build_canonical_weak_formulation(
obs_fem_lbl, spat_domain_cf, control, yt_fem, obs_fem_lbl)
obs_modal_wf = build_canonical_weak_formulation(
obs_modal_lbl, spat_domain_cf, control, yt_modal, obs_modal_lbl)
# set control mode
apply_control_mode(sys_fem_lbl, sys_modal_lbl, obs_fem_lbl, obs_modal_lbl,
control_mode)
# define initial conditions
init_cond = {
sys_wf.name: [SwmBaseFraction(
[pi.ConstantFunction(0, domain=spatial_domain.bounds),
pi.ConstantFunction(0, domain=spatial_domain.bounds)],
[0, 0])],
obs_fem_wf.name: [SwmBaseCanonicalFraction(
[pi.Function(lambda th: ie * (2 - th), (-1, 1))], [0, ie * 4])],
obs_modal_wf.name: [SwmBaseCanonicalFraction(
[pi.Function(lambda th: ie * (2 - th), (-1, 1))], [0, ie * 4])]
}
# simulation
spatial_domains = {sys_wf.name: spatial_domain,
obs_fem_wf.name: spat_domain_cf,
obs_modal_wf.name: spat_domain_cf}
intermediate_results = list()
_ = pi.simulate_systems(
[sys_wf, obs_fem_wf, obs_modal_wf],
init_cond, temporal_domain, spatial_domains, out=intermediate_results
)
ceq, ss, init_weights, weights = intermediate_results
# print some stuff for debugging
# check_eigenvalues(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl, ceq, ss)
# visualization data
split_indizes = [n1 + n2,
n1 + n2 + n_obs_fem,
n1 + n2 + n_obs_fem + n_obs_modal]
# system
weights_sys = weights[:, :split_indizes[0]]
eval_data1 = pi.get_sim_result(sys_fem_lbl + "_1_visu", weights_sys,
temporal_domain, spatial_domain, 0, 0,
name="x1(z,t)")[0]
# fem observer
weights_fem_obs = weights[:, split_indizes[0]: split_indizes[1]]
fem_obs_ed = pi.get_sim_result(sys_fem_lbl + "_1_trafo_visu",
weights_fem_obs,
temporal_domain, spatial_domain,
0, 0,
name=r"\hat x1_fem(z,t)")[0]
# modal observer
weights_modal_obs = weights[:, split_indizes[1]: split_indizes[2]]
modal_obs_ed = pi.get_sim_result(sys_modal_lbl + "_1_trafo_visu",
weights_modal_obs,
temporal_domain, spatial_domain,
0, 0,
name=r"\hat x1_modal(z,t)")[0]
pi.tear_down([sys_fem_lbl, sys_modal_lbl, obs_fem_lbl, obs_modal_lbl])
if show_plots:
# create plots
plots = list()
plots.append(SwmPgAnimatedPlot([eval_data1, modal_obs_ed]))
plots.append(pi.surface_plot([eval_data1, modal_obs_ed]))
pi.show()
# save results
if 0:
timestamp = time.strftime("%Y-%m-%d__%H-%M-%S__")
path = "results/"
conf = "{}__({}-{}-{})__".format(
control_mode, n1 + n2, n_obs_fem, n_obs_modal)
description = input("result description:").replace(" ", "-")
file = open(path + timestamp + conf + description + ".pkl", "wb")
pickle.dump([eval_data1, fem_obs_ed, modal_obs_ed], file)
file.close()
if __name__ == "__main__":
run(True)
| {
"content_hash": "e0a15c67085a1cf82236245b42e607b2",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 85,
"avg_line_length": 38.8698224852071,
"alnum_prop": 0.5897396864058456,
"repo_name": "cklb/pyinduct",
"id": "ad62bc8c7518b40bf124e051ebf34d1172fc50f1",
"size": "6574",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "pyinduct/examples/string_with_mass/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "782564"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
import logging
from operator import itemgetter
from werkzeug import url_encode
from openerp import SUPERUSER_ID
from openerp import tools, api
from openerp.addons.base.res.res_partner import format_address
from openerp.addons.crm import crm_stage
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import email_re, email_split
from openerp.exceptions import UserError, AccessError
_logger = logging.getLogger(__name__)
CRM_LEAD_FIELDS_TO_MERGE = ['name',
'partner_id',
'campaign_id',
'company_id',
'country_id',
'team_id',
'state_id',
'stage_id',
'medium_id',
'source_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'email',
'fax',
'mobile',
'partner_name',
'phone',
'probability',
'planned_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'date_action_next',
'email_from',
                            'email_cc']
class crm_lead(format_address, osv.osv):
""" CRM Lead Case """
_name = "crm.lead"
_description = "Lead/Opportunity"
_order = "priority desc,date_action,id desc"
_inherit = ['mail.thread', 'ir.needaction_mixin', 'utm.mixin']
_mail_mass_mailing = _('Leads / Opportunities')
def _get_default_probability(self, cr, uid, context=None):
""" Gives default probability """
stage_id = self._get_default_stage_id(cr, uid, context=context)
if stage_id:
return self.pool['crm.stage'].browse(cr, uid, stage_id, context=context).probability
else:
return 10
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
team_id = self.pool['crm.team']._get_default_team_id(cr, SUPERUSER_ID, context=context, user_id=uid)
return self.stage_find(cr, uid, [], team_id, [('fold', '=', False)], context=context)
def _resolve_type_from_context(self, cr, uid, context=None):
""" Returns the type (lead or opportunity) from the type context
key. Returns None if it cannot be resolved.
"""
if context is None:
context = {}
return context.get('default_type')
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('crm.stage')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve team_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('fold', '=', False): add default columns that are not folded
# - OR ('team_ids', '=', team_id), ('fold', '=', False) if team_id: add team columns that are not folded
search_domain = []
team_id = context and context.get('default_team_id') or False
if team_id:
search_domain += ['|', ('team_ids', '=', team_id)]
search_domain += [('id', 'in', ids)]
else:
search_domain += [('id', 'in', ids)]
# retrieve type from the context (if set: choose 'type' or 'both')
type = self._resolve_type_from_context(cr, uid, context=context)
if type:
search_domain += ['|', ('type', '=', type), ('type', '=', 'both')]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
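    # Domain sketch for the method above, with hypothetical inputs
    # context={'default_team_id': 3}, ids=[1, 2] and default_type='lead':
    #   ['|', ('team_ids', '=', 3), ('id', 'in', [1, 2]),
    #    '|', ('type', '=', 'lead'), ('type', '=', 'both')]
    # i.e. (team 3 stages OR the stages already present) AND a matching type.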
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context and context.get('opportunity_id'):
action = self.get_formview_action(cr, user, context['opportunity_id'], context=context)
if action.get('views') and any(view_id for view_id in action['views'] if view_id[1] == view_type):
view_id = next(view_id[0] for view_id in action['views'] if view_id[1] == view_type)
res = super(crm_lead, self).fields_view_get(cr, user, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context)
return res
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
:return dict: difference between current date and log date
"""
res = {}
        for lead in self.browse(cr, uid, ids, context=context):
            res[lead.id] = {}  # one dict per lead, shared by all requested fields
            for field in fields:
                duration = 0
ans = False
if field == 'day_open':
if lead.date_open:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_open = datetime.strptime(lead.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
elif field == 'day_close':
if lead.date_closed:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_close = datetime.strptime(lead.date_closed, "%Y-%m-%d %H:%M:%S")
ans = date_close - date_create
if ans:
duration = abs(int(ans.days))
res[lead.id][field] = duration
return res
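    # Worked example with hypothetical dates: a lead created on
    # 2016-01-01 00:00:00 and opened on 2016-01-04 12:00:00 yields
    # ans = timedelta(days=3, hours=12), so day_open = abs(int(ans.days)) = 3.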
def _meeting_count(self, cr, uid, ids, field_name, arg, context=None):
Event = self.pool['calendar.event']
return {
            opp_id: Event.search_count(cr, uid, [('opportunity_id', '=', opp_id)], context=context)
for opp_id in ids
}
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='set null', track_visibility='onchange',
select=True, help="Linked partner (optional). Usually created when converting the lead."),
'id': fields.integer('ID', readonly=True),
'name': fields.char('Opportunity', required=True, select=1),
'active': fields.boolean('Active', required=False),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'email_from': fields.char('Email', size=128, help="Email address of the contact", select=1),
'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id',
select=True, track_visibility='onchange', help='When sending mails, the default email address is taken from the sales team.'),
'create_date': fields.datetime('Creation Date', readonly=True),
'email_cc': fields.text('Global CC', help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'description': fields.text('Notes'),
'write_date': fields.datetime('Update Date', readonly=True),
'tag_ids': fields.many2many('crm.lead.tag', 'crm_lead_tag_rel', 'lead_id', 'tag_id', 'Tags', help="Classify and analyze your lead/opportunity categories like: Training, Service"),
'contact_name': fields.char('Contact Name', size=64),
'partner_name': fields.char("Customer Name", size=64,help='The name of the future partner company that will be created while converting the lead into opportunity', select=1),
'opt_out': fields.boolean('Opt-Out', oldname='optout',
help="If opt-out is checked, this contact has refused to receive emails for mass mailing and marketing campaign. "
"Filter 'Available for Mass Mailing' allows users to filter the leads when performing mass mailing."),
'type': fields.selection(
[('lead', 'Lead'), ('opportunity', 'Opportunity')],
string='Type', select=True, required=True,
help="Type is used to separate Leads and Opportunities"),
'priority': fields.selection(crm_stage.AVAILABLE_PRIORITIES, 'Rating', select=True),
'date_closed': fields.datetime('Closed', readonly=True, copy=False),
'stage_id': fields.many2one('crm.stage', 'Stage', track_visibility='onchange', select=True,
domain="['&', ('team_ids', '=', team_id), '|', ('type', '=', type), ('type', '=', 'both')]"),
'user_id': fields.many2one('res.users', 'Salesperson', select=True, track_visibility='onchange'),
'referred': fields.char('Referred By'),
'date_open': fields.datetime('Assigned', readonly=True),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='day_open', type="float",
store={'crm.lead': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='day_open', type="float",
store={'crm.lead': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'date_conversion': fields.datetime('Conversion Date', readonly=1),
# Messaging and marketing
'message_bounce': fields.integer('Bounce', help="Counter of the number of bounced emails for this contact"),
# Only used for type opportunity
'probability': fields.float('Probability', group_operator="avg"),
'planned_revenue': fields.float('Expected Revenue', track_visibility='always'),
'date_deadline': fields.date('Expected Closing', help="Estimate of the date on which the opportunity will be won."),
# CRM Actions
'last_activity_id': fields.many2one("crm.activity", "Last Activity", select=True),
'next_activity_id': fields.many2one("crm.activity", "Next Activity", select=True),
'next_activity_1': fields.related("last_activity_id", "activity_1_id", "name", type="char", string="Next Activity 1"),
'next_activity_2': fields.related("last_activity_id", "activity_2_id", "name", type="char", string="Next Activity 2"),
'next_activity_3': fields.related("last_activity_id", "activity_3_id", "name", type="char", string="Next Activity 3"),
'date_action': fields.date('Next Activity Date', select=True),
'title_action': fields.char('Next Activity Summary'),
'color': fields.integer('Color Index'),
'partner_address_name': fields.related('partner_id', 'name', type='char', string='Partner Contact Name', readonly=True),
'partner_address_email': fields.related('partner_id', 'email', type='char', string='Partner Contact Email', readonly=True),
'company_currency': fields.related('company_id', 'currency_id', type='many2one', string='Currency', readonly=True, relation="res.currency"),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'user_login': fields.related('user_id', 'login', type='char', string='User Login', readonly=True),
# Fields for address, due to separation from crm and res.partner
'street': fields.char('Street'),
'street2': fields.char('Street2'),
'zip': fields.char('Zip', change_default=True, size=24),
'city': fields.char('City'),
'state_id': fields.many2one("res.country.state", 'State'),
'country_id': fields.many2one('res.country', 'Country'),
'phone': fields.char('Phone'),
'fax': fields.char('Fax'),
'mobile': fields.char('Mobile'),
'function': fields.char('Function'),
'title': fields.many2one('res.partner.title', 'Title'),
'company_id': fields.many2one('res.company', 'Company', select=1),
'meeting_count': fields.function(_meeting_count, string='# Meetings', type='integer'),
'lost_reason': fields.many2one('crm.lost.reason', 'Lost Reason', select=True, track_visibility='onchange'),
}
_defaults = {
'active': 1,
'type': lambda s, cr, uid, c: 'lead' if s.pool['res.users'].has_group(cr, uid, 'crm.group_use_lead') else 'opportunity',
'user_id': lambda s, cr, uid, c: uid,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'team_id': lambda s, cr, uid, c: s.pool['crm.team']._get_default_team_id(cr, SUPERUSER_ID, context=c, user_id=uid),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.lead', context=c),
'priority': lambda *a: crm_stage.AVAILABLE_PRIORITIES[0][0],
'probability': lambda s, cr, uid, c: s._get_default_probability(cr, uid, c),
'color': 0,
'date_last_stage_update': fields.datetime.now,
}
_sql_constraints = [
('check_probability', 'check(probability >= 0 and probability <= 100)', 'The probability of closing the deal should be between 0% and 100%!')
]
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool['crm.stage'].browse(cr, uid, stage_id, context=context)
if not stage.on_change:
return {'value': {}}
return {'value': {'probability': stage.probability}}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
partner_name = (partner.parent_id and partner.parent_id.name) or (partner.is_company and partner.name) or False
values = {
'partner_name': partner_name,
'contact_name': (not partner.is_company and partner.name) or False,
'title': partner.title and partner.title.id or False,
'street': partner.street,
'street2': partner.street2,
'city': partner.city,
'state_id': partner.state_id and partner.state_id.id or False,
'country_id': partner.country_id and partner.country_id.id or False,
'email_from': partner.email,
'phone': partner.phone,
'mobile': partner.mobile,
'fax': partner.fax,
'zip': partner.zip,
'function': partner.function,
}
return {'value': values}
def on_change_user(self, cr, uid, ids, user_id, context=None):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
if not context:
context = {}
if user_id and context.get('team_id'):
team = self.pool['crm.team'].browse(cr, uid, context['team_id'], context=context)
if user_id in team.member_ids.ids:
return {}
team_id = self.pool['crm.team']._get_default_team_id(cr, uid, context=context, user_id=user_id)
return {'value': {'team_id': team_id}}
def stage_find(self, cr, uid, cases, team_id, domain=None, order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- type: stage type must be the same or 'both'
- team_id: if set, stages must belong to this team or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
if context is None:
context = {}
# check whether we should try to add a condition on type
        avoid_add_type_term = any(len(term) == 3 and term[0] == 'type' for term in domain)
# collect all team_ids
team_ids = set()
types = ['both']
if not cases and context.get('default_type'):
ctx_type = context.get('default_type')
types += [ctx_type]
if team_id:
team_ids.add(team_id)
for lead in cases:
if lead.team_id:
team_ids.add(lead.team_id.id)
if lead.type not in types:
types.append(lead.type)
# OR all team_ids
search_domain = []
if team_ids:
            search_domain += ['|'] * (len(team_ids) - 1)
for team_id in team_ids:
search_domain.append(('team_ids', '=', team_id))
# AND with cases types
if not avoid_add_type_term:
search_domain.append(('type', 'in', types))
# AND with the domain in parameter
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('crm.stage').search(cr, uid, search_domain, order=order, limit=1, context=context)
if stage_ids:
return stage_ids[0]
return False
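    # Sketch of the OR-domain construction above (values hypothetical):
    # team_ids = {1, 2} and types = ['both', 'lead'] produce
    #   ['|', ('team_ids', '=', 1), ('team_ids', '=', 2),
    #    ('type', 'in', ['both', 'lead'])]
    # in Odoo's prefix notation: one '|' per extra team term, with the
    # remaining terms implicitly AND-ed.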
def action_set_lost(self, cr, uid, ids, context=None):
""" Lost semantic: probability = 0, active = False """
return self.write(cr, uid, ids, {'probability': 0, 'active': False}, context=context)
# Backward compatibility
case_mark_lost = action_set_lost
def action_set_active(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'active': True}, context=context)
def action_set_unactive(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'active': False}, context=context)
def action_set_won(self, cr, uid, ids, context=None):
""" Won semantic: probability = 100 (active untouched) """
stages_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
stage_id = self.stage_find(cr, uid, [lead], lead.team_id.id or False, [('probability', '=', 100.0), ('on_change', '=', True)], context=context)
if stage_id:
if stages_leads.get(stage_id):
stages_leads[stage_id].append(lead.id)
else:
stages_leads[stage_id] = [lead.id]
for stage_id, lead_ids in stages_leads.items():
self.write(cr, uid, lead_ids, {'stage_id': stage_id}, context=context)
return self.write(cr, uid, ids, {'probability': 100}, context=context)
# Backward compatibility
case_mark_won = action_set_won
def log_next_activity_1(self, cr, uid, ids, context=None):
return self.set_next_activity(cr, uid, ids, next_activity_name='activity_1_id', context=context)
def log_next_activity_2(self, cr, uid, ids, context=None):
return self.set_next_activity(cr, uid, ids, next_activity_name='activity_2_id', context=context)
def log_next_activity_3(self, cr, uid, ids, context=None):
return self.set_next_activity(cr, uid, ids, next_activity_name='activity_3_id', context=context)
def set_next_activity(self, cr, uid, ids, next_activity_name, context=None):
for lead in self.browse(cr, uid, ids, context=context):
if not lead.last_activity_id:
continue
next_activity = next_activity_name and getattr(lead.last_activity_id, next_activity_name, False) or False
if next_activity:
date_action = False
if next_activity.days:
                    date_action = (datetime.now() + timedelta(days=next_activity.days)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
lead.write({
'next_activity_id': next_activity.id,
'date_action': date_action,
'title_action': next_activity.description,
})
return True
def log_next_activity_done(self, cr, uid, ids, context=None, next_activity_name=False):
to_clear_ids = []
for lead in self.browse(cr, uid, ids, context=context):
if not lead.next_activity_id:
continue
body_html = """<div><b>${object.next_activity_id.name}</b></div>
%if object.title_action:
<div>${object.title_action}</div>
%endif"""
body_html = self.pool['mail.template'].render_template(cr, uid, body_html, 'crm.lead', lead.id, context=context)
msg_id = lead.message_post(body_html, subtype_id=lead.next_activity_id.subtype_id.id)
to_clear_ids.append(lead.id)
self.write(cr, uid, [lead.id], {'last_activity_id': lead.next_activity_id.id}, context=context)
if to_clear_ids:
self.cancel_next_activity(cr, uid, to_clear_ids, context=context)
return True
def cancel_next_activity(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {
'next_activity_id': False,
'date_action': False,
'title_action': False,
}, context=context)
def onchange_next_activity_id(self, cr, uid, ids, next_activity_id, context=None):
if not next_activity_id:
return {'value': {
'next_action1': False,
'next_action2': False,
'next_action3': False,
'title_action': False,
'date_action': False,
}}
activity = self.pool['crm.activity'].browse(cr, uid, next_activity_id, context=context)
date_action = False
if activity.days:
date_action = (datetime.now() + timedelta(days=activity.days)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {
'next_activity_1': activity.activity_1_id and activity.activity_1_id.name or False,
'next_activity_2': activity.activity_2_id and activity.activity_2_id.name or False,
'next_activity_3': activity.activity_3_id and activity.activity_3_id.name or False,
'title_action': activity.description,
'date_action': date_action,
'last_activity_id': False,
}}
def _merge_get_result_type(self, cr, uid, opps, context=None):
"""
Define the type of the result of the merge. If at least one of the
element to merge is an opp, the resulting new element will be an opp.
Otherwise it will be a lead.
We'll directly use a list of browse records instead of a list of ids
        for performance's sake: it will spare a second browse of the
leads/opps.
:param list opps: list of browse records containing the leads/opps to process
:return string type: the type of the final element
"""
for opp in opps:
if (opp.type == 'opportunity'):
return 'opportunity'
return 'lead'
def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
"""
Prepare lead/opp data into a dictionary for merging. Different types
of fields are processed in different ways:
- text: all the values are concatenated
- m2m and o2m: those fields aren't processed
- m2o: the first not null value prevails (the other are dropped)
- any other type of field: same as m2o
:param list ids: list of ids of the leads to process
:param list fields: list of leads' fields to process
:return dict data: contains the merged values
"""
opportunities = self.browse(cr, uid, ids, context=context)
def _get_first_not_null(attr):
for opp in opportunities:
if hasattr(opp, attr) and bool(getattr(opp, attr)):
return getattr(opp, attr)
return False
def _get_first_not_null_id(attr):
res = _get_first_not_null(attr)
return res and res.id or False
def _concat_all(attr):
return '\n\n'.join(filter(lambda x: x, [getattr(opp, attr) or '' for opp in opportunities if hasattr(opp, attr)]))
# Process the fields' values
data = {}
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
if field.type in ('many2many', 'one2many'):
continue
elif field.type == 'many2one':
                data[field_name] = _get_first_not_null_id(field_name)  # m2o: first non-null id prevails
            elif field.type == 'text':
                data[field_name] = _concat_all(field_name)  # text: values concatenated, nothing lost
            else:
                data[field_name] = _get_first_not_null(field_name)  # other types: first non-null value
# Define the resulting type ('lead' or 'opportunity')
data['type'] = self._merge_get_result_type(cr, uid, opportunities, context)
return data
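    # Worked example of the merge rules above, with two hypothetical leads A and B:
    # A.description = 'foo', B.description = 'bar', A.partner_id = False,
    # B.partner_id = partner(7)  =>  the merged data contains
    #   {'description': 'foo\n\nbar',  # text fields concatenated
    #    'partner_id': 7}              # m2o: first non-null id prevails
    # while m2m and o2m fields are skipped entirely.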
def _mail_body(self, cr, uid, lead, fields, title=False, context=None):
body = []
if title:
body.append("%s\n" % (title))
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
value = ''
if field.type == 'selection':
if callable(field.selection):
key = field.selection(self, cr, uid, context=context)
else:
key = field.selection
value = dict(key).get(lead[field_name], lead[field_name])
elif field.type == 'many2one':
if lead[field_name]:
value = lead[field_name].sudo().name_get()[0][1]
elif field.type == 'many2many':
if lead[field_name]:
for val in lead[field_name]:
field_value = val.sudo().name_get()[0][1]
value += field_value + ","
else:
value = lead[field_name]
body.append("%s: %s" % (field.string, value or ''))
return "<br/>".join(body + ['<br/>'])
def _merge_notify(self, cr, uid, opportunity_id, opportunities, context=None):
"""
Create a message gathering merged leads/opps information.
"""
        #TOFIX: a mail template should be used instead of a fixed body and subject text
details = []
result_type = self._merge_get_result_type(cr, uid, opportunities, context)
if result_type == 'lead':
merge_message = _('Merged leads')
else:
merge_message = _('Merged opportunities')
subject = [merge_message]
for opportunity in opportunities:
subject.append(opportunity.name)
title = "%s : %s" % (opportunity.type == 'opportunity' and _('Merged opportunity') or _('Merged lead'), opportunity.name)
fields = list(CRM_LEAD_FIELDS_TO_MERGE)
details.append(self._mail_body(cr, uid, opportunity, fields, title=title, context=context))
# Chatter message's subject
subject = subject[0] + ": " + ", ".join(subject[1:])
details = "\n\n".join(details)
return self.message_post(cr, uid, [opportunity_id], body=details, subject=subject, context=context)
def _merge_opportunity_history(self, cr, uid, opportunity_id, opportunities, context=None):
message = self.pool.get('mail.message')
for opportunity in opportunities:
for history in opportunity.message_ids:
message.write(cr, uid, history.id, {
'res_id': opportunity_id,
'subject' : _("From %s : %s") % (opportunity.name, history.subject)
}, context=context)
return True
def _merge_opportunity_attachments(self, cr, uid, opportunity_id, opportunities, context=None):
attach_obj = self.pool.get('ir.attachment')
# return attachments of opportunity
def _get_attachments(opportunity_id):
attachment_ids = attach_obj.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', opportunity_id)], context=context)
return attach_obj.browse(cr, uid, attachment_ids, context=context)
first_attachments = _get_attachments(opportunity_id)
        # counter over all attachments to move, used to keep attachment names unique
count = 1
for opportunity in opportunities:
attachments = _get_attachments(opportunity.id)
for attachment in attachments:
values = {'res_id': opportunity_id,}
for attachment_in_first in first_attachments:
if attachment.name == attachment_in_first.name:
values['name'] = "%s (%s)" % (attachment.name, count,),
count+=1
attachment.write(values)
return True
def get_duplicated_leads(self, cr, uid, ids, partner_id, include_lost=False, context=None):
"""
        Search for opportunities that have the same partner and that aren't done or cancelled
"""
lead = self.browse(cr, uid, ids[0], context=context)
email = lead.partner_id and lead.partner_id.email or lead.email_from
return self.pool['crm.lead']._get_duplicated_leads_by_emails(cr, uid, partner_id, email, include_lost=include_lost, context=context)
def _get_duplicated_leads_by_emails(self, cr, uid, partner_id, email, include_lost=False, context=None):
"""
        Search for opportunities that have the same partner and that aren't done or cancelled
"""
partner_match_domain = []
for email in set(email_split(email) + [email]):
partner_match_domain.append(('email_from', '=ilike', email))
if partner_id:
partner_match_domain.append(('partner_id', '=', partner_id))
partner_match_domain = ['|'] * (len(partner_match_domain) - 1) + partner_match_domain
if not partner_match_domain:
return []
domain = partner_match_domain
if not include_lost:
domain += ['&', ('active', '=', True), ('probability', '<', 100)]
return self.search(cr, uid, domain, context=context)
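    # Domain sketch for a hypothetical call with partner_id=7 and
    # email='John <john@example.com>' (term order illustrative, as the
    # email terms come out of a set):
    #   ['|', '|',
    #    ('email_from', '=ilike', 'john@example.com'),
    #    ('email_from', '=ilike', 'John <john@example.com>'),
    #    ('partner_id', '=', 7),
    #    '&', ('active', '=', True), ('probability', '<', 100)]
    # i.e. (either email form OR the partner) AND not yet lost or won.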
def merge_dependences(self, cr, uid, highest, opportunities, context=None):
self._merge_notify(cr, uid, highest, opportunities, context=context)
self._merge_opportunity_history(cr, uid, highest, opportunities, context=context)
self._merge_opportunity_attachments(cr, uid, highest, opportunities, context=context)
def merge_opportunity(self, cr, uid, ids, user_id=False, team_id=False, context=None):
"""
Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
:param list ids: leads/opportunities ids to merge
:return int id: id of the resulting lead/opp
"""
if context is None:
context = {}
if len(ids) <= 1:
raise UserError(_('Please select more than one element (lead or opportunity) from the list view.'))
opportunities = self.browse(cr, uid, ids, context=context)
sequenced_opps = []
# Sorting the leads/opps according to the confidence level of its stage, which relates to the probability of winning it
# The confidence level increases with the stage sequence, except when the stage probability is 0.0 (Lost cases)
# An Opportunity always has higher confidence level than a lead, unless its stage probability is 0.0
for opportunity in opportunities:
sequence = -1
if opportunity.stage_id and opportunity.stage_id.on_change:
sequence = opportunity.stage_id.sequence
sequenced_opps.append(((int(sequence != -1 and opportunity.type == 'opportunity'), sequence, -opportunity.id), opportunity))
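        # Sort-key sketch with hypothetical values: an opportunity in an
        # on_change stage with sequence 5 and id 42 gets key (1, 5, -42);
        # a lead, or any record in a non-on_change stage, gets (0, -1, -id).
        # reverse=True therefore ranks opportunities first, then higher stage
        # sequences, then lower database ids.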
sequenced_opps.sort(reverse=True)
opportunities = map(itemgetter(1), sequenced_opps)
ids = [opportunity.id for opportunity in opportunities]
highest = opportunities[0]
opportunities_rest = opportunities[1:]
tail_opportunities = opportunities_rest
fields = list(CRM_LEAD_FIELDS_TO_MERGE)
merged_data = self._merge_data(cr, uid, ids, highest, fields, context=context)
if user_id:
merged_data['user_id'] = user_id
if team_id:
merged_data['team_id'] = team_id
# Merge notifications about loss of information
opportunities = [highest]
opportunities.extend(opportunities_rest)
self.merge_dependences(cr, uid, highest.id, tail_opportunities, context=context)
# Check if the stage is in the stages of the sales team. If not, assign the stage with the lowest sequence
if merged_data.get('team_id'):
team_stage_ids = self.pool.get('crm.stage').search(cr, uid, [('team_ids', 'in', merged_data['team_id']), ('type', 'in', [merged_data.get('type'), 'both'])], order='sequence', context=context)
if merged_data.get('stage_id') not in team_stage_ids:
merged_data['stage_id'] = team_stage_ids and team_stage_ids[0] or False
# Write merged data into first opportunity
self.write(cr, uid, [highest.id], merged_data, context=context)
# Delete tail opportunities
# We use the SUPERUSER to avoid access rights issues because as the user had the rights to see the records it should be safe to do so
self.unlink(cr, SUPERUSER_ID, [x.id for x in tail_opportunities], context=context)
return highest.id
def _convert_opportunity_data(self, cr, uid, lead, customer, team_id=False, context=None):
crm_stage = self.pool.get('crm.stage')
contact_id = False
if customer:
contact_id = self.pool.get('res.partner').address_get(cr, uid, [customer.id])['contact']
if not team_id:
team_id = lead.team_id and lead.team_id.id or False
val = {
'planned_revenue': lead.planned_revenue,
'probability': lead.probability,
'name': lead.name,
'partner_id': customer and customer.id or False,
'type': 'opportunity',
'date_open': fields.datetime.now(),
'email_from': customer and customer.email or lead.email_from,
'phone': customer and customer.phone or lead.phone,
'date_conversion': fields.datetime.now(),
}
if not lead.stage_id or lead.stage_id.type=='lead':
stage_id = self.stage_find(cr, uid, [lead], team_id, [('type', 'in', ['opportunity', 'both'])], context=context)
val['stage_id'] = stage_id
if stage_id:
val['probability'] = self.pool['crm.stage'].browse(cr, uid, stage_id, context=context).probability
return val
def convert_opportunity(self, cr, uid, ids, partner_id, user_ids=False, team_id=False, context=None):
customer = False
if partner_id:
partner = self.pool.get('res.partner')
customer = partner.browse(cr, uid, partner_id, context=context)
for lead in self.browse(cr, uid, ids, context=context):
if not lead.active or lead.probability == 100:
continue
vals = self._convert_opportunity_data(cr, uid, lead, customer, team_id, context=context)
self.write(cr, uid, [lead.id], vals, context=context)
if user_ids or team_id:
self.allocate_salesman(cr, uid, ids, user_ids, team_id, context=context)
return True
def _lead_create_contact(self, cr, uid, lead, name, is_company, parent_id=False, context=None):
partner = self.pool.get('res.partner')
vals = {'name': name,
'user_id': lead.user_id.id,
'comment': lead.description,
'team_id': lead.team_id.id or False,
'parent_id': parent_id,
'phone': lead.phone,
'mobile': lead.mobile,
'email': tools.email_split(lead.email_from) and tools.email_split(lead.email_from)[0] or False,
'fax': lead.fax,
'title': lead.title and lead.title.id or False,
'function': lead.function,
'street': lead.street,
'street2': lead.street2,
'zip': lead.zip,
'city': lead.city,
'country_id': lead.country_id and lead.country_id.id or False,
'state_id': lead.state_id and lead.state_id.id or False,
'is_company': is_company,
'type': 'contact'
}
partner = partner.create(cr, uid, vals, context=context)
return partner
def _create_lead_partner(self, cr, uid, lead, context=None):
contact_id = False
contact_name = lead.contact_name or lead.email_from and self.pool.get('res.partner')._parse_partner_name(lead.email_from, context=context)[0] or False
if lead.partner_name:
partner_company_id = self._lead_create_contact(cr, uid, lead, lead.partner_name, True, context=context)
elif lead.partner_id:
partner_company_id = lead.partner_id.id
else:
partner_company_id = False
if contact_name:
contact_id = self._lead_create_contact(cr, uid, lead, contact_name, False, partner_company_id, context=context)
partner_id = contact_id or partner_company_id or self._lead_create_contact(cr, uid, lead, lead.name, False, context=context)
return partner_id
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
Handle partner assignation during a lead conversion.
if action is 'create', create new partner with contact and assign lead to new partner_id.
otherwise assign lead to the specified partner_id
:param list ids: leads/opportunities ids to process
:param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
:param int partner_id: partner to assign if any
        :return dict: dictionary organized as follows: {lead_id: partner_assigned_id}
"""
partner_ids = {}
for lead in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if lead.partner_id:
partner_ids[lead.id] = lead.partner_id.id
if action == 'create':
partner_id = self._create_lead_partner(cr, uid, lead, context)
self.pool['res.partner'].write(cr, uid, partner_id, {'team_id': lead.team_id and lead.team_id.id or False})
if partner_id:
lead.write({'partner_id': partner_id})
partner_ids[lead.id] = partner_id
return partner_ids
def allocate_salesman(self, cr, uid, ids, user_ids=None, team_id=False, context=None):
"""
Assign salesmen and salesteam to a batch of leads. If there are more
leads than salesmen, these salesmen will be assigned in round-robin.
E.g.: 4 salesmen (S1, S2, S3, S4) for 6 leads (L1, L2, ... L6). They
        will be assigned as follows: L1 - S1, L2 - S2, L3 - S3, L4 - S4,
L5 - S1, L6 - S2.
:param list ids: leads/opportunities ids to process
:param list user_ids: salesmen to assign
:param int team_id: salesteam to assign
:return bool
"""
index = 0
for lead_id in ids:
value = {}
if team_id:
value['team_id'] = team_id
if user_ids:
value['user_id'] = user_ids[index]
# Cycle through user_ids
index = (index + 1) % len(user_ids)
if value:
self.write(cr, uid, [lead_id], value, context=context)
return True
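    # Round-robin sketch for the docstring above, with hypothetical ids:
    # ids=[11, 12, 13] and user_ids=[7, 8] write user 7 on lead 11,
    # user 8 on lead 12, then cycle back to user 7 on lead 13, since
    # index advances as (index + 1) % len(user_ids).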
def redirect_opportunity_view(self, cr, uid, opportunity_id, context=None):
models_data = self.pool.get('ir.model.data')
# Get opportunity views
dummy, form_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_form_view_oppor')
dummy, tree_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_tree_view_oppor')
return {
'name': _('Opportunity'),
'view_type': 'form',
'view_mode': 'tree, form',
'res_model': 'crm.lead',
'domain': [('type', '=', 'opportunity')],
'res_id': int(opportunity_id),
'view_id': False,
'views': [(form_view or False, 'form'),
(tree_view or False, 'tree'), (False, 'kanban'),
(False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'context': {'default_type': 'opportunity'}
}
def redirect_lead_view(self, cr, uid, lead_id, context=None):
models_data = self.pool.get('ir.model.data')
# Get lead views
dummy, form_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_form_view_leads')
dummy, tree_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_tree_view_leads')
return {
'name': _('Lead'),
'view_type': 'form',
'view_mode': 'tree, form',
'res_model': 'crm.lead',
'domain': [('type', '=', 'lead')],
'res_id': int(lead_id),
'view_id': False,
'views': [(form_view or False, 'form'),
(tree_view or False, 'tree'),
(False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def action_schedule_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule meeting on current opportunity.
:return dict: dictionary value for created Meeting view
"""
lead = self.browse(cr, uid, ids[0], context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
partner_ids = [self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id]
if lead.partner_id:
partner_ids.append(lead.partner_id.id)
res['context'] = {
'search_default_opportunity_id': lead.type == 'opportunity' and lead.id or False,
'default_opportunity_id': lead.type == 'opportunity' and lead.id or False,
'default_partner_id': lead.partner_id and lead.partner_id.id or False,
'default_partner_ids': partner_ids,
'default_team_id': lead.team_id and lead.team_id.id or False,
'default_name': lead.name,
}
return res
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('type') and not context.get('default_type'):
context['default_type'] = vals.get('type')
if vals.get('team_id') and not context.get('default_team_id'):
context['default_team_id'] = vals.get('team_id')
if vals.get('user_id') and 'date_open' not in vals:
vals['date_open'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(crm_lead, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
if vals.get('user_id') and 'date_open' not in vals:
vals['date_open'] = fields.datetime.now()
# stage change with new stage: update probability and date_closed
if vals.get('stage_id') and 'probability' not in vals:
onchange_stage_values = self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value']
vals.update(onchange_stage_values)
if vals.get('probability') >= 100 or not vals.get('active', True):
vals['date_closed'] = fields.datetime.now()
elif 'probability' in vals and vals['probability'] < 100:
vals['date_closed'] = False
return super(crm_lead, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
if not context:
context = {}
lead = self.browse(cr, uid, id, context=context)
local_context = dict(context)
local_context.setdefault('default_type', lead.type)
local_context.setdefault('default_team_id', lead.team_id.id)
if lead.type == 'opportunity':
default['date_open'] = fields.datetime.now()
else:
default['date_open'] = False
return super(crm_lead, self).copy(cr, uid, id, default, context=local_context)
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'crm.team'
context['empty_list_help_id'] = context.get('default_team_id', None)
context['empty_list_help_document_name'] = _("opportunities")
if help:
alias_record = self.pool['ir.model.data'].xmlid_to_object(cr, uid, "crm.mail_alias_lead_info")
if alias_record and alias_record.alias_domain and alias_record.alias_name:
dynamic_help = '<p>%s</p>' % _("""All email incoming to %(link)s will automatically create new opportunity.
Update your business card, phone book, social media,... Send an email right now and see it here.""") % {
'link': "<a href='mailto:%(email)s'>%(email)s</a>" % {'email': '%s@%s' % (alias_record.alias_name, alias_record.alias_domain)}
}
return '<p class="oe_view_nocontent_create">%s</p>%s%s' % (
_('Click to add a new opportunity'),
help,
dynamic_help)
return super(crm_lead, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Mail Gateway
# ----------------------------------------
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'stage_id' in init_values and record.probability == 100 and record.stage_id and record.stage_id.on_change:
return 'crm.mt_lead_won'
elif 'active' in init_values and record.probability == 0 and not record.active:
return 'crm.mt_lead_lost'
elif 'stage_id' in init_values and record.stage_id and record.stage_id.sequence <= 1:
return 'crm.mt_lead_create'
elif 'stage_id' in init_values:
return 'crm.mt_lead_stage'
return super(crm_lead, self)._track_subtype(cr, uid, ids, init_values, context=context)
def _notification_group_recipients(self, cr, uid, ids, message, recipients, done_ids, group_data, context=None):
""" Override the mail.thread method to handle salesman recipients.
        Indeed those will have specific actions in their notification emails. """
group_sale_salesman = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'base.group_sale_salesman')
for recipient in recipients:
if recipient.id in done_ids:
continue
if recipient.user_ids and group_sale_salesman in recipient.user_ids[0].groups_id.ids:
group_data['group_sale_salesman'] |= recipient
done_ids.add(recipient.id)
return super(crm_lead, self)._notification_group_recipients(cr, uid, ids, message, recipients, done_ids, group_data, context=context)
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
res = super(crm_lead, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
lead = self.browse(cr, uid, ids[0], context=context)
won_action = self._notification_link_helper(cr, uid, ids, 'method', context=context, method='case_mark_won')
lost_action = self._notification_link_helper(cr, uid, ids, 'method', context=context, method='case_mark_lost')
convert_action = self._notification_link_helper(cr, uid, ids, 'method', context=context, method='convert_opportunity', partner_id=lead.partner_id.id)
if lead.type == 'lead':
res['group_sale_salesman'] = {
'actions': [{'url': convert_action, 'title': 'Convert to opportunity'}]
}
else:
res['group_sale_salesman'] = {
'actions': [
{'url': won_action, 'title': 'Won'},
{'url': lost_action, 'title': 'Lost'}]
}
return res
@api.cr_uid_context
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
""" Override to get the reply_to of the parent project. """
leads = self.browse(cr, SUPERUSER_ID, ids, context=context)
team_ids = set([lead.team_id.id for lead in leads if lead.team_id])
aliases = self.pool['crm.team'].message_get_reply_to(cr, uid, list(team_ids), default=default, context=context)
return dict((lead.id, aliases.get(lead.team_id and lead.team_id.id or 0, False)) for lead in leads)
def get_formview_id(self, cr, uid, id, context=None):
obj = self.browse(cr, uid, id, context=context)
if obj.type == 'opportunity':
model, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm', 'crm_case_form_view_oppor')
else:
view_id = super(crm_lead, self).get_formview_id(cr, uid, id, context=context)
return view_id
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(crm_lead, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for lead in self.browse(cr, uid, ids, context=context):
if lead.partner_id:
lead._message_add_suggested_recipient(recipients, partner=lead.partner_id, reason=_('Customer'))
elif lead.email_from:
lead._message_add_suggested_recipient(recipients, email=lead.email_from, reason=_('Customer Email'))
        except AccessError:  # no read access rights -> just ignore suggested recipients because this implies modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
# remove default author when going through the mail gateway. Indeed we
# do not want to explicitly set user_id to False; however we do not
# want the gateway user to be responsible if no other responsible is
# found.
create_context = dict(context or {})
create_context['default_user_id'] = False
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
}
if msg.get('author_id'):
defaults.update(self.on_change_partner_id(cr, uid, None, msg.get('author_id'), context=context)['value'])
if msg.get('priority') in dict(crm_stage.AVAILABLE_PRIORITIES):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(crm_lead, self).message_new(cr, uid, msg, custom_values=defaults, context=create_context)
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Overrides mail_thread message_update that is called by the mailgateway
through message_process.
This method updates the document according to the email.
"""
if isinstance(ids, (str, int, long)):
ids = [ids]
if update_vals is None: update_vals = {}
if msg.get('priority') in dict(crm_stage.AVAILABLE_PRIORITIES):
update_vals['priority'] = msg.get('priority')
maps = {
'revenue': 'planned_revenue',
            'probability': 'probability',
}
for line in msg.get('body', '').split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res and maps.get(res.group(1).lower()):
key = maps.get(res.group(1).lower())
update_vals[key] = res.group(2).lower()
return super(crm_lead, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
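    # Sketch of the body-line parsing above: assuming tools.command_re matches
    # lines of the form 'Set-<key>: <value>', a hypothetical inbound body line
    # 'Set-revenue: 5000' maps through `maps` to
    #   update_vals['planned_revenue'] = '5000'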
def log_meeting(self, cr, uid, ids, meeting_subject, meeting_date, duration, context=None):
if not duration:
duration = _('unknown')
else:
duration = str(duration)
meet_date = datetime.strptime(meeting_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
meeting_usertime = fields.datetime.context_timestamp(cr, uid, meet_date, context=context).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
html_time = "<time datetime='%s+00:00'>%s</time>" % (meeting_date, meeting_usertime)
message = _("Meeting scheduled at '%s'<br> Subject: %s <br> Duration: %s hour(s)") % (html_time, meeting_subject, duration)
return self.message_post(cr, uid, ids, body=message, context=context)
def onchange_state(self, cr, uid, ids, state_id, context=None):
if state_id:
            country_id = self.pool.get('res.country.state').browse(cr, uid, state_id, context).country_id.id
            return {'value': {'country_id': country_id}}
return {}
def message_partner_info_from_emails(self, cr, uid, ids, emails, link_mail=False, context=None):
res = super(crm_lead, self).message_partner_info_from_emails(cr, uid, ids, emails, link_mail=link_mail, context=context)
lead = self.browse(cr, uid, ids[0], context=context)
for partner_info in res:
if not partner_info.get('partner_id') and (lead.partner_name or lead.contact_name):
emails = email_re.findall(partner_info['full_name'] or '')
email = emails and emails[0] or ''
if email and lead.email_from and email.lower() == lead.email_from.lower():
partner_info['full_name'] = '%s <%s>' % (lead.partner_name or lead.contact_name, email)
break
return res
def retrieve_sales_dashboard(self, cr, uid, context=None):
res = {
'meeting': {
'today': 0,
'next_7_days': 0,
},
'activity': {
'today': 0,
'overdue': 0,
'next_7_days': 0,
},
'closing': {
'today': 0,
'overdue': 0,
'next_7_days': 0,
},
'done': {
'this_month': 0,
'last_month': 0,
},
'won': {
'this_month': 0,
'last_month': 0,
},
'nb_opportunities': 0,
}
opportunities = self.search_read(
cr, uid,
[('type', '=', 'opportunity'), ('user_id', '=', uid)],
['date_deadline', 'next_activity_id', 'date_action', 'date_closed', 'planned_revenue'], context=context)
for opp in opportunities:
# Expected closing
if opp['date_deadline']:
date_deadline = datetime.strptime(opp['date_deadline'], tools.DEFAULT_SERVER_DATE_FORMAT).date()
if date_deadline == date.today():
res['closing']['today'] += 1
if date_deadline >= date.today() and date_deadline <= date.today() + timedelta(days=7):
res['closing']['next_7_days'] += 1
if date_deadline < date.today():
res['closing']['overdue'] += 1
# Next activities
if opp['next_activity_id'] and opp['date_action']:
date_action = datetime.strptime(opp['date_action'], tools.DEFAULT_SERVER_DATE_FORMAT).date()
if date_action == date.today():
res['activity']['today'] += 1
if date_action >= date.today() and date_action <= date.today() + timedelta(days=7):
res['activity']['next_7_days'] += 1
if date_action < date.today():
res['activity']['overdue'] += 1
# Won in Opportunities
if opp['date_closed']:
date_closed = datetime.strptime(opp['date_closed'], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
if date_closed <= date.today() and date_closed >= date.today().replace(day=1):
if opp['planned_revenue']:
res['won']['this_month'] += opp['planned_revenue']
elif date_closed < date.today().replace(day=1) and date_closed >= date.today().replace(day=1) - relativedelta(months=+1):
if opp['planned_revenue']:
res['won']['last_month'] += opp['planned_revenue']
        # The crm.activity model is messy, so use raw SQL on mail_message to retrieve the activities done.
cr.execute("""
SELECT
m.id,
m.subtype_id,
m.date,
l.user_id,
l.type
FROM
"mail_message" m
LEFT JOIN
"crm_lead" l
ON
(m.res_id = l.id)
INNER JOIN
"crm_activity" a
ON
(m.subtype_id = a.subtype_id)
WHERE
(m.model = 'crm.lead') AND (l.user_id = %s) AND (l.type = 'opportunity')
""", (uid,))
activites_done = cr.dictfetchall()
for act in activites_done:
if act['date']:
date_act = datetime.strptime(act['date'], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
if date_act <= date.today() and date_act >= date.today().replace(day=1):
res['done']['this_month'] += 1
elif date_act < date.today().replace(day=1) and date_act >= date.today().replace(day=1) - relativedelta(months=+1):
res['done']['last_month'] += 1
# Meetings
min_date = datetime.now().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
max_date = (datetime.now() + timedelta(days=8)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
meetings_domain = [
('start', '>=', min_date),
('start', '<=', max_date)
]
# We need to add 'mymeetings' in the context for the search to be correct.
        meetings = self.pool.get('calendar.event').search_read(cr, uid, meetings_domain, ['start'], context=dict(context or {}, mymeetings=1))
for meeting in meetings:
if meeting['start']:
start = datetime.strptime(meeting['start'], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
if start == date.today():
res['meeting']['today'] += 1
if start >= date.today() and start <= date.today() + timedelta(days=7):
res['meeting']['next_7_days'] += 1
res['nb_opportunities'] = len(opportunities)
user = self.pool('res.users').browse(cr, uid, uid, context=context)
res['done']['target'] = user.target_sales_done
res['won']['target'] = user.target_sales_won
res['currency_id'] = user.company_id.currency_id.id
return res
def modify_target_sales_dashboard(self, cr, uid, target_name, target_value, context=None):
if target_name in ['won', 'done', 'invoiced']:
# bypass rights (with superuser_id)
self.pool('res.users').write(cr, SUPERUSER_ID, [uid], {'target_sales_' + target_name: target_value}, context=context)
else:
raise UserError(_('This target does not exist.'))
class crm_lead_tag(osv.Model):
_name = "crm.lead.tag"
_description = "Category of lead"
_columns = {
'name': fields.char('Name', required=True),
'color': fields.integer('Color Index'),
'team_id': fields.many2one('crm.team', 'Sales Team'),
}
_sql_constraints = [
        ('name_uniq', 'unique (name)', "Tag name already exists!"),
]
class crm_lost_reason(osv.Model):
_name = "crm.lost.reason"
    _description = 'Reason for losing leads'
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active'),
}
_defaults = {
'active': True,
}
| {
"content_hash": "24fa47c14b1b59db236cf3949669e8d3",
"timestamp": "",
"source": "github",
"line_count": 1259,
"max_line_length": 222,
"avg_line_length": 49.019857029388405,
"alnum_prop": 0.5829930650074535,
"repo_name": "vileopratama/vitech",
"id": "c49abd90a146b543e57cf5b4e3d41335e3e7ceb2",
"size": "61816",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/addons/crm/crm_lead.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class MusicstreamingConfig(AppConfig):
name = 'musicstreaming'
| {
"content_hash": "338eb974ebaf0fbd1a123634eda79e0b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.7777777777777778,
"repo_name": "bdeloeste/unnamed-sxsw-hack",
"id": "0b19f5278b2f181ef1c2289df0a88d4409a8ac87",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musicstreaming/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "382932"
},
{
"name": "HTML",
"bytes": "25108"
},
{
"name": "JavaScript",
"bytes": "235436"
},
{
"name": "Python",
"bytes": "11499"
}
],
"symlink_target": ""
} |
from StringIO import StringIO
from satori.ars.thrift import ThriftWriter
from satori.core.export import generate_interface
import satori.core.models
ars_interface = generate_interface()
writer = ThriftWriter()
idl_io = StringIO()
writer.write_to(ars_interface, idl_io)
thrift_idl = idl_io.getvalue()
del writer
del idl_io
| {
"content_hash": "c5fd9916f7db7c8a054d44aece4dc388",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 19.294117647058822,
"alnum_prop": 0.7835365853658537,
"repo_name": "zielmicha/satori",
"id": "92f702b61d8031838f72eccc6756e10004cdc9ff",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.core/satori/core/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
import sys
if sys.version > '3':
basestring = str
from pyspark import since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.linalg import _convert_to_vector
from pyspark.ml.param.shared import *
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaTransformer, _jvm
from pyspark.ml.common import inherit_doc
__all__ = ['Binarizer',
'BucketedRandomProjectionLSH', 'BucketedRandomProjectionLSHModel',
'Bucketizer',
'ChiSqSelector', 'ChiSqSelectorModel',
'CountVectorizer', 'CountVectorizerModel',
'DCT',
'ElementwiseProduct',
'FeatureHasher',
'HashingTF',
'IDF', 'IDFModel',
'Imputer', 'ImputerModel',
'IndexToString',
'MaxAbsScaler', 'MaxAbsScalerModel',
'MinHashLSH', 'MinHashLSHModel',
'MinMaxScaler', 'MinMaxScalerModel',
'NGram',
'Normalizer',
'OneHotEncoder',
'PCA', 'PCAModel',
'PolynomialExpansion',
'QuantileDiscretizer',
'RegexTokenizer',
'RFormula', 'RFormulaModel',
'SQLTransformer',
'StandardScaler', 'StandardScalerModel',
'StopWordsRemover',
'StringIndexer', 'StringIndexerModel',
'Tokenizer',
'VectorAssembler',
'VectorIndexer', 'VectorIndexerModel',
'VectorSlicer',
'Word2Vec', 'Word2VecModel']
@inherit_doc
class Binarizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Binarize a column of continuous features given a threshold.
>>> df = spark.createDataFrame([(0.5,)], ["values"])
>>> binarizer = Binarizer(threshold=1.0, inputCol="values", outputCol="features")
>>> binarizer.transform(df).head().features
0.0
>>> binarizer.setParams(outputCol="freqs").transform(df).head().freqs
0.0
>>> params = {binarizer.threshold: -0.5, binarizer.outputCol: "vector"}
>>> binarizer.transform(df, params).head().vector
1.0
>>> binarizerPath = temp_path + "/binarizer"
>>> binarizer.save(binarizerPath)
>>> loadedBinarizer = Binarizer.load(binarizerPath)
>>> loadedBinarizer.getThreshold() == binarizer.getThreshold()
True
.. versionadded:: 1.4.0
"""
threshold = Param(Params._dummy(), "threshold",
"threshold in binary classification prediction, in range [0, 1]",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, threshold=0.0, inputCol=None, outputCol=None):
"""
__init__(self, threshold=0.0, inputCol=None, outputCol=None)
"""
super(Binarizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid)
self._setDefault(threshold=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, threshold=0.0, inputCol=None, outputCol=None):
"""
setParams(self, threshold=0.0, inputCol=None, outputCol=None)
Sets params for this Binarizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("1.4.0")
def getThreshold(self):
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.threshold)
class LSHParams(Params):
"""
Mixin for Locality Sensitive Hashing (LSH) algorithm parameters.
"""
numHashTables = Param(Params._dummy(), "numHashTables", "number of hash tables, where " +
"increasing number of hash tables lowers the false negative rate, " +
"and decreasing it improves the running performance.",
typeConverter=TypeConverters.toInt)
def __init__(self):
super(LSHParams, self).__init__()
def setNumHashTables(self, value):
"""
Sets the value of :py:attr:`numHashTables`.
"""
return self._set(numHashTables=value)
def getNumHashTables(self):
"""
Gets the value of numHashTables or its default value.
"""
return self.getOrDefault(self.numHashTables)
class LSHModel(JavaModel):
"""
Mixin for Locality Sensitive Hashing (LSH) models.
"""
def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
"""
Given a large dataset and an item, approximately find at most k items which have the
closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
caching of the transformed data when necessary.
.. note:: This method is experimental and will likely change behavior in the next release.
:param dataset: The dataset to search for nearest neighbors of the key.
:param key: Feature vector representing the item to search for.
:param numNearestNeighbors: The maximum number of nearest neighbors.
:param distCol: Output column for storing the distance between each result row and the key.
Use "distCol" as default value if it's not specified.
:return: A dataset containing at most k items closest to the key. A column "distCol" is
added to show the distance between each row and the key.
"""
return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
distCol)
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
"""
        Join two datasets to approximately find all pairs of rows whose distance is smaller than
the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
transformed data when necessary.
:param datasetA: One of the datasets to join.
:param datasetB: Another dataset to join.
:param threshold: The threshold for the distance of row pairs.
:param distCol: Output column for storing the distance between each pair of rows. Use
"distCol" as default value if it's not specified.
:return: A joined dataset containing pairs of rows. The original rows are in columns
"datasetA" and "datasetB", and a column "distCol" is added to show the distance
between each pair.
"""
return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
@inherit_doc
class BucketedRandomProjectionLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
LSH class for Euclidean distance metrics.
The input is dense or sparse vectors, each of which represents a point in the Euclidean
distance space. The output will be vectors of configurable dimension. Hash values in the same
dimension are calculated by the same hash function.
.. seealso:: `Stable Distributions \
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing#Stable_distributions>`_
.. seealso:: `Hashing for Similarity Search: A Survey <https://arxiv.org/abs/1408.2927>`_
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.sql.functions import col
>>> data = [(0, Vectors.dense([-1.0, -1.0 ]),),
... (1, Vectors.dense([-1.0, 1.0 ]),),
... (2, Vectors.dense([1.0, -1.0 ]),),
... (3, Vectors.dense([1.0, 1.0]),)]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> brp = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes",
... seed=12345, bucketLength=1.0)
>>> model = brp.fit(df)
>>> model.transform(df).head()
Row(id=0, features=DenseVector([-1.0, -1.0]), hashes=[DenseVector([-1.0])])
>>> data2 = [(4, Vectors.dense([2.0, 2.0 ]),),
... (5, Vectors.dense([2.0, 3.0 ]),),
... (6, Vectors.dense([3.0, 2.0 ]),),
... (7, Vectors.dense([3.0, 3.0]),)]
>>> df2 = spark.createDataFrame(data2, ["id", "features"])
>>> model.approxNearestNeighbors(df2, Vectors.dense([1.0, 2.0]), 1).collect()
[Row(id=4, features=DenseVector([2.0, 2.0]), hashes=[DenseVector([1.0])], distCol=1.0)]
>>> model.approxSimilarityJoin(df, df2, 3.0, distCol="EuclideanDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("EuclideanDistance")).show()
+---+---+-----------------+
|idA|idB|EuclideanDistance|
+---+---+-----------------+
| 3| 6| 2.23606797749979|
+---+---+-----------------+
...
>>> brpPath = temp_path + "/brp"
>>> brp.save(brpPath)
>>> brp2 = BucketedRandomProjectionLSH.load(brpPath)
>>> brp2.getBucketLength() == brp.getBucketLength()
True
>>> modelPath = temp_path + "/brp-model"
>>> model.save(modelPath)
>>> model2 = BucketedRandomProjectionLSHModel.load(modelPath)
>>> model.transform(df).head().hashes == model2.transform(df).head().hashes
True
.. versionadded:: 2.2.0
"""
bucketLength = Param(Params._dummy(), "bucketLength", "the length of each hash bucket, " +
"a larger bucket lowers the false negative rate.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1,
bucketLength=None):
"""
__init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
bucketLength=None)
"""
super(BucketedRandomProjectionLSH, self).__init__()
self._java_obj = \
self._new_java_obj("org.apache.spark.ml.feature.BucketedRandomProjectionLSH", self.uid)
self._setDefault(numHashTables=1)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1,
bucketLength=None):
"""
setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
bucketLength=None)
Sets params for this BucketedRandomProjectionLSH.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.2.0")
def setBucketLength(self, value):
"""
Sets the value of :py:attr:`bucketLength`.
"""
return self._set(bucketLength=value)
@since("2.2.0")
def getBucketLength(self):
"""
Gets the value of bucketLength or its default value.
"""
return self.getOrDefault(self.bucketLength)
def _create_model(self, java_model):
return BucketedRandomProjectionLSHModel(java_model)
class BucketedRandomProjectionLSHModel(LSHModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`BucketedRandomProjectionLSH`, where multiple random vectors are
stored. The vectors are normalized to be unit vectors and each vector is used in a hash
function: :math:`h_i(x) = floor(r_i \cdot x / bucketLength)` where :math:`r_i` is the
i-th random unit vector. The number of buckets will be `(max L2 norm of input vectors) /
bucketLength`.
.. versionadded:: 2.2.0
"""
@inherit_doc
class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol, HasHandleInvalid,
JavaMLReadable, JavaMLWritable):
"""
Maps a column of continuous features to a column of feature buckets.
>>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
>>> df = spark.createDataFrame(values, ["values"])
>>> bucketizer = Bucketizer(splits=[-float("inf"), 0.5, 1.4, float("inf")],
... inputCol="values", outputCol="buckets")
>>> bucketed = bucketizer.setHandleInvalid("keep").transform(df).collect()
>>> len(bucketed)
6
>>> bucketed[0].buckets
0.0
>>> bucketed[1].buckets
0.0
>>> bucketed[2].buckets
1.0
>>> bucketed[3].buckets
2.0
>>> bucketizer.setParams(outputCol="b").transform(df).head().b
0.0
>>> bucketizerPath = temp_path + "/bucketizer"
>>> bucketizer.save(bucketizerPath)
>>> loadedBucketizer = Bucketizer.load(bucketizerPath)
>>> loadedBucketizer.getSplits() == bucketizer.getSplits()
True
>>> bucketed = bucketizer.setHandleInvalid("skip").transform(df).collect()
>>> len(bucketed)
4
.. versionadded:: 1.4.0
"""
splits = \
Param(Params._dummy(), "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be of length >= 3 and strictly increasing. Values at -inf, inf must be " +
"explicitly provided to cover all Double values; otherwise, values outside the " +
"splits specified will be treated as errors.",
typeConverter=TypeConverters.toListFloat)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are 'skip' (filter out rows with invalid values), " +
"'error' (throw an error), or 'keep' (keep invalid values in a special " +
"additional bucket).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error"):
"""
__init__(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error")
"""
super(Bucketizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid)
self._setDefault(handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error"):
"""
setParams(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error")
Sets params for this Bucketizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setSplits(self, value):
"""
Sets the value of :py:attr:`splits`.
"""
return self._set(splits=value)
@since("1.4.0")
def getSplits(self):
"""
Gets the value of splits or its default value.
"""
return self.getOrDefault(self.splits)
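# A hedged pure-Python sketch of the bucket assignment described by the `splits`
# param above; the edge-case handling is illustrative, not Spark's code.
def _bucketize_sketch(value, splits, handleInvalid="error"):
    from bisect import bisect_right
    from math import isnan
    if isnan(value):
        if handleInvalid == "keep":
            return float(len(splits) - 1)  # the special additional bucket
        if handleInvalid == "skip":
            return None  # caller filters out the row
        raise ValueError("NaN seen with handleInvalid='error'")
    if value == splits[-1]:
        return float(len(splits) - 2)  # the last bucket also includes its upper bound
    idx = bisect_right(splits, value) - 1
    if idx < 0 or idx > len(splits) - 2:
        raise ValueError("value outside the provided splits")
    return float(idx)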
@inherit_doc
class CountVectorizer(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Extracts a vocabulary from document collections and generates a :py:class:`CountVectorizerModel`.
>>> df = spark.createDataFrame(
... [(0, ["a", "b", "c"]), (1, ["a", "b", "b", "c", "a"])],
... ["label", "raw"])
>>> cv = CountVectorizer(inputCol="raw", outputCol="vectors")
>>> model = cv.fit(df)
>>> model.transform(df).show(truncate=False)
+-----+---------------+-------------------------+
|label|raw |vectors |
+-----+---------------+-------------------------+
|0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])|
|1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
+-----+---------------+-------------------------+
...
>>> sorted(model.vocabulary) == ['a', 'b', 'c']
True
>>> countVectorizerPath = temp_path + "/count-vectorizer"
>>> cv.save(countVectorizerPath)
>>> loadedCv = CountVectorizer.load(countVectorizerPath)
>>> loadedCv.getMinDF() == cv.getMinDF()
True
>>> loadedCv.getMinTF() == cv.getMinTF()
True
>>> loadedCv.getVocabSize() == cv.getVocabSize()
True
>>> modelPath = temp_path + "/count-vectorizer-model"
>>> model.save(modelPath)
>>> loadedModel = CountVectorizerModel.load(modelPath)
>>> loadedModel.vocabulary == model.vocabulary
True
.. versionadded:: 1.6.0
"""
minTF = Param(
Params._dummy(), "minTF", "Filter to ignore rare words in" +
" a document. For each document, terms with frequency/count less than the given" +
" threshold are ignored. If this is an integer >= 1, then this specifies a count (of" +
" times the term must appear in the document); if this is a double in [0,1), then this " +
"specifies a fraction (out of the document's token count). Note that the parameter is " +
"only used in transform of CountVectorizerModel and does not affect fitting. Default 1.0",
typeConverter=TypeConverters.toFloat)
minDF = Param(
Params._dummy(), "minDF", "Specifies the minimum number of" +
" different documents a term must appear in to be included in the vocabulary." +
" If this is an integer >= 1, this specifies the number of documents the term must" +
" appear in; if this is a double in [0,1), then this specifies the fraction of documents." +
" Default 1.0", typeConverter=TypeConverters.toFloat)
vocabSize = Param(
Params._dummy(), "vocabSize", "max size of the vocabulary. Default 1 << 18.",
typeConverter=TypeConverters.toInt)
binary = Param(
Params._dummy(), "binary", "Binary toggle to control the output vector values." +
" If True, all nonzero counts (after minTF filter applied) are set to 1. This is useful" +
" for discrete probabilistic models that model binary events rather than integer counts." +
" Default False", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,
outputCol=None):
"""
__init__(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,\
outputCol=None)
"""
super(CountVectorizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.CountVectorizer",
self.uid)
self._setDefault(minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,
outputCol=None):
"""
setParams(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,\
outputCol=None)
Set the params for the CountVectorizer
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setMinTF(self, value):
"""
Sets the value of :py:attr:`minTF`.
"""
return self._set(minTF=value)
@since("1.6.0")
def getMinTF(self):
"""
Gets the value of minTF or its default value.
"""
return self.getOrDefault(self.minTF)
@since("1.6.0")
def setMinDF(self, value):
"""
Sets the value of :py:attr:`minDF`.
"""
return self._set(minDF=value)
@since("1.6.0")
def getMinDF(self):
"""
Gets the value of minDF or its default value.
"""
return self.getOrDefault(self.minDF)
@since("1.6.0")
def setVocabSize(self, value):
"""
Sets the value of :py:attr:`vocabSize`.
"""
return self._set(vocabSize=value)
@since("1.6.0")
def getVocabSize(self):
"""
Gets the value of vocabSize or its default value.
"""
return self.getOrDefault(self.vocabSize)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
def _create_model(self, java_model):
return CountVectorizerModel(java_model)
class CountVectorizerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`CountVectorizer`.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def vocabulary(self):
"""
An array of terms in the vocabulary.
"""
return self._call_java("vocabulary")
@inherit_doc
class DCT(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that takes the 1D discrete cosine transform
of a real vector. No zero padding is performed on the input vector.
It returns a real vector of the same length representing the DCT.
The return vector is scaled such that the transform matrix is
unitary (aka scaled DCT-II).
.. seealso:: `More information on Wikipedia \
<https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II>`_.
>>> from pyspark.ml.linalg import Vectors
>>> df1 = spark.createDataFrame([(Vectors.dense([5.0, 8.0, 6.0]),)], ["vec"])
>>> dct = DCT(inverse=False, inputCol="vec", outputCol="resultVec")
>>> df2 = dct.transform(df1)
>>> df2.head().resultVec
DenseVector([10.969..., -0.707..., -2.041...])
>>> df3 = DCT(inverse=True, inputCol="resultVec", outputCol="origVec").transform(df2)
>>> df3.head().origVec
DenseVector([5.0, 8.0, 6.0])
>>> dctPath = temp_path + "/dct"
>>> dct.save(dctPath)
>>> loadedDtc = DCT.load(dctPath)
>>> loadedDtc.getInverse()
False
.. versionadded:: 1.6.0
"""
inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " +
"default False.", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, inverse=False, inputCol=None, outputCol=None):
"""
__init__(self, inverse=False, inputCol=None, outputCol=None)
"""
super(DCT, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid)
self._setDefault(inverse=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inverse=False, inputCol=None, outputCol=None):
"""
setParams(self, inverse=False, inputCol=None, outputCol=None)
Sets params for this DCT.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setInverse(self, value):
"""
Sets the value of :py:attr:`inverse`.
"""
return self._set(inverse=value)
@since("1.6.0")
def getInverse(self):
"""
Gets the value of inverse or its default value.
"""
return self.getOrDefault(self.inverse)
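# A pure-Python sketch of the unitary (scaled) DCT-II described above; it
# reproduces the doctest values (e.g. 10.969..., -0.707...) but is not the
# implementation Spark calls.
def _dct2_unitary_sketch(x):
    from math import cos, pi, sqrt
    n = len(x)
    out = []
    for k in range(n):
        s = sum(x[j] * cos(pi * (j + 0.5) * k / n) for j in range(n))
        # scaling that makes the transform matrix unitary
        out.append((sqrt(1.0 / n) if k == 0 else sqrt(2.0 / n)) * s)
    return out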
@inherit_doc
class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
JavaMLWritable):
"""
Outputs the Hadamard product (i.e., the element-wise product) of each input vector
with a provided "weight" vector. In other words, it scales each column of the dataset
by a scalar multiplier.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([2.0, 1.0, 3.0]),)], ["values"])
>>> ep = ElementwiseProduct(scalingVec=Vectors.dense([1.0, 2.0, 3.0]),
... inputCol="values", outputCol="eprod")
>>> ep.transform(df).head().eprod
DenseVector([2.0, 2.0, 9.0])
>>> ep.setParams(scalingVec=Vectors.dense([2.0, 3.0, 5.0])).transform(df).head().eprod
DenseVector([4.0, 3.0, 15.0])
>>> elementwiseProductPath = temp_path + "/elementwise-product"
>>> ep.save(elementwiseProductPath)
>>> loadedEp = ElementwiseProduct.load(elementwiseProductPath)
>>> loadedEp.getScalingVec() == ep.getScalingVec()
True
.. versionadded:: 1.5.0
"""
scalingVec = Param(Params._dummy(), "scalingVec", "Vector for hadamard product.",
typeConverter=TypeConverters.toVector)
@keyword_only
def __init__(self, scalingVec=None, inputCol=None, outputCol=None):
"""
__init__(self, scalingVec=None, inputCol=None, outputCol=None)
"""
super(ElementwiseProduct, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, scalingVec=None, inputCol=None, outputCol=None):
"""
setParams(self, scalingVec=None, inputCol=None, outputCol=None)
Sets params for this ElementwiseProduct.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setScalingVec(self, value):
"""
Sets the value of :py:attr:`scalingVec`.
"""
return self._set(scalingVec=value)
@since("2.0.0")
def getScalingVec(self):
"""
Gets the value of scalingVec or its default value.
"""
return self.getOrDefault(self.scalingVec)
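# The transform itself is a one-line Hadamard product; a sketch for clarity:
def _elementwise_product_sketch(x, scalingVec):
    # scale each coordinate by the matching weight
    return [a * b for a, b in zip(x, scalingVec)]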
@inherit_doc
class FeatureHasher(JavaTransformer, HasInputCols, HasOutputCol, HasNumFeatures, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Feature hashing projects a set of categorical or numerical features into a feature vector of
specified dimension (typically substantially smaller than that of the original feature
space). This is done using the hashing trick (https://en.wikipedia.org/wiki/Feature_hashing)
to map features to indices in the feature vector.
The FeatureHasher transformer operates on multiple columns. Each column may contain either
numeric or categorical features. Behavior and handling of column data types are as follows:
* Numeric columns:
For numeric features, the hash value of the column name is used to map the
feature value to its index in the feature vector. Numeric features are never
treated as categorical, even when they are integers. You must explicitly
convert numeric columns containing categorical features to strings first.
* String columns:
For categorical features, the hash value of the string "column_name=value"
is used to map to the vector index, with an indicator value of `1.0`.
Thus, categorical features are "one-hot" encoded
(similarly to using :py:class:`OneHotEncoder` with `dropLast=false`).
* Boolean columns:
Boolean values are treated in the same way as string columns. That is,
boolean features are represented as "column_name=true" or "column_name=false",
with an indicator value of `1.0`.
Null (missing) values are ignored (implicitly zero in the resulting feature vector).
Since a simple modulo is used to transform the hash function to a vector index,
it is advisable to use a power of two as the `numFeatures` parameter;
otherwise the features will not be mapped evenly to the vector indices.
>>> data = [(2.0, True, "1", "foo"), (3.0, False, "2", "bar")]
>>> cols = ["real", "bool", "stringNum", "string"]
>>> df = spark.createDataFrame(data, cols)
>>> hasher = FeatureHasher(inputCols=cols, outputCol="features")
>>> hasher.transform(df).head().features
SparseVector(262144, {51871: 1.0, 63643: 1.0, 174475: 2.0, 253195: 1.0})
>>> hasherPath = temp_path + "/hasher"
>>> hasher.save(hasherPath)
>>> loadedHasher = FeatureHasher.load(hasherPath)
>>> loadedHasher.getNumFeatures() == hasher.getNumFeatures()
True
>>> loadedHasher.transform(df).head().features == hasher.transform(df).head().features
True
.. versionadded:: 2.3.0
"""
@keyword_only
def __init__(self, numFeatures=1 << 18, inputCols=None, outputCol=None):
"""
__init__(self, numFeatures=1 << 18, inputCols=None, outputCol=None)
"""
super(FeatureHasher, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.FeatureHasher", self.uid)
self._setDefault(numFeatures=1 << 18)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.3.0")
def setParams(self, numFeatures=1 << 18, inputCols=None, outputCol=None):
"""
setParams(self, numFeatures=1 << 18, inputCols=None, outputCol=None)
Sets params for this FeatureHasher.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
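# A hedged sketch of the per-column hashing rules listed in the class docstring.
# Python's hash() stands in for the MurmurHash3 that Spark actually uses, so the
# indices will differ from Spark's output; the structure is what matters here.
def _feature_hasher_sketch(row, numFeatures=1 << 18):
    vec = {}
    for col, value in row.items():
        if value is None:
            continue  # nulls are implicitly zero
        if isinstance(value, bool):
            idx = hash("%s=%s" % (col, str(value).lower())) % numFeatures
            delta = 1.0  # booleans behave like strings: indicator of 1.0
        elif isinstance(value, str):
            idx = hash("%s=%s" % (col, value)) % numFeatures
            delta = 1.0  # categorical features are "one-hot" encoded
        else:
            idx = hash(col) % numFeatures  # numeric: hash the column name only
            delta = float(value)
        vec[idx] = vec.get(idx, 0.0) + delta
    return vec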
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, JavaMLReadable,
JavaMLWritable):
"""
Maps a sequence of terms to their term frequencies using the hashing trick.
Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32)
to calculate the hash code value for the term object.
Since a simple modulo is used to transform the hash function to a column index,
it is advisable to use a power of two as the numFeatures parameter;
otherwise the features will not be mapped evenly to the columns.
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["words"])
>>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
>>> hashingTF.transform(df).head().features
SparseVector(10, {0: 1.0, 1: 1.0, 2: 1.0})
>>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
SparseVector(10, {0: 1.0, 1: 1.0, 2: 1.0})
>>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
>>> hashingTF.transform(df, params).head().vector
SparseVector(5, {0: 1.0, 1: 1.0, 2: 1.0})
>>> hashingTFPath = temp_path + "/hashing-tf"
>>> hashingTF.save(hashingTFPath)
>>> loadedHashingTF = HashingTF.load(hashingTFPath)
>>> loadedHashingTF.getNumFeatures() == hashingTF.getNumFeatures()
True
.. versionadded:: 1.3.0
"""
binary = Param(Params._dummy(), "binary", "If True, all non zero counts are set to 1. " +
"This is useful for discrete probabilistic models that model binary events " +
"rather than integer counts. Default False.",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
__init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
"""
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
self._setDefault(numFeatures=1 << 18, binary=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
Sets params for this HashingTF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
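# A small sketch of term-frequency hashing, with Python's hash() standing in for
# the MurmurHash3_x86_32 that Spark uses; indices therefore differ from Spark's,
# but the modulo mapping and the binary toggle behave as described above.
def _hashing_tf_sketch(terms, numFeatures=1 << 18, binary=False):
    freqs = {}
    for term in terms:
        idx = hash(term) % numFeatures
        freqs[idx] = freqs.get(idx, 0.0) + 1.0
    return {i: 1.0 for i in freqs} if binary else freqs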
@inherit_doc
class IDF(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Compute the Inverse Document Frequency (IDF) given a collection of documents.
>>> from pyspark.ml.linalg import DenseVector
>>> df = spark.createDataFrame([(DenseVector([1.0, 2.0]),),
... (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"])
>>> idf = IDF(minDocFreq=3, inputCol="tf", outputCol="idf")
>>> model = idf.fit(df)
>>> model.idf
DenseVector([0.0, 0.0])
>>> model.transform(df).head().idf
DenseVector([0.0, 0.0])
>>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs
DenseVector([0.0, 0.0])
>>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"}
>>> idf.fit(df, params).transform(df).head().vector
DenseVector([0.2877, 0.0])
>>> idfPath = temp_path + "/idf"
>>> idf.save(idfPath)
>>> loadedIdf = IDF.load(idfPath)
>>> loadedIdf.getMinDocFreq() == idf.getMinDocFreq()
True
>>> modelPath = temp_path + "/idf-model"
>>> model.save(modelPath)
>>> loadedModel = IDFModel.load(modelPath)
>>> loadedModel.transform(df).head().idf == model.transform(df).head().idf
True
.. versionadded:: 1.4.0
"""
minDocFreq = Param(Params._dummy(), "minDocFreq",
"minimum number of documents in which a term should appear for filtering",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
__init__(self, minDocFreq=0, inputCol=None, outputCol=None)
"""
super(IDF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
self._setDefault(minDocFreq=0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
setParams(self, minDocFreq=0, inputCol=None, outputCol=None)
Sets params for this IDF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinDocFreq(self, value):
"""
Sets the value of :py:attr:`minDocFreq`.
"""
return self._set(minDocFreq=value)
@since("1.4.0")
def getMinDocFreq(self):
"""
Gets the value of minDocFreq or its default value.
"""
return self.getOrDefault(self.minDocFreq)
def _create_model(self, java_model):
return IDFModel(java_model)
class IDFModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`IDF`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def idf(self):
"""
Returns the IDF vector.
"""
return self._call_java("idf")
@inherit_doc
class Imputer(JavaEstimator, HasInputCols, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Imputation estimator for completing missing values, either using the mean or the median
of the columns in which the missing values are located. The input columns should be of
DoubleType or FloatType. Currently Imputer does not support categorical features and
may produce incorrect values for a categorical feature.
Note that the mean/median value is computed after filtering out missing values.
All Null values in the input columns are treated as missing, and so are also imputed. For
computing median, :py:meth:`pyspark.sql.DataFrame.approxQuantile` is used with a
relative error of `0.001`.
>>> df = spark.createDataFrame([(1.0, float("nan")), (2.0, float("nan")), (float("nan"), 3.0),
... (4.0, 4.0), (5.0, 5.0)], ["a", "b"])
>>> imputer = Imputer(inputCols=["a", "b"], outputCols=["out_a", "out_b"])
>>> model = imputer.fit(df)
>>> model.surrogateDF.show()
+---+---+
| a| b|
+---+---+
|3.0|4.0|
+---+---+
...
>>> model.transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 1.0| 4.0|
|2.0|NaN| 2.0| 4.0|
|NaN|3.0| 3.0| 3.0|
...
>>> imputer.setStrategy("median").setMissingValue(1.0).fit(df).transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 4.0| NaN|
...
>>> imputerPath = temp_path + "/imputer"
>>> imputer.save(imputerPath)
>>> loadedImputer = Imputer.load(imputerPath)
>>> loadedImputer.getStrategy() == imputer.getStrategy()
True
>>> loadedImputer.getMissingValue()
1.0
>>> modelPath = temp_path + "/imputer-model"
>>> model.save(modelPath)
>>> loadedModel = ImputerModel.load(modelPath)
>>> loadedModel.transform(df).head().out_a == model.transform(df).head().out_a
True
.. versionadded:: 2.2.0
"""
outputCols = Param(Params._dummy(), "outputCols",
"output column names.", typeConverter=TypeConverters.toListString)
strategy = Param(Params._dummy(), "strategy",
"strategy for imputation. If mean, then replace missing values using the mean "
"value of the feature. If median, then replace missing values using the "
"median value of the feature.",
typeConverter=TypeConverters.toString)
missingValue = Param(Params._dummy(), "missingValue",
"The placeholder for the missing values. All occurrences of missingValue "
"will be imputed.", typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, strategy="mean", missingValue=float("nan"), inputCols=None,
outputCols=None):
"""
__init__(self, strategy="mean", missingValue=float("nan"), inputCols=None, \
outputCols=None):
"""
super(Imputer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Imputer", self.uid)
self._setDefault(strategy="mean", missingValue=float("nan"))
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, strategy="mean", missingValue=float("nan"), inputCols=None,
outputCols=None):
"""
setParams(self, strategy="mean", missingValue=float("nan"), inputCols=None, \
outputCols=None)
Sets params for this Imputer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.2.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("2.2.0")
def getOutputCols(self):
"""
Gets the value of :py:attr:`outputCols` or its default value.
"""
return self.getOrDefault(self.outputCols)
@since("2.2.0")
def setStrategy(self, value):
"""
Sets the value of :py:attr:`strategy`.
"""
return self._set(strategy=value)
@since("2.2.0")
def getStrategy(self):
"""
Gets the value of :py:attr:`strategy` or its default value.
"""
return self.getOrDefault(self.strategy)
@since("2.2.0")
def setMissingValue(self, value):
"""
Sets the value of :py:attr:`missingValue`.
"""
return self._set(missingValue=value)
@since("2.2.0")
def getMissingValue(self):
"""
Gets the value of :py:attr:`missingValue` or its default value.
"""
return self.getOrDefault(self.missingValue)
def _create_model(self, java_model):
return ImputerModel(java_model)
class ImputerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`Imputer`.
.. versionadded:: 2.2.0
"""
@property
@since("2.2.0")
def surrogateDF(self):
"""
Returns a DataFrame containing inputCols and their corresponding surrogates,
which are used to replace the missing values in the input DataFrame.
"""
return self._call_java("surrogateDF")
@inherit_doc
class MaxAbsScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Rescale each feature individually to the range [-1, 1] by dividing by the maximum absolute
value in each feature. It does not shift/center the data, and thus does not destroy
any sparsity.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> maScaler = MaxAbsScaler(inputCol="a", outputCol="scaled")
>>> model = maScaler.fit(df)
>>> model.transform(df).show()
+-----+------+
| a|scaled|
+-----+------+
|[1.0]| [0.5]|
|[2.0]| [1.0]|
+-----+------+
...
>>> scalerPath = temp_path + "/max-abs-scaler"
>>> maScaler.save(scalerPath)
>>> loadedMAScaler = MaxAbsScaler.load(scalerPath)
>>> loadedMAScaler.getInputCol() == maScaler.getInputCol()
True
>>> loadedMAScaler.getOutputCol() == maScaler.getOutputCol()
True
>>> modelPath = temp_path + "/max-abs-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = MaxAbsScalerModel.load(modelPath)
>>> loadedModel.maxAbs == model.maxAbs
True
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
"""
__init__(self, inputCol=None, outputCol=None)
"""
super(MaxAbsScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MaxAbsScaler", self.uid)
self._setDefault()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, inputCol=None, outputCol=None):
"""
setParams(self, inputCol=None, outputCol=None)
Sets params for this MaxAbsScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MaxAbsScalerModel(java_model)
class MaxAbsScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`MaxAbsScaler`.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def maxAbs(self):
"""
Max Abs vector.
"""
return self._call_java("maxAbs")
@inherit_doc
class MinHashLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
LSH class for Jaccard distance.
The input can be dense or sparse vectors, but it is more efficient if it is sparse.
For example, `Vectors.sparse(10, [(2, 1.0), (3, 1.0), (5, 1.0)])` means there are 10 elements
in the space. This set contains elements 2, 3, and 5. Also, any input vector must have at
least 1 non-zero index, and all non-zero values are treated as binary "1" values.
.. seealso:: `Wikipedia on MinHash <https://en.wikipedia.org/wiki/MinHash>`_
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.sql.functions import col
>>> data = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
... (1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
... (2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> mh = MinHashLSH(inputCol="features", outputCol="hashes", seed=12345)
>>> model = mh.fit(df)
>>> model.transform(df).head()
Row(id=0, features=SparseVector(6, {0: 1.0, 1: 1.0, 2: 1.0}), hashes=[DenseVector([-1638925...
>>> data2 = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
... (4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
... (5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
>>> df2 = spark.createDataFrame(data2, ["id", "features"])
>>> key = Vectors.sparse(6, [1, 2], [1.0, 1.0])
>>> model.approxNearestNeighbors(df2, key, 1).collect()
[Row(id=5, features=SparseVector(6, {1: 1.0, 2: 1.0, 4: 1.0}), hashes=[DenseVector([-163892...
>>> model.approxSimilarityJoin(df, df2, 0.6, distCol="JaccardDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("JaccardDistance")).show()
+---+---+---------------+
|idA|idB|JaccardDistance|
+---+---+---------------+
| 1| 4| 0.5|
| 0| 5| 0.5|
+---+---+---------------+
...
>>> mhPath = temp_path + "/mh"
>>> mh.save(mhPath)
>>> mh2 = MinHashLSH.load(mhPath)
>>> mh2.getOutputCol() == mh.getOutputCol()
True
>>> modelPath = temp_path + "/mh-model"
>>> model.save(modelPath)
>>> model2 = MinHashLSHModel.load(modelPath)
>>> model.transform(df).head().hashes == model2.transform(df).head().hashes
True
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
"""
__init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
"""
super(MinHashLSH, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinHashLSH", self.uid)
self._setDefault(numHashTables=1)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
"""
setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
Sets params for this MinHashLSH.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MinHashLSHModel(java_model)
class MinHashLSHModel(LSHModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model produced by :py:class:`MinHashLSH`, where multiple hash functions are stored. Each
hash function is picked from the following family of hash functions, where :math:`a_i` and
:math:`b_i` are randomly chosen integers less than prime:
:math:`h_i(x) = ((x \cdot a_i + b_i) \mod prime)` This hash family is approximately min-wise
independent according to the reference.
.. seealso:: Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear \
permutations." Electronic Journal of Combinatorics 7 (2000): R26.
.. versionadded:: 2.2.0
"""
@inherit_doc
class MinMaxScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Rescale each feature individually to a common range [min, max] linearly using column summary
statistics, which is also known as min-max normalization or Rescaling. The rescaled value for
feature E is calculated as,
Rescaled(e_i) = (e_i - E_min) / (E_max - E_min) * (max - min) + min
For the case E_max == E_min, Rescaled(e_i) = 0.5 * (max + min)
.. note:: Since zero values will probably be transformed to non-zero values, output of the
transformer will be DenseVector even for sparse input.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> mmScaler = MinMaxScaler(inputCol="a", outputCol="scaled")
>>> model = mmScaler.fit(df)
>>> model.originalMin
DenseVector([0.0])
>>> model.originalMax
DenseVector([2.0])
>>> model.transform(df).show()
+-----+------+
| a|scaled|
+-----+------+
|[0.0]| [0.0]|
|[2.0]| [1.0]|
+-----+------+
...
>>> minMaxScalerPath = temp_path + "/min-max-scaler"
>>> mmScaler.save(minMaxScalerPath)
>>> loadedMMScaler = MinMaxScaler.load(minMaxScalerPath)
>>> loadedMMScaler.getMin() == mmScaler.getMin()
True
>>> loadedMMScaler.getMax() == mmScaler.getMax()
True
>>> modelPath = temp_path + "/min-max-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = MinMaxScalerModel.load(modelPath)
>>> loadedModel.originalMin == model.originalMin
True
>>> loadedModel.originalMax == model.originalMax
True
.. versionadded:: 1.6.0
"""
min = Param(Params._dummy(), "min", "Lower bound of the output feature range",
typeConverter=TypeConverters.toFloat)
max = Param(Params._dummy(), "max", "Upper bound of the output feature range",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
__init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
"""
super(MinMaxScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinMaxScaler", self.uid)
self._setDefault(min=0.0, max=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
Sets params for this MinMaxScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setMin(self, value):
"""
Sets the value of :py:attr:`min`.
"""
return self._set(min=value)
@since("1.6.0")
def getMin(self):
"""
Gets the value of min or its default value.
"""
return self.getOrDefault(self.min)
@since("1.6.0")
def setMax(self, value):
"""
Sets the value of :py:attr:`max`.
"""
return self._set(max=value)
@since("1.6.0")
def getMax(self):
"""
Gets the value of max or its default value.
"""
return self.getOrDefault(self.max)
def _create_model(self, java_model):
return MinMaxScalerModel(java_model)
class MinMaxScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`MinMaxScaler`.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def originalMin(self):
"""
Min value for each original column during fitting.
"""
return self._call_java("originalMin")
@property
@since("2.0.0")
def originalMax(self):
"""
Max value for each original column during fitting.
"""
return self._call_java("originalMax")
@inherit_doc
@ignore_unicode_prefix
class NGram(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that converts the input array of strings into an array of n-grams. Null
values in the input array are ignored.
It returns an array of n-grams where each n-gram is represented by a space-separated string of
words.
When the input is empty, an empty array is returned.
When the input array length is less than n (number of elements per n-gram), no n-grams are
returned.
>>> df = spark.createDataFrame([Row(inputTokens=["a", "b", "c", "d", "e"])])
>>> ngram = NGram(n=2, inputCol="inputTokens", outputCol="nGrams")
>>> ngram.transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b', u'b c', u'c d', u'd e'])
>>> # Change n-gram length
>>> ngram.setParams(n=4).transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
>>> # Temporarily modify output column.
>>> ngram.transform(df, {ngram.outputCol: "output"}).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], output=[u'a b c d', u'b c d e'])
>>> ngram.transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
>>> # Must use keyword arguments to specify params.
>>> ngram.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> ngramPath = temp_path + "/ngram"
>>> ngram.save(ngramPath)
>>> loadedNGram = NGram.load(ngramPath)
>>> loadedNGram.getN() == ngram.getN()
True
.. versionadded:: 1.5.0
"""
n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, n=2, inputCol=None, outputCol=None):
"""
__init__(self, n=2, inputCol=None, outputCol=None)
"""
super(NGram, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid)
self._setDefault(n=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, n=2, inputCol=None, outputCol=None):
"""
setParams(self, n=2, inputCol=None, outputCol=None)
Sets params for this NGram.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setN(self, value):
"""
Sets the value of :py:attr:`n`.
"""
return self._set(n=value)
@since("1.5.0")
def getN(self):
"""
Gets the value of n or its default value.
"""
return self.getOrDefault(self.n)
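# A sliding-window sketch of the n-gram construction described above; fewer
# than n tokens yields an empty list, matching the documented behavior.
def _ngrams_sketch(tokens, n=2):
    return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]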
@inherit_doc
class Normalizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Normalize a vector to have unit norm using the given p-norm.
>>> from pyspark.ml.linalg import Vectors
>>> svec = Vectors.sparse(4, {1: 4.0, 3: 3.0})
>>> df = spark.createDataFrame([(Vectors.dense([3.0, -4.0]), svec)], ["dense", "sparse"])
>>> normalizer = Normalizer(p=2.0, inputCol="dense", outputCol="features")
>>> normalizer.transform(df).head().features
DenseVector([0.6, -0.8])
>>> normalizer.setParams(inputCol="sparse", outputCol="freqs").transform(df).head().freqs
SparseVector(4, {1: 0.8, 3: 0.6})
>>> params = {normalizer.p: 1.0, normalizer.inputCol: "dense", normalizer.outputCol: "vector"}
>>> normalizer.transform(df, params).head().vector
DenseVector([0.4286, -0.5714])
>>> normalizerPath = temp_path + "/normalizer"
>>> normalizer.save(normalizerPath)
>>> loadedNormalizer = Normalizer.load(normalizerPath)
>>> loadedNormalizer.getP() == normalizer.getP()
True
.. versionadded:: 1.4.0
"""
p = Param(Params._dummy(), "p", "the p norm value.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, p=2.0, inputCol=None, outputCol=None):
"""
__init__(self, p=2.0, inputCol=None, outputCol=None)
"""
super(Normalizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid)
self._setDefault(p=2.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, p=2.0, inputCol=None, outputCol=None):
"""
setParams(self, p=2.0, inputCol=None, outputCol=None)
Sets params for this Normalizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setP(self, value):
"""
Sets the value of :py:attr:`p`.
"""
return self._set(p=value)
@since("1.4.0")
def getP(self):
"""
Gets the value of p or its default value.
"""
return self.getOrDefault(self.p)
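# A sketch of p-norm normalization; for the dense doctest above, p=2.0 gives
# norm 5.0 for [3.0, -4.0], hence [0.6, -0.8]. Illustrative for finite p only.
def _normalize_sketch(x, p=2.0):
    norm = sum(abs(v) ** p for v in x) ** (1.0 / p)
    return [v / norm for v in x] if norm != 0.0 else list(x)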
@inherit_doc
class OneHotEncoder(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A one-hot encoder that maps a column of category indices to a
column of binary vectors, with at most a single one-value per row
that indicates the input category index.
For example with 5 categories, an input value of 2.0 would map to
an output vector of `[0.0, 0.0, 1.0, 0.0]`.
The last category is not included by default (configurable via
:py:attr:`dropLast`) because it makes the vector entries sum up to
one, and hence linearly dependent.
So an input value of 4.0 maps to `[0.0, 0.0, 0.0, 0.0]`.
.. note:: This is different from scikit-learn's OneHotEncoder,
which keeps all categories. The output vectors are sparse.
.. seealso::
:py:class:`StringIndexer` for converting categorical values into
category indices
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> encoder = OneHotEncoder(inputCol="indexed", outputCol="features")
>>> encoder.transform(td).head().features
SparseVector(2, {0: 1.0})
>>> encoder.setParams(outputCol="freqs").transform(td).head().freqs
SparseVector(2, {0: 1.0})
>>> params = {encoder.dropLast: False, encoder.outputCol: "test"}
>>> encoder.transform(td, params).head().test
SparseVector(3, {0: 1.0})
>>> onehotEncoderPath = temp_path + "/onehot-encoder"
>>> encoder.save(onehotEncoderPath)
>>> loadedEncoder = OneHotEncoder.load(onehotEncoderPath)
>>> loadedEncoder.getDropLast() == encoder.getDropLast()
True
.. versionadded:: 1.4.0
"""
dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, dropLast=True, inputCol=None, outputCol=None):
"""
__init__(self, dropLast=True, inputCol=None, outputCol=None)
"""
super(OneHotEncoder, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.OneHotEncoder", self.uid)
self._setDefault(dropLast=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, dropLast=True, inputCol=None, outputCol=None):
"""
setParams(self, dropLast=True, inputCol=None, outputCol=None)
Sets params for this OneHotEncoder.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setDropLast(self, value):
"""
Sets the value of :py:attr:`dropLast`.
"""
return self._set(dropLast=value)
@since("1.4.0")
def getDropLast(self):
"""
Gets the value of dropLast or its default value.
"""
return self.getOrDefault(self.dropLast)
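# A sketch of the encoding with dropLast: the output has numCategories - 1
# slots and the last category maps to the all-zero vector, as in the example
# above where 4.0 (of 5 categories) encodes to [0.0, 0.0, 0.0, 0.0].
def _one_hot_sketch(index, numCategories, dropLast=True):
    size = numCategories - 1 if dropLast else numCategories
    vec = [0.0] * size
    if int(index) < size:
        vec[int(index)] = 1.0
    return vec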
@inherit_doc
class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
JavaMLWritable):
"""
Perform feature expansion in a polynomial space. As described on the `Polynomial expansion
Wikipedia page <http://en.wikipedia.org/wiki/Polynomial_expansion>`_, "In mathematics, an
expansion of a product of sums expresses it as a sum of products by using the fact that
multiplication distributes over addition". Take a 2-variable feature vector as an example:
`(x, y)`, if we want to expand it with degree 2, then we get `(x, x * x, y, x * y, y * y)`.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.5, 2.0]),)], ["dense"])
>>> px = PolynomialExpansion(degree=2, inputCol="dense", outputCol="expanded")
>>> px.transform(df).head().expanded
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> px.setParams(outputCol="test").transform(df).head().test
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> polyExpansionPath = temp_path + "/poly-expansion"
>>> px.save(polyExpansionPath)
>>> loadedPx = PolynomialExpansion.load(polyExpansionPath)
>>> loadedPx.getDegree() == px.getDegree()
True
.. versionadded:: 1.4.0
"""
degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, degree=2, inputCol=None, outputCol=None):
"""
__init__(self, degree=2, inputCol=None, outputCol=None)
"""
super(PolynomialExpansion, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.PolynomialExpansion", self.uid)
self._setDefault(degree=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, degree=2, inputCol=None, outputCol=None):
"""
setParams(self, degree=2, inputCol=None, outputCol=None)
Sets params for this PolynomialExpansion.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setDegree(self, value):
"""
Sets the value of :py:attr:`degree`.
"""
return self._set(degree=value)
@since("1.4.0")
def getDegree(self):
"""
Gets the value of degree or its default value.
"""
return self.getOrDefault(self.degree)
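# A sketch of the expansion using itertools; it produces the same monomials as
# the doctest for (0.5, 2.0) at degree 2, though in a different order than
# Spark's internal layout.
def _poly_expand_sketch(x, degree=2):
    from itertools import combinations_with_replacement
    out = []
    for d in range(1, degree + 1):
        for combo in combinations_with_replacement(range(len(x)), d):
            term = 1.0
            for i in combo:
                term *= x[i]
            out.append(term)
    return out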
@inherit_doc
class QuantileDiscretizer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
`QuantileDiscretizer` takes a column with continuous features and outputs a column with binned
categorical features. The number of bins can be set using the :py:attr:`numBuckets` parameter.
It is possible that the number of buckets used will be less than this value, for example, if
there are too few distinct values of the input to create enough distinct quantiles.
NaN handling: QuantileDiscretizer will raise an error when it finds NaN values in the dataset,
but the user can also choose to either keep or remove NaN values within the dataset by
setting the :py:attr:`handleInvalid` parameter. If the user chooses to keep NaN values, they
will be
handled specially and placed into their own bucket, for example, if 4 buckets are used, then
non-NaN data will be put into buckets[0-3], but NaNs will be counted in a special bucket[4].
Algorithm: The bin ranges are chosen using an approximate algorithm (see the documentation for
:py:meth:`~.DataFrameStatFunctions.approxQuantile` for a detailed description).
The precision of the approximation can be controlled with the
:py:attr:`relativeError` parameter.
The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values.
>>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
>>> df = spark.createDataFrame(values, ["values"])
>>> qds = QuantileDiscretizer(numBuckets=2,
... inputCol="values", outputCol="buckets", relativeError=0.01, handleInvalid="error")
>>> qds.getRelativeError()
0.01
>>> bucketizer = qds.fit(df)
>>> qds.setHandleInvalid("keep").fit(df).transform(df).count()
6
>>> qds.setHandleInvalid("skip").fit(df).transform(df).count()
4
>>> splits = bucketizer.getSplits()
>>> splits[0]
-inf
>>> print("%2.1f" % round(splits[1], 1))
0.4
>>> bucketed = bucketizer.transform(df).head()
>>> bucketed.buckets
0.0
>>> quantileDiscretizerPath = temp_path + "/quantile-discretizer"
>>> qds.save(quantileDiscretizerPath)
>>> loadedQds = QuantileDiscretizer.load(quantileDiscretizerPath)
>>> loadedQds.getNumBuckets() == qds.getNumBuckets()
True
.. versionadded:: 2.0.0
"""
numBuckets = Param(Params._dummy(), "numBuckets",
"Maximum number of buckets (quantiles, or " +
"categories) into which data points are grouped. Must be >= 2.",
typeConverter=TypeConverters.toInt)
relativeError = Param(Params._dummy(), "relativeError", "The relative target precision for " +
"the approximate quantile algorithm used to generate buckets. " +
"Must be in the range [0, 1].",
typeConverter=TypeConverters.toFloat)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are skip (filter out rows with invalid values), " +
"error (throw an error), or keep (keep invalid values in a special " +
"additional bucket).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
handleInvalid="error"):
"""
__init__(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
handleInvalid="error")
"""
super(QuantileDiscretizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.QuantileDiscretizer",
self.uid)
self._setDefault(numBuckets=2, relativeError=0.001, handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
handleInvalid="error"):
"""
setParams(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
handleInvalid="error")
Set the params for the QuantileDiscretizer
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setNumBuckets(self, value):
"""
Sets the value of :py:attr:`numBuckets`.
"""
return self._set(numBuckets=value)
@since("2.0.0")
def getNumBuckets(self):
"""
Gets the value of numBuckets or its default value.
"""
return self.getOrDefault(self.numBuckets)
@since("2.0.0")
def setRelativeError(self, value):
"""
Sets the value of :py:attr:`relativeError`.
"""
return self._set(relativeError=value)
@since("2.0.0")
def getRelativeError(self):
"""
Gets the value of relativeError or its default value.
"""
return self.getOrDefault(self.relativeError)
def _create_model(self, java_model):
"""
Private method to convert the java_model to a Python model.
"""
return Bucketizer(splits=list(java_model.getSplits()),
inputCol=self.getInputCol(),
outputCol=self.getOutputCol(),
handleInvalid=self.getHandleInvalid())
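# A hedged sketch of how bin splits are derived from quantiles; Spark uses the
# approximate algorithm behind approxQuantile (tunable via relativeError),
# whereas this illustration reads exact order statistics from sorted data.
def _quantile_splits_sketch(values, numBuckets):
    data = sorted(v for v in values if v == v)  # v != v only for NaN
    inner = [data[int(i * (len(data) - 1) / float(numBuckets))]
             for i in range(1, numBuckets)]
    # deduplication may yield fewer buckets than requested, as documented above
    return [float("-inf")] + sorted(set(inner)) + [float("inf")]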
@inherit_doc
@ignore_unicode_prefix
class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A regex-based tokenizer that extracts tokens either by using the
provided regex pattern (in Java dialect) to split the text
(default) or repeatedly matching the regex (if gaps is false).
Optional parameters also allow filtering tokens using a minimum
length.
It returns an array of strings that can be empty.
>>> df = spark.createDataFrame([("A B c",)], ["text"])
>>> reTokenizer = RegexTokenizer(inputCol="text", outputCol="words")
>>> reTokenizer.transform(df).head()
Row(text=u'A B c', words=[u'a', u'b', u'c'])
>>> # Change a parameter.
>>> reTokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text=u'A B c', tokens=[u'a', u'b', u'c'])
>>> # Temporarily modify a parameter.
>>> reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head()
Row(text=u'A B c', words=[u'a', u'b', u'c'])
>>> reTokenizer.transform(df).head()
Row(text=u'A B c', tokens=[u'a', u'b', u'c'])
>>> # Must use keyword arguments to specify params.
>>> reTokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> regexTokenizerPath = temp_path + "/regex-tokenizer"
>>> reTokenizer.save(regexTokenizerPath)
>>> loadedReTokenizer = RegexTokenizer.load(regexTokenizerPath)
>>> loadedReTokenizer.getMinTokenLength() == reTokenizer.getMinTokenLength()
True
>>> loadedReTokenizer.getGaps() == reTokenizer.getGaps()
True
.. versionadded:: 1.4.0
"""
minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)",
typeConverter=TypeConverters.toInt)
gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens " +
"(False)")
pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing",
typeConverter=TypeConverters.toString)
toLowercase = Param(Params._dummy(), "toLowercase", "whether to convert all characters to " +
"lowercase before tokenizing", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
outputCol=None, toLowercase=True):
"""
__init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
outputCol=None, toLowercase=True)
"""
super(RegexTokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid)
self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+", toLowercase=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
outputCol=None, toLowercase=True):
"""
setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
outputCol=None, toLowercase=True)
Sets params for this RegexTokenizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinTokenLength(self, value):
"""
Sets the value of :py:attr:`minTokenLength`.
"""
return self._set(minTokenLength=value)
@since("1.4.0")
def getMinTokenLength(self):
"""
Gets the value of minTokenLength or its default value.
"""
return self.getOrDefault(self.minTokenLength)
@since("1.4.0")
def setGaps(self, value):
"""
Sets the value of :py:attr:`gaps`.
"""
return self._set(gaps=value)
@since("1.4.0")
def getGaps(self):
"""
Gets the value of gaps or its default value.
"""
return self.getOrDefault(self.gaps)
@since("1.4.0")
def setPattern(self, value):
"""
Sets the value of :py:attr:`pattern`.
"""
return self._set(pattern=value)
@since("1.4.0")
def getPattern(self):
"""
Gets the value of pattern or its default value.
"""
return self.getOrDefault(self.pattern)
@since("2.0.0")
def setToLowercase(self, value):
"""
Sets the value of :py:attr:`toLowercase`.
"""
return self._set(toLowercase=value)
@since("2.0.0")
def getToLowercase(self):
"""
Gets the value of toLowercase or its default value.
"""
return self.getOrDefault(self.toLowercase)
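# A sketch of the tokenization logic using Python's re module; note that the
# actual pattern is interpreted in the Java regex dialect, which differs from
# Python's in some constructs.
def _regex_tokenize_sketch(text, pattern="\\s+", gaps=True, minTokenLength=1,
                           toLowercase=True):
    import re
    if toLowercase:
        text = text.lower()
    # gaps=True splits on the pattern; gaps=False keeps the pattern's matches
    tokens = re.split(pattern, text) if gaps else re.findall(pattern, text)
    return [t for t in tokens if len(t) >= minTokenLength]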
@inherit_doc
class SQLTransformer(JavaTransformer, JavaMLReadable, JavaMLWritable):
"""
Implements the transforms defined by a SQL statement.
Currently we only support SQL syntax like 'SELECT ... FROM __THIS__'
where '__THIS__' represents the underlying table of the input dataset.
>>> df = spark.createDataFrame([(0, 1.0, 3.0), (2, 2.0, 5.0)], ["id", "v1", "v2"])
>>> sqlTrans = SQLTransformer(
... statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
>>> sqlTrans.transform(df).head()
Row(id=0, v1=1.0, v2=3.0, v3=4.0, v4=3.0)
>>> sqlTransformerPath = temp_path + "/sql-transformer"
>>> sqlTrans.save(sqlTransformerPath)
>>> loadedSqlTrans = SQLTransformer.load(sqlTransformerPath)
>>> loadedSqlTrans.getStatement() == sqlTrans.getStatement()
True
.. versionadded:: 1.6.0
"""
statement = Param(Params._dummy(), "statement", "SQL statement",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, statement=None):
"""
__init__(self, statement=None)
"""
super(SQLTransformer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, statement=None):
"""
setParams(self, statement=None)
Sets params for this SQLTransformer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setStatement(self, value):
"""
Sets the value of :py:attr:`statement`.
"""
return self._set(statement=value)
@since("1.6.0")
def getStatement(self):
"""
Gets the value of statement or its default value.
"""
return self.getOrDefault(self.statement)
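# A hedged sketch of the placeholder substitution: register the input as a temp
# view and run the statement with __THIS__ replaced by that view's name. The
# view name here is illustrative; the JVM implementation manages its own.
def _sql_transform_sketch(df, statement, spark):
    df.createOrReplaceTempView("sql_transformer_input")
    return spark.sql(statement.replace("__THIS__", "sql_transformer_input"))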
@inherit_doc
class StandardScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Standardizes features by removing the mean and scaling to unit variance using column summary
statistics on the samples in the training set.
The "unit std" is computed using the `corrected sample standard deviation \
<https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation>`_,
which is computed as the square root of the unbiased sample variance.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> standardScaler = StandardScaler(inputCol="a", outputCol="scaled")
>>> model = standardScaler.fit(df)
>>> model.mean
DenseVector([1.0])
>>> model.std
DenseVector([1.4142])
>>> model.transform(df).collect()[1].scaled
DenseVector([1.4142])
>>> standardScalerPath = temp_path + "/standard-scaler"
>>> standardScaler.save(standardScalerPath)
>>> loadedStandardScaler = StandardScaler.load(standardScalerPath)
>>> loadedStandardScaler.getWithMean() == standardScaler.getWithMean()
True
>>> loadedStandardScaler.getWithStd() == standardScaler.getWithStd()
True
>>> modelPath = temp_path + "/standard-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = StandardScalerModel.load(modelPath)
>>> loadedModel.std == model.std
True
>>> loadedModel.mean == model.mean
True
.. versionadded:: 1.4.0
"""
withMean = Param(Params._dummy(), "withMean", "Center data with mean",
typeConverter=TypeConverters.toBoolean)
withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
__init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
"""
super(StandardScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid)
self._setDefault(withMean=False, withStd=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
Sets params for this StandardScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setWithMean(self, value):
"""
Sets the value of :py:attr:`withMean`.
"""
return self._set(withMean=value)
@since("1.4.0")
def getWithMean(self):
"""
Gets the value of withMean or its default value.
"""
return self.getOrDefault(self.withMean)
@since("1.4.0")
def setWithStd(self, value):
"""
Sets the value of :py:attr:`withStd`.
"""
return self._set(withStd=value)
@since("1.4.0")
def getWithStd(self):
"""
Gets the value of withStd or its default value.
"""
return self.getOrDefault(self.withStd)
def _create_model(self, java_model):
return StandardScalerModel(java_model)
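# Worked check of the "corrected sample standard deviation" used in the
# class doctest above, for the two samples [0.0] and [2.0]: the mean is
# 1.0 and std = sqrt(((0 - 1)**2 + (2 - 1)**2) / (n - 1)) = sqrt(2)
# ~= 1.4142, which is why model.std is DenseVector([1.4142]) and the
# scaled value of 2.0 (with the default withMean=False) is
# 2.0 / 1.4142 ~= 1.4142.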
class StandardScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`StandardScaler`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def std(self):
"""
Standard deviation of the StandardScalerModel.
"""
return self._call_java("std")
@property
@since("2.0.0")
def mean(self):
"""
Mean of the StandardScalerModel.
"""
return self._call_java("mean")
@inherit_doc
class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid, JavaMLReadable,
JavaMLWritable):
"""
A label indexer that maps a string column of labels to an ML column of label indices.
If the input column is numeric, we cast it to string and index the string values.
The indices are in [0, numLabels). By default, this is ordered by label frequencies
so the most frequent label gets index 0. The ordering behavior is controlled by
setting :py:attr:`stringOrderType`. Its default value is 'frequencyDesc'.
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
... stringOrderType="frequencyDesc")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0)]
>>> inverter = IndexToString(inputCol="indexed", outputCol="label2", labels=model.labels)
>>> itd = inverter.transform(td)
>>> sorted(set([(i[0], str(i[1])) for i in itd.select(itd.id, itd.label2).collect()]),
... key=lambda x: x[0])
[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')]
>>> stringIndexerPath = temp_path + "/string-indexer"
>>> stringIndexer.save(stringIndexerPath)
>>> loadedIndexer = StringIndexer.load(stringIndexerPath)
>>> loadedIndexer.getHandleInvalid() == stringIndexer.getHandleInvalid()
True
>>> modelPath = temp_path + "/string-indexer-model"
>>> model.save(modelPath)
>>> loadedModel = StringIndexerModel.load(modelPath)
>>> loadedModel.labels == model.labels
True
>>> indexToStringPath = temp_path + "/index-to-string"
>>> inverter.save(indexToStringPath)
>>> loadedInverter = IndexToString.load(indexToStringPath)
>>> loadedInverter.getLabels() == inverter.getLabels()
True
>>> stringIndexer.getStringOrderType()
'frequencyDesc'
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
... stringOrderType="alphabetDesc")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 2.0), (1, 1.0), (2, 0.0), (3, 2.0), (4, 2.0), (5, 0.0)]
.. versionadded:: 1.4.0
"""
stringOrderType = Param(Params._dummy(), "stringOrderType",
"How to order labels of string column. The first label after " +
"ordering is assigned an index of 0. Supported options: " +
"frequencyDesc, frequencyAsc, alphabetDesc, alphabetAsc.",
typeConverter=TypeConverters.toString)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid data (unseen " +
"or NULL values) in features and label column of string type. " +
"Options are 'skip' (filter out rows with invalid data), " +
"error (throw an error), or 'keep' (put invalid data " +
"in a special additional bucket, at index numLabels).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, handleInvalid="error",
stringOrderType="frequencyDesc"):
"""
__init__(self, inputCol=None, outputCol=None, handleInvalid="error", \
stringOrderType="frequencyDesc")
"""
super(StringIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid)
self._setDefault(handleInvalid="error", stringOrderType="frequencyDesc")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, inputCol=None, outputCol=None, handleInvalid="error",
stringOrderType="frequencyDesc"):
"""
setParams(self, inputCol=None, outputCol=None, handleInvalid="error", \
stringOrderType="frequencyDesc")
Sets params for this StringIndexer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return StringIndexerModel(java_model)
@since("2.3.0")
def setStringOrderType(self, value):
"""
Sets the value of :py:attr:`stringOrderType`.
"""
return self._set(stringOrderType=value)
@since("2.3.0")
def getStringOrderType(self):
"""
Gets the value of :py:attr:`stringOrderType` or its default value 'frequencyDesc'.
"""
return self.getOrDefault(self.stringOrderType)
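# Illustrative sketch for handleInvalid (hypothetical data; assumes an
# active SparkSession named `spark`). With handleInvalid="keep", labels
# unseen during fitting are mapped to the extra index numLabels instead
# of raising an error at transform time:
#
#   train = spark.createDataFrame([("a",), ("b",), ("a",)], ["label"])
#   test = spark.createDataFrame([("a",), ("c",)], ["label"])  # "c" unseen
#   keep_indexer = StringIndexer(inputCol="label", outputCol="indexed",
#                                handleInvalid="keep")
#   keep_indexer.fit(train).transform(test).show()
#   # "a" -> 0.0 and "c" -> 2.0 (numLabels), rather than failing as "error" would.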
class StringIndexerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`StringIndexer`.
.. versionadded:: 1.4.0
"""
@property
@since("1.5.0")
def labels(self):
"""
Ordered list of labels, corresponding to indices to be assigned.
"""
return self._call_java("labels")
@inherit_doc
class IndexToString(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A :py:class:`Transformer` that maps a column of indices back to a new column of
corresponding string values.
The index-string mapping is either from the ML attributes of the input column,
or from user-supplied labels (which take precedence over ML attributes).
    See :py:class:`StringIndexer` for converting strings into indices.
.. versionadded:: 1.6.0
"""
labels = Param(Params._dummy(), "labels",
"Optional array of labels specifying index-string mapping." +
" If not provided or if empty, then metadata from inputCol is used instead.",
typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, labels=None):
"""
__init__(self, inputCol=None, outputCol=None, labels=None)
"""
super(IndexToString, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IndexToString",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inputCol=None, outputCol=None, labels=None):
"""
setParams(self, inputCol=None, outputCol=None, labels=None)
Sets params for this IndexToString.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setLabels(self, value):
"""
Sets the value of :py:attr:`labels`.
"""
return self._set(labels=value)
@since("1.6.0")
def getLabels(self):
"""
Gets the value of :py:attr:`labels` or its default value.
"""
return self.getOrDefault(self.labels)
class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that filters out stop words from input.
.. note:: null values from input array are preserved unless adding null to stopWords explicitly.
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["text"])
>>> remover = StopWordsRemover(inputCol="text", outputCol="words", stopWords=["b"])
>>> remover.transform(df).head().words == ['a', 'c']
True
>>> stopWordsRemoverPath = temp_path + "/stopwords-remover"
>>> remover.save(stopWordsRemoverPath)
>>> loadedRemover = StopWordsRemover.load(stopWordsRemoverPath)
>>> loadedRemover.getStopWords() == remover.getStopWords()
True
>>> loadedRemover.getCaseSensitive() == remover.getCaseSensitive()
True
.. versionadded:: 1.6.0
"""
stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out",
typeConverter=TypeConverters.toListString)
caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " +
"comparison over the stop words", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False):
"""
        __init__(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False)
"""
super(StopWordsRemover, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover",
self.uid)
self._setDefault(stopWords=StopWordsRemover.loadDefaultStopWords("english"),
caseSensitive=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False):
"""
        setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False)
        Sets params for this StopWordsRemover.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setStopWords(self, value):
"""
Sets the value of :py:attr:`stopWords`.
"""
return self._set(stopWords=value)
@since("1.6.0")
def getStopWords(self):
"""
Gets the value of :py:attr:`stopWords` or its default value.
"""
return self.getOrDefault(self.stopWords)
@since("1.6.0")
def setCaseSensitive(self, value):
"""
Sets the value of :py:attr:`caseSensitive`.
"""
return self._set(caseSensitive=value)
@since("1.6.0")
def getCaseSensitive(self):
"""
Gets the value of :py:attr:`caseSensitive` or its default value.
"""
return self.getOrDefault(self.caseSensitive)
@staticmethod
@since("2.0.0")
def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
return list(stopWordsObj.loadDefaultStopWords(language))
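# Illustrative sketch for loadDefaultStopWords (assumes an active
# SparkSession `spark`): swap in one of the bundled non-English stop word
# lists instead of the English default set in __init__:
#
#   swedish = StopWordsRemover.loadDefaultStopWords("swedish")
#   remover = StopWordsRemover(inputCol="text", outputCol="filtered",
#                              stopWords=swedish)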
@inherit_doc
@ignore_unicode_prefix
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A tokenizer that converts the input string to lowercase and then
splits it by white spaces.
>>> df = spark.createDataFrame([("a b c",)], ["text"])
>>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
>>> tokenizer.transform(df).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> # Change a parameter.
>>> tokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Temporarily modify a parameter.
>>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> tokenizer.transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Must use keyword arguments to specify params.
>>> tokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> tokenizerPath = temp_path + "/tokenizer"
>>> tokenizer.save(tokenizerPath)
>>> loadedTokenizer = Tokenizer.load(tokenizerPath)
>>> loadedTokenizer.transform(df).head().tokens == tokenizer.transform(df).head().tokens
True
.. versionadded:: 1.3.0
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
"""
__init__(self, inputCol=None, outputCol=None)
"""
super(Tokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Tokenizer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, inputCol=None, outputCol=None):
"""
setParams(self, inputCol=None, outputCol=None)
Sets params for this Tokenizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that merges multiple columns into a vector column.
>>> df = spark.createDataFrame([(1, 0, 3)], ["a", "b", "c"])
>>> vecAssembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
>>> vecAssembler.transform(df).head().features
DenseVector([1.0, 0.0, 3.0])
>>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
DenseVector([1.0, 0.0, 3.0])
>>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
>>> vecAssembler.transform(df, params).head().vector
DenseVector([0.0, 1.0])
>>> vectorAssemblerPath = temp_path + "/vector-assembler"
>>> vecAssembler.save(vectorAssemblerPath)
>>> loadedAssembler = VectorAssembler.load(vectorAssemblerPath)
>>> loadedAssembler.transform(df).head().freqs == vecAssembler.transform(df).head().freqs
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, inputCols=None, outputCol=None):
"""
__init__(self, inputCols=None, outputCol=None)
"""
super(VectorAssembler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorAssembler", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, inputCols=None, outputCol=None):
"""
setParams(self, inputCols=None, outputCol=None)
Sets params for this VectorAssembler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorIndexer(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Class for indexing categorical feature columns in a dataset of `Vector`.
This has 2 usage modes:
- Automatically identify categorical features (default behavior)
- This helps process a dataset of unknown vectors into a dataset with some continuous
features and some categorical features. The choice between continuous and categorical
is based upon a maxCategories parameter.
    - Set maxCategories to the maximum number of categories any categorical feature should
have.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories = 2, then feature 0 will be declared categorical and use indices {0, 1},
and feature 1 will be declared continuous.
- Index all features, if all features are categorical
- If maxCategories is set to be very large, then this will build an index of unique
values for all features.
- Warning: This can cause problems if features are continuous since this will collect ALL
unique values to the driver.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories >= 3, then both features will be declared categorical.
This returns a model which can transform categorical features to use 0-based indices.
Index stability:
- This is not guaranteed to choose the same category index across multiple runs.
- If a categorical feature includes value 0, then this is guaranteed to map value 0 to
index 0. This maintains vector sparsity.
- More stability may be added in the future.
TODO: Future extensions: The following functionality is planned for the future:
- Preserve metadata in transform; if a feature's metadata is already present,
do not recompute.
- Specify certain features to not index, either via a parameter or via existing metadata.
- Add warning if a categorical feature has only 1 category.
- Add option for allowing unknown categories.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([-1.0, 0.0]),),
... (Vectors.dense([0.0, 1.0]),), (Vectors.dense([0.0, 2.0]),)], ["a"])
>>> indexer = VectorIndexer(maxCategories=2, inputCol="a", outputCol="indexed")
>>> model = indexer.fit(df)
>>> model.transform(df).head().indexed
DenseVector([1.0, 0.0])
>>> model.numFeatures
2
>>> model.categoryMaps
{0: {0.0: 0, -1.0: 1}}
>>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test
DenseVector([0.0, 1.0])
>>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"}
>>> model2 = indexer.fit(df, params)
>>> model2.transform(df).head().vector
DenseVector([1.0, 0.0])
>>> vectorIndexerPath = temp_path + "/vector-indexer"
>>> indexer.save(vectorIndexerPath)
>>> loadedIndexer = VectorIndexer.load(vectorIndexerPath)
>>> loadedIndexer.getMaxCategories() == indexer.getMaxCategories()
True
>>> modelPath = temp_path + "/vector-indexer-model"
>>> model.save(modelPath)
>>> loadedModel = VectorIndexerModel.load(modelPath)
>>> loadedModel.numFeatures == model.numFeatures
True
>>> loadedModel.categoryMaps == model.categoryMaps
True
.. versionadded:: 1.4.0
"""
maxCategories = Param(Params._dummy(), "maxCategories",
"Threshold for the number of values a categorical feature can take " +
"(>= 2). If a feature is found to have > maxCategories values, then " +
"it is declared continuous.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, maxCategories=20, inputCol=None, outputCol=None):
"""
__init__(self, maxCategories=20, inputCol=None, outputCol=None)
"""
super(VectorIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorIndexer", self.uid)
self._setDefault(maxCategories=20)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, maxCategories=20, inputCol=None, outputCol=None):
"""
setParams(self, maxCategories=20, inputCol=None, outputCol=None)
Sets params for this VectorIndexer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMaxCategories(self, value):
"""
Sets the value of :py:attr:`maxCategories`.
"""
return self._set(maxCategories=value)
@since("1.4.0")
def getMaxCategories(self):
"""
Gets the value of maxCategories or its default value.
"""
return self.getOrDefault(self.maxCategories)
def _create_model(self, java_model):
return VectorIndexerModel(java_model)
class VectorIndexerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`VectorIndexer`.
Transform categorical features to use 0-based indices instead of their original values.
- Categorical features are mapped to indices.
- Continuous features (columns) are left unchanged.
This also appends metadata to the output column, marking features as Numeric (continuous),
Nominal (categorical), or Binary (either continuous or categorical).
Non-ML metadata is not carried over from the input to the output column.
This maintains vector sparsity.
.. versionadded:: 1.4.0
"""
@property
@since("1.4.0")
def numFeatures(self):
"""
Number of features, i.e., length of Vectors which this transforms.
"""
return self._call_java("numFeatures")
@property
@since("1.4.0")
def categoryMaps(self):
"""
Feature value index. Keys are categorical feature indices (column indices).
    Values are maps from original feature values to 0-based category indices.
If a feature is not in this map, it is treated as continuous.
"""
return self._call_java("javaCategoryMaps")
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
This class takes a feature vector and outputs a new feature vector with a subarray
of the original features.
The subset of features can be specified with either indices (`setIndices()`)
or names (`setNames()`). At least one feature must be selected. Duplicate features
are not allowed, so there can be no overlap between selected indices and names.
The output vector will order features with the selected indices first (in the order given),
followed by the selected names (in the order given).
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),),
... (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),),
... (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], ["features"])
>>> vs = VectorSlicer(inputCol="features", outputCol="sliced", indices=[1, 4])
>>> vs.transform(df).head().sliced
DenseVector([2.3, 1.0])
>>> vectorSlicerPath = temp_path + "/vector-slicer"
>>> vs.save(vectorSlicerPath)
>>> loadedVs = VectorSlicer.load(vectorSlicerPath)
>>> loadedVs.getIndices() == vs.getIndices()
True
>>> loadedVs.getNames() == vs.getNames()
True
.. versionadded:: 1.6.0
"""
indices = Param(Params._dummy(), "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.",
typeConverter=TypeConverters.toListInt)
names = Param(Params._dummy(), "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap with " +
"indices.", typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, indices=None, names=None):
"""
__init__(self, inputCol=None, outputCol=None, indices=None, names=None)
"""
super(VectorSlicer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid)
self._setDefault(indices=[], names=[])
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
"""
        setParams(self, inputCol=None, outputCol=None, indices=None, names=None)
Sets params for this VectorSlicer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setIndices(self, value):
"""
Sets the value of :py:attr:`indices`.
"""
return self._set(indices=value)
@since("1.6.0")
def getIndices(self):
"""
Gets the value of indices or its default value.
"""
return self.getOrDefault(self.indices)
@since("1.6.0")
def setNames(self, value):
"""
Sets the value of :py:attr:`names`.
"""
return self._set(names=value)
@since("1.6.0")
def getNames(self):
"""
Gets the value of names or its default value.
"""
return self.getOrDefault(self.names)
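# Illustrative sketch for name-based slicing (hypothetical column names):
# `names` only works when the input vector column carries ML attribute
# metadata naming each element, e.g. the output of VectorAssembler or
# RFormula. Selected indices come first in the output vector, followed by
# the selected names, each in the order given:
#
#   slicer = VectorSlicer(inputCol="features", outputCol="sliced",
#                         indices=[0], names=["x2"])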
@inherit_doc
@ignore_unicode_prefix
class Word2Vec(JavaEstimator, HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol,
JavaMLReadable, JavaMLWritable):
"""
    Word2Vec trains a model of `Map(String, Vector)`, i.e. it transforms each word into a code
    for use in further natural language processing or machine learning tasks.
>>> sent = ("a b " * 100 + "a c " * 10).split(" ")
>>> doc = spark.createDataFrame([(sent,), (sent,)], ["sentence"])
>>> word2Vec = Word2Vec(vectorSize=5, seed=42, inputCol="sentence", outputCol="model")
>>> model = word2Vec.fit(doc)
>>> model.getVectors().show()
+----+--------------------+
|word| vector|
+----+--------------------+
| a|[0.09461779892444...|
| b|[1.15474212169647...|
| c|[-0.3794820010662...|
+----+--------------------+
...
>>> from pyspark.sql.functions import format_number as fmt
>>> model.findSynonyms("a", 2).select("word", fmt("similarity", 5).alias("similarity")).show()
+----+----------+
|word|similarity|
+----+----------+
| b| 0.25053|
| c| -0.69805|
+----+----------+
...
>>> model.transform(doc).head().model
DenseVector([0.5524, -0.4995, -0.3599, 0.0241, 0.3461])
>>> word2vecPath = temp_path + "/word2vec"
>>> word2Vec.save(word2vecPath)
>>> loadedWord2Vec = Word2Vec.load(word2vecPath)
>>> loadedWord2Vec.getVectorSize() == word2Vec.getVectorSize()
True
>>> loadedWord2Vec.getNumPartitions() == word2Vec.getNumPartitions()
True
>>> loadedWord2Vec.getMinCount() == word2Vec.getMinCount()
True
>>> modelPath = temp_path + "/word2vec-model"
>>> model.save(modelPath)
>>> loadedModel = Word2VecModel.load(modelPath)
>>> loadedModel.getVectors().first().word == model.getVectors().first().word
True
>>> loadedModel.getVectors().first().vector == model.getVectors().first().vector
True
.. versionadded:: 1.4.0
"""
vectorSize = Param(Params._dummy(), "vectorSize",
"the dimension of codes after transforming from words",
typeConverter=TypeConverters.toInt)
numPartitions = Param(Params._dummy(), "numPartitions",
"number of partitions for sentences of words",
typeConverter=TypeConverters.toInt)
minCount = Param(Params._dummy(), "minCount",
"the minimum number of times a token must appear to be included in the " +
"word2vec model's vocabulary", typeConverter=TypeConverters.toInt)
windowSize = Param(Params._dummy(), "windowSize",
"the window size (context words from [-window, window]). Default value is 5",
typeConverter=TypeConverters.toInt)
maxSentenceLength = Param(Params._dummy(), "maxSentenceLength",
"Maximum length (in words) of each sentence in the input data. " +
"Any sentence longer than this threshold will " +
"be divided into chunks up to the size.",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000):
"""
__init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000)
"""
super(Word2Vec, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid)
self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
windowSize=5, maxSentenceLength=1000)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000):
"""
        setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
                  seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000)
Sets params for this Word2Vec.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setVectorSize(self, value):
"""
Sets the value of :py:attr:`vectorSize`.
"""
return self._set(vectorSize=value)
@since("1.4.0")
def getVectorSize(self):
"""
Gets the value of vectorSize or its default value.
"""
return self.getOrDefault(self.vectorSize)
@since("1.4.0")
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
@since("1.4.0")
def getNumPartitions(self):
"""
Gets the value of numPartitions or its default value.
"""
return self.getOrDefault(self.numPartitions)
@since("1.4.0")
def setMinCount(self, value):
"""
Sets the value of :py:attr:`minCount`.
"""
return self._set(minCount=value)
@since("1.4.0")
def getMinCount(self):
"""
Gets the value of minCount or its default value.
"""
return self.getOrDefault(self.minCount)
@since("2.0.0")
def setWindowSize(self, value):
"""
Sets the value of :py:attr:`windowSize`.
"""
return self._set(windowSize=value)
@since("2.0.0")
def getWindowSize(self):
"""
Gets the value of windowSize or its default value.
"""
return self.getOrDefault(self.windowSize)
@since("2.0.0")
def setMaxSentenceLength(self, value):
"""
Sets the value of :py:attr:`maxSentenceLength`.
"""
return self._set(maxSentenceLength=value)
@since("2.0.0")
def getMaxSentenceLength(self):
"""
Gets the value of maxSentenceLength or its default value.
"""
return self.getOrDefault(self.maxSentenceLength)
def _create_model(self, java_model):
return Word2VecModel(java_model)
class Word2VecModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`Word2Vec`.
.. versionadded:: 1.4.0
"""
@since("1.5.0")
def getVectors(self):
"""
Returns the vector representation of the words as a dataframe
with two fields, word and vector.
"""
return self._call_java("getVectors")
@since("1.5.0")
def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
        # `basestring` relies on the Python 2/3 compatibility alias defined
        # earlier in this module (aliased to str on Python 3).
        if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num)
@inherit_doc
class PCA(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
PCA trains a model to project vectors to a lower dimensional space of the
top :py:attr:`k` principal components.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
... (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
... (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
>>> df = spark.createDataFrame(data,["features"])
>>> pca = PCA(k=2, inputCol="features", outputCol="pca_features")
>>> model = pca.fit(df)
>>> model.transform(df).collect()[0].pca_features
DenseVector([1.648..., -4.013...])
>>> model.explainedVariance
DenseVector([0.794..., 0.205...])
>>> pcaPath = temp_path + "/pca"
>>> pca.save(pcaPath)
>>> loadedPca = PCA.load(pcaPath)
>>> loadedPca.getK() == pca.getK()
True
>>> modelPath = temp_path + "/pca-model"
>>> model.save(modelPath)
>>> loadedModel = PCAModel.load(modelPath)
>>> loadedModel.pc == model.pc
True
>>> loadedModel.explainedVariance == model.explainedVariance
True
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "the number of principal components",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, k=None, inputCol=None, outputCol=None):
"""
__init__(self, k=None, inputCol=None, outputCol=None)
"""
super(PCA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, k=None, inputCol=None, outputCol=None):
"""
setParams(self, k=None, inputCol=None, outputCol=None)
Set params for this PCA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of k or its default value.
"""
return self.getOrDefault(self.k)
def _create_model(self, java_model):
return PCAModel(java_model)
class PCAModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`PCA`. Transforms vectors to a lower dimensional space.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pc(self):
"""
Returns a principal components Matrix.
Each column is one principal component.
"""
return self._call_java("pc")
@property
@since("2.0.0")
def explainedVariance(self):
"""
Returns a vector of proportions of variance
explained by each principal component.
"""
return self._call_java("explainedVariance")
@inherit_doc
class RFormula(JavaEstimator, HasFeaturesCol, HasLabelCol, HasHandleInvalid,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Implements the transforms required for fitting a dataset against an
R model formula. Currently we support a limited subset of the R
operators, including '~', '.', ':', '+', and '-'. Also see the `R formula docs
<http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html>`_.
>>> df = spark.createDataFrame([
... (1.0, 1.0, "a"),
... (0.0, 2.0, "b"),
... (0.0, 0.0, "a")
... ], ["y", "x", "s"])
>>> rf = RFormula(formula="y ~ x + s")
>>> model = rf.fit(df)
>>> model.transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> rf.fit(df, {rf.formula: "y ~ . - s"}).transform(df).show()
+---+---+---+--------+-----+
| y| x| s|features|label|
+---+---+---+--------+-----+
|1.0|1.0| a| [1.0]| 1.0|
|0.0|2.0| b| [2.0]| 0.0|
|0.0|0.0| a| [0.0]| 0.0|
+---+---+---+--------+-----+
...
>>> rFormulaPath = temp_path + "/rFormula"
>>> rf.save(rFormulaPath)
>>> loadedRF = RFormula.load(rFormulaPath)
>>> loadedRF.getFormula() == rf.getFormula()
True
>>> loadedRF.getFeaturesCol() == rf.getFeaturesCol()
True
>>> loadedRF.getLabelCol() == rf.getLabelCol()
True
>>> loadedRF.getHandleInvalid() == rf.getHandleInvalid()
True
>>> str(loadedRF)
'RFormula(y ~ x + s) (uid=...)'
>>> modelPath = temp_path + "/rFormulaModel"
>>> model.save(modelPath)
>>> loadedModel = RFormulaModel.load(modelPath)
>>> loadedModel.uid == model.uid
True
>>> loadedModel.transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> str(loadedModel)
'RFormulaModel(ResolvedRFormula(label=y, terms=[x,s], hasIntercept=true)) (uid=...)'
.. versionadded:: 1.5.0
"""
formula = Param(Params._dummy(), "formula", "R model formula",
typeConverter=TypeConverters.toString)
forceIndexLabel = Param(Params._dummy(), "forceIndexLabel",
"Force to index label whether it is numeric or string",
typeConverter=TypeConverters.toBoolean)
stringIndexerOrderType = Param(Params._dummy(), "stringIndexerOrderType",
"How to order categories of a string feature column used by " +
"StringIndexer. The last category after ordering is dropped " +
"when encoding strings. Supported options: frequencyDesc, " +
"frequencyAsc, alphabetDesc, alphabetAsc. The default value " +
"is frequencyDesc. When the ordering is set to alphabetDesc, " +
"RFormula drops the same category as R when encoding strings.",
typeConverter=TypeConverters.toString)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are 'skip' (filter out rows with invalid values), " +
"'error' (throw an error), or 'keep' (put invalid data in a special " +
"additional bucket, at index numLabels).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, formula=None, featuresCol="features", labelCol="label",
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error"):
"""
__init__(self, formula=None, featuresCol="features", labelCol="label", \
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
handleInvalid="error")
"""
super(RFormula, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid)
self._setDefault(forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, formula=None, featuresCol="features", labelCol="label",
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error"):
"""
setParams(self, formula=None, featuresCol="features", labelCol="label", \
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
handleInvalid="error")
Sets params for RFormula.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setFormula(self, value):
"""
Sets the value of :py:attr:`formula`.
"""
return self._set(formula=value)
@since("1.5.0")
def getFormula(self):
"""
Gets the value of :py:attr:`formula`.
"""
return self.getOrDefault(self.formula)
@since("2.1.0")
def setForceIndexLabel(self, value):
"""
Sets the value of :py:attr:`forceIndexLabel`.
"""
return self._set(forceIndexLabel=value)
@since("2.1.0")
def getForceIndexLabel(self):
"""
Gets the value of :py:attr:`forceIndexLabel`.
"""
return self.getOrDefault(self.forceIndexLabel)
@since("2.3.0")
def setStringIndexerOrderType(self, value):
"""
Sets the value of :py:attr:`stringIndexerOrderType`.
"""
return self._set(stringIndexerOrderType=value)
@since("2.3.0")
def getStringIndexerOrderType(self):
"""
Gets the value of :py:attr:`stringIndexerOrderType` or its default value 'frequencyDesc'.
"""
return self.getOrDefault(self.stringIndexerOrderType)
def _create_model(self, java_model):
return RFormulaModel(java_model)
def __str__(self):
formulaStr = self.getFormula() if self.isDefined(self.formula) else ""
return "RFormula(%s) (uid=%s)" % (formulaStr, self.uid)
class RFormulaModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`RFormula`. Fitting is required to determine the
factor levels of formula terms.
.. versionadded:: 1.5.0
"""
def __str__(self):
resolvedFormula = self._call_java("resolvedFormula")
return "RFormulaModel(%s) (uid=%s)" % (resolvedFormula, self.uid)
@inherit_doc
class ChiSqSelector(JavaEstimator, HasFeaturesCol, HasOutputCol, HasLabelCol, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Chi-Squared feature selection, which selects categorical features to use for predicting a
categorical label.
The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
`fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame(
... [(Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0),
... (Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0),
... (Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0)],
... ["features", "label"])
>>> selector = ChiSqSelector(numTopFeatures=1, outputCol="selectedFeatures")
>>> model = selector.fit(df)
>>> model.transform(df).head().selectedFeatures
DenseVector([18.0])
>>> model.selectedFeatures
[2]
>>> chiSqSelectorPath = temp_path + "/chi-sq-selector"
>>> selector.save(chiSqSelectorPath)
>>> loadedSelector = ChiSqSelector.load(chiSqSelectorPath)
>>> loadedSelector.getNumTopFeatures() == selector.getNumTopFeatures()
True
>>> modelPath = temp_path + "/chi-sq-selector-model"
>>> model.save(modelPath)
>>> loadedModel = ChiSqSelectorModel.load(modelPath)
>>> loadedModel.selectedFeatures == model.selectedFeatures
True
.. versionadded:: 2.0.0
"""
selectorType = Param(Params._dummy(), "selectorType",
"The selector type of the ChisqSelector. " +
"Supported options: numTopFeatures (default), percentile and fpr.",
typeConverter=TypeConverters.toString)
numTopFeatures = \
Param(Params._dummy(), "numTopFeatures",
"Number of features that selector will select, ordered by ascending p-value. " +
"If the number of features is < numTopFeatures, then this will select " +
"all features.", typeConverter=TypeConverters.toInt)
percentile = Param(Params._dummy(), "percentile", "Percentile of features that selector " +
"will select, ordered by ascending p-value.",
typeConverter=TypeConverters.toFloat)
fpr = Param(Params._dummy(), "fpr", "The highest p-value for features to be kept.",
typeConverter=TypeConverters.toFloat)
fdr = Param(Params._dummy(), "fdr", "The upper bound of the expected false discovery rate.",
typeConverter=TypeConverters.toFloat)
fwe = Param(Params._dummy(), "fwe", "The upper bound of the expected family-wise error rate.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, numTopFeatures=50, featuresCol="features", outputCol=None,
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
"""
__init__(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
fdr=0.05, fwe=0.05)
"""
super(ChiSqSelector, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ChiSqSelector", self.uid)
self._setDefault(numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1,
fpr=0.05, fdr=0.05, fwe=0.05)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None,
labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
"""
setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
fdr=0.05, fwe=0.05)
Sets params for this ChiSqSelector.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.1.0")
def setSelectorType(self, value):
"""
Sets the value of :py:attr:`selectorType`.
"""
return self._set(selectorType=value)
@since("2.1.0")
def getSelectorType(self):
"""
Gets the value of selectorType or its default value.
"""
return self.getOrDefault(self.selectorType)
@since("2.0.0")
def setNumTopFeatures(self, value):
"""
Sets the value of :py:attr:`numTopFeatures`.
Only applicable when selectorType = "numTopFeatures".
"""
return self._set(numTopFeatures=value)
@since("2.0.0")
def getNumTopFeatures(self):
"""
Gets the value of numTopFeatures or its default value.
"""
return self.getOrDefault(self.numTopFeatures)
@since("2.1.0")
def setPercentile(self, value):
"""
Sets the value of :py:attr:`percentile`.
Only applicable when selectorType = "percentile".
"""
return self._set(percentile=value)
@since("2.1.0")
def getPercentile(self):
"""
Gets the value of percentile or its default value.
"""
return self.getOrDefault(self.percentile)
@since("2.1.0")
def setFpr(self, value):
"""
Sets the value of :py:attr:`fpr`.
Only applicable when selectorType = "fpr".
"""
return self._set(fpr=value)
@since("2.1.0")
def getFpr(self):
"""
Gets the value of fpr or its default value.
"""
return self.getOrDefault(self.fpr)
@since("2.2.0")
def setFdr(self, value):
"""
Sets the value of :py:attr:`fdr`.
Only applicable when selectorType = "fdr".
"""
return self._set(fdr=value)
@since("2.2.0")
def getFdr(self):
"""
Gets the value of fdr or its default value.
"""
return self.getOrDefault(self.fdr)
@since("2.2.0")
def setFwe(self, value):
"""
Sets the value of :py:attr:`fwe`.
Only applicable when selectorType = "fwe".
"""
return self._set(fwe=value)
@since("2.2.0")
def getFwe(self):
"""
Gets the value of fwe or its default value.
"""
return self.getOrDefault(self.fwe)
def _create_model(self, java_model):
return ChiSqSelectorModel(java_model)
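# Illustrative sketch for the alternative selection methods (reusing `df`
# from the class doctest above): only the parameter matching the chosen
# selectorType is consulted, the others are ignored:
#
#   fpr_selector = ChiSqSelector(selectorType="fpr", fpr=0.05,
#                                outputCol="selectedFeatures")
#   pct_selector = ChiSqSelector(selectorType="percentile", percentile=0.5,
#                                outputCol="selectedFeatures")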
class ChiSqSelectorModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`ChiSqSelector`.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def selectedFeatures(self):
"""
List of indices to select (filter).
"""
return self._call_java("selectedFeatures")
if __name__ == "__main__":
import doctest
import tempfile
import pyspark.ml.feature
from pyspark.sql import Row, SparkSession
globs = globals().copy()
features = pyspark.ml.feature.__dict__.copy()
globs.update(features)
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.feature tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"),
Row(id=2, label="c"), Row(id=3, label="a"),
Row(id=4, label="a"), Row(id=5, label="c")], 2)
globs['stringIndDf'] = spark.createDataFrame(testData)
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| {
"content_hash": "b6964e23ad25cb934a3d500e7350234f",
"timestamp": "",
"source": "github",
"line_count": 3457,
"max_line_length": 100,
"avg_line_length": 37.1527335840324,
"alnum_prop": 0.603525463846088,
"repo_name": "SHASHANKB/spark",
"id": "050537b811f610f45a51d8c3d9eb20ca213e8519",
"size": "129222",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/feature.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33076"
},
{
"name": "Batchfile",
"bytes": "24315"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10056"
},
{
"name": "Java",
"bytes": "3027017"
},
{
"name": "JavaScript",
"bytes": "141001"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "8788"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2316513"
},
{
"name": "R",
"bytes": "1075111"
},
{
"name": "Roff",
"bytes": "14736"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "23250269"
},
{
"name": "Shell",
"bytes": "155047"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
} |
class Node:
    def __init__(self, value):
        self.data = value
        self.next_node = None
def run(input_data1, input_data2):
    # Digits are stored least-significant first, so the node at position i
    # contributes data * 10**i.
    i = 0
    number1 = 0
    while input_data1 is not None:
        number1 += input_data1.data * pow(10, i)
        input_data1 = input_data1.next_node
        i += 1
i = 0
number2 = 0
while input_data2 is not None:
number2 += input_data2.data * pow(10, i)
input_data2 = input_data2.next_node
i += 1
total = number1 + number2
total_str = str(total)
    output_data = None
    # Build the result with the least significant digit at the head, matching
    # the input representation: walk the decimal string from its most
    # significant digit and prepend each node, which leaves the head at the
    # least significant digit.
    for char in total_str:
        node = Node(int(char))
        node.next_node = output_data
        output_data = node
    return output_data
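if __name__ == "__main__":
    # Quick self-check, not part of the exercise statement: 617 + 295 = 912.
    # Digits are stored least-significant first, so 617 is 7 -> 1 -> 6.
    def build(digits):
        head = None
        for d in reversed(digits):
            node = Node(d)
            node.next_node = head
            head = node
        return head

    result = run(build([7, 1, 6]), build([5, 9, 2]))
    out = []
    while result is not None:
        out.append(str(result.data))
        result = result.next_node
    print("".join(out))  # prints "219", i.e. 912 least-significant first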
| {
"content_hash": "4ce5e1cca5315f632cf8e68fd1ee040a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 48,
"avg_line_length": 20.914285714285715,
"alnum_prop": 0.5519125683060109,
"repo_name": "gonditeniz/cracking-coding-interview",
"id": "6bbcac49fcd3d59fc7f10c87da4adc620d05f17b",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/chapter2/exercise5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3783"
},
{
"name": "Java",
"bytes": "4166"
},
{
"name": "Python",
"bytes": "18819"
},
{
"name": "Shell",
"bytes": "4886"
}
],
"symlink_target": ""
} |
from django.contrib.admin.filters import SimpleListFilter
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.contrib.admin import helpers
from .forms import TagObjectForm
class AdminTagAllMixIn(object):
def tag_all(self, request, queryset):
"""
Tag all selected requests with given tags
"""
opts = self.model._meta
# Check that the user has change permission for the actual model
if not self.has_change_permission(request):
raise PermissionDenied
        # The user has already submitted tags via the confirmation form
if request.POST.get('tags'):
form = TagObjectForm(None, request.POST, tags=[],
resource_name=self.autocomplete_resource_name)
if form.is_valid():
tags = form.cleaned_data['tags']
for obj in queryset:
obj.tags.set(*tags)
obj.save()
self.message_user(request, _("Successfully added tags"))
# Return None to display the change list page again.
return None
self.message_user(request, _("Form invalid"))
tags = set()
for q in queryset:
            tags |= set(q.tags.all())
form = TagObjectForm(None, tags=tags,
resource_name=self.autocomplete_resource_name)
context = {
'opts': opts,
'queryset': queryset,
'media': self.media,
'form': form,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'applabel': opts.app_label
}
# Display the confirmation page
return TemplateResponse(request, 'admin_utils/admin_tag_all.html',
context, current_app=self.admin_site.name)
tag_all.short_description = _("Tag all with...")
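# Illustrative sketch (hypothetical admin class): mix the action into a
# ModelAdmin and expose it via `actions`. `autocomplete_resource_name` is
# whatever resource name the tag autocomplete endpoint expects:
#
#   class FoiRequestAdmin(AdminTagAllMixIn, admin.ModelAdmin):
#       actions = ['tag_all']
#       autocomplete_resource_name = 'tags'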
class NullFilterSpec(SimpleListFilter):
"""
Taken from
http://stackoverflow.com/questions/7691890/filtering-django-admin-by-null-is-not-null
under CC-By 3.0
"""
title = u''
parameter_name = u''
def lookups(self, request, model_admin):
return (
('1', _('Has value')),
('0', _('None')),
)
def queryset(self, request, queryset):
kwargs = {
            self.parameter_name: None,
}
if self.value() == '0':
return queryset.filter(**kwargs)
if self.value() == '1':
return queryset.exclude(**kwargs)
return queryset
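# Illustrative sketch (hypothetical field name): subclass with a concrete
# title and parameter name, then add the class to a ModelAdmin's
# `list_filter`:
#
#   class ResolutionNullFilterSpec(NullFilterSpec):
#       title = _('resolution')
#       parameter_name = u'resolution'
#
#   class FoiRequestAdmin(admin.ModelAdmin):
#       list_filter = (ResolutionNullFilterSpec,)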
| {
"content_hash": "afc65d4e0bf82ca82379527169a097d9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 89,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.5729402872260015,
"repo_name": "LilithWittmann/froide",
"id": "96e4ba1d9a97fec98026b2f20a8c101b20221c10",
"size": "2646",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "froide/helper/admin_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17829"
},
{
"name": "HTML",
"bytes": "162270"
},
{
"name": "Java",
"bytes": "1814475"
},
{
"name": "JavaScript",
"bytes": "52679"
},
{
"name": "Makefile",
"bytes": "329"
},
{
"name": "Python",
"bytes": "1614641"
},
{
"name": "Shell",
"bytes": "1621"
}
],
"symlink_target": ""
} |
"""Box widgets.
These widgets are containers that can be used to
group other widgets together and control their
relative layouts.
"""
from .widget import register, widget_serialization, Widget
from .domwidget import DOMWidget
from .widget_core import CoreWidget
from .docutils import doc_subst
from .trait_types import TypedTuple
from traitlets import Unicode, CaselessStrEnum, Instance
_doc_snippets = {}
_doc_snippets['box_params'] = """
children: iterable of Widget instances
list of widgets to display
box_style: str
one of 'success', 'info', 'warning' or 'danger', or ''.
Applies a predefined style to the box. Defaults to '',
which applies no pre-defined style.
"""
@register
@doc_subst(_doc_snippets)
class Box(DOMWidget, CoreWidget):
""" Displays multiple widgets in a group.
The widgets are laid out horizontally.
Parameters
----------
{box_params}
Examples
--------
>>> import ipywidgets as widgets
>>> title_widget = widgets.HTML('<em>Box Example</em>')
>>> slider = widgets.IntSlider()
>>> widgets.Box([title_widget, slider])
"""
_model_name = Unicode('BoxModel').tag(sync=True)
_view_name = Unicode('BoxView').tag(sync=True)
# Child widgets in the container.
# Using a tuple here to force reassignment to update the list.
# When a proper notifying-list trait exists, use that instead.
children = TypedTuple(trait=Instance(Widget), help="List of widget children").tag(
sync=True, **widget_serialization)
box_style = CaselessStrEnum(
values=['success', 'info', 'warning', 'danger', ''], default_value='',
help="""Use a predefined styling for the box.""").tag(sync=True)
def __init__(self, children=(), **kwargs):
kwargs['children'] = children
super(Box, self).__init__(**kwargs)
self.on_displayed(Box._fire_children_displayed)
def _fire_children_displayed(self):
for child in self.children:
child._handle_displayed()
@register
@doc_subst(_doc_snippets)
class VBox(Box):
""" Displays multiple widgets vertically using the flexible box model.
Parameters
----------
{box_params}
Examples
--------
>>> import ipywidgets as widgets
>>> title_widget = widgets.HTML('<em>Vertical Box Example</em>')
>>> slider = widgets.IntSlider()
>>> widgets.VBox([title_widget, slider])
"""
_model_name = Unicode('VBoxModel').tag(sync=True)
_view_name = Unicode('VBoxView').tag(sync=True)
@register
@doc_subst(_doc_snippets)
class HBox(Box):
""" Displays multiple widgets horizontally using the flexible box model.
Parameters
----------
{box_params}
Examples
--------
>>> import ipywidgets as widgets
>>> title_widget = widgets.HTML('<em>Horizontal Box Example</em>')
>>> slider = widgets.IntSlider()
>>> widgets.HBox([title_widget, slider])
"""
_model_name = Unicode('HBoxModel').tag(sync=True)
_view_name = Unicode('HBoxView').tag(sync=True)
@register
@doc_subst(_doc_snippets)
class GridBox(Box):
""" Displays multiple widgets in rows and columns using the grid box model.
Parameters
----------
{box_params}
Examples
--------
>>> import ipywidgets as widgets
>>> title_widget = widgets.HTML('<em>Grid Box Example</em>')
>>> slider = widgets.IntSlider()
>>> button1 = widgets.Button(description='1')
>>> button2 = widgets.Button(description='2')
>>> # Create a grid with two columns, splitting space equally
>>> layout = widgets.Layout(grid_template_columns='1fr 1fr')
>>> widgets.GridBox([title_widget, slider, button1, button2], layout=layout)
"""
_model_name = Unicode('GridBoxModel').tag(sync=True)
_view_name = Unicode('GridBoxView').tag(sync=True)
| {
"content_hash": "60bae54342165b382083691e695dd99e",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 86,
"avg_line_length": 29.53488372093023,
"alnum_prop": 0.6438320209973754,
"repo_name": "sserrot/champion_relationships",
"id": "516ee19b9a5a9617e6a8df0836b235fa1749c853",
"size": "3912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/ipywidgets/widgets/widget_box.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
import opendbpy as odb
import helper
import odbUnitTest
class TestInst(odbUnitTest.TestCase):
def setUp(self):
self.db, self.lib = helper.createSimpleDB()
self.block = helper.create2LevelBlock(self.db, self.lib, self.db.getChip())
self.i1 = self.block.findInst('i1')
def tearDown(self):
self.db.destroy(self.db)
def test_swap_master(self):
self.assertEqual(self.i1.getMaster().getName(), 'and2')
#testing with a gate with different mterm names
gate = helper.createMaster2X1(self.lib, '_g2', 800, 800, '_a', '_b', '_o')
self.assertFalse(self.i1.swapMaster(gate))
self.assertNotEqual(self.i1.getMaster().getName(), '_g2')
for iterm in self.i1.getITerms():
self.assertNotIn(iterm.getMTerm().getName(), ['_a', '_b', '_o'])
#testing with a gate with different mterms number
gate = helper.createMaster3X1(self.lib, '_g3', 800, 800, '_a', '_b', '_c', '_o')
self.assertFalse(self.i1.swapMaster(gate))
self.assertNotEqual(self.i1.getMaster().getName(), '_g3')
for iterm in self.i1.getITerms():
self.assertNotIn(iterm.getMTerm().getName(), ['_a', '_b', '_c', '_o'])
#testing with a gate with same mterm names
gate = helper.createMaster2X1(self.lib, 'g2', 800, 800, 'a', 'b', 'o')
self.assertTrue(self.i1.swapMaster(gate))
self.assertEqual(self.i1.getMaster().getName(), 'g2')
self.assertEqual(self.i1.getMaster().getWidth(), 800)
self.assertEqual(self.i1.getMaster().getHeight(), 800)
if __name__ == '__main__':
odbUnitTest.mainParallel(TestInst)
# odbUnitTest.main()
| {
"content_hash": "0e2cc8464227790e449a24fe5eb42eb7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 88,
"avg_line_length": 45.7027027027027,
"alnum_prop": 0.615612063867534,
"repo_name": "The-OpenROAD-Project/OpenROAD",
"id": "ac6a3a54a9a6a67c544134e25281338b3b7e2abe",
"size": "1691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/odb/test/unitTestsPython/TestInst.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "679482"
},
{
"name": "C++",
"bytes": "18429342"
},
{
"name": "CMake",
"bytes": "148596"
},
{
"name": "Cuda",
"bytes": "7441"
},
{
"name": "Dockerfile",
"bytes": "3754"
},
{
"name": "Python",
"bytes": "245126"
},
{
"name": "Ruby",
"bytes": "498"
},
{
"name": "SWIG",
"bytes": "314266"
},
{
"name": "Shell",
"bytes": "39400"
},
{
"name": "Tcl",
"bytes": "1767673"
},
{
"name": "Verilog",
"bytes": "51524137"
},
{
"name": "Yacc",
"bytes": "496743"
}
],
"symlink_target": ""
} |
import mock
from jacket.objects import compute
from jacket.objects.compute import ec2 as ec2_obj
from jacket.tests.compute.unit.objects import test_objects
fake_map = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 1,
'uuid': 'fake-uuid-2',
}
class _TestEC2InstanceMapping(object):
@staticmethod
    def _compare(test, db_dict, obj):
        for field, value in db_dict.items():
            test.assertEqual(value, getattr(obj, field))
def test_create(self):
imap = ec2_obj.EC2InstanceMapping(context=self.context)
imap.uuid = 'fake-uuid-2'
with mock.patch.object(compute, 'ec2_instance_create') as create:
create.return_value = fake_map
imap.create()
self.assertEqual(self.context, imap._context)
imap._context = None
self._compare(self, fake_map, imap)
def test_get_by_uuid(self):
with mock.patch.object(compute, 'ec2_instance_get_by_uuid') as get:
get.return_value = fake_map
imap = ec2_obj.EC2InstanceMapping.get_by_uuid(self.context,
'fake-uuid-2')
self._compare(self, fake_map, imap)
def test_get_by_ec2_id(self):
with mock.patch.object(compute, 'ec2_instance_get_by_id') as get:
get.return_value = fake_map
imap = ec2_obj.EC2InstanceMapping.get_by_id(self.context, 1)
self._compare(self, fake_map, imap)
class TestEC2InstanceMapping(test_objects._LocalTest, _TestEC2InstanceMapping):
pass
class TestRemoteEC2InstanceMapping(test_objects._RemoteTest,
_TestEC2InstanceMapping):
pass
class _TestEC2VolumeMapping(object):
@staticmethod
def _compare(test, expected, obj):
for field, value in expected.items():
test.assertEqual(value, getattr(obj, field))
def test_create(self):
vmap = ec2_obj.EC2VolumeMapping(context=self.context)
vmap.uuid = 'fake-uuid-2'
with mock.patch.object(compute, 'ec2_volume_create') as create:
create.return_value = fake_map
vmap.create()
self.assertEqual(self.context, vmap._context)
vmap._context = None
self._compare(self, fake_map, vmap)
def test_get_by_uuid(self):
with mock.patch.object(compute, 'ec2_volume_get_by_uuid') as get:
get.return_value = fake_map
vmap = ec2_obj.EC2VolumeMapping.get_by_uuid(self.context,
'fake-uuid-2')
self._compare(self, fake_map, vmap)
def test_get_by_ec2_id(self):
with mock.patch.object(compute, 'ec2_volume_get_by_id') as get:
get.return_value = fake_map
vmap = ec2_obj.EC2VolumeMapping.get_by_id(self.context, 1)
self._compare(self, fake_map, vmap)
class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping):
pass
class TestRemoteEC2VolumeMapping(test_objects._RemoteTest,
_TestEC2VolumeMapping):
pass
class _TestEC2SnapshotMapping(object):
@staticmethod
def _compare(test, expected, obj):
for field, value in expected.items():
test.assertEqual(value, getattr(obj, field))
def test_create(self):
smap = ec2_obj.EC2SnapshotMapping(context=self.context)
smap.uuid = 'fake-uuid-2'
with mock.patch.object(compute, 'ec2_snapshot_create') as create:
create.return_value = fake_map
smap.create()
self.assertEqual(self.context, smap._context)
smap._context = None
self._compare(self, fake_map, smap)
def test_get_by_uuid(self):
with mock.patch.object(compute, 'ec2_snapshot_get_by_uuid') as get:
get.return_value = fake_map
smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context,
'fake-uuid-2')
self._compare(self, fake_map, smap)
def test_get_by_ec2_id(self):
with mock.patch.object(compute, 'ec2_snapshot_get_by_ec2_id') as get:
get.return_value = fake_map
smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1)
self._compare(self, fake_map, smap)
class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping):
pass
class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest,
_TestEC2SnapshotMapping):
pass
class _TestS3ImageMapping(object):
@staticmethod
def _compare(test, expected, obj):
for field, value in expected.items():
test.assertEqual(value, obj[field])
def test_create(self):
s3imap = ec2_obj.S3ImageMapping(context=self.context)
s3imap.uuid = 'fake-uuid-2'
with mock.patch.object(compute, 's3_image_create') as create:
create.return_value = fake_map
s3imap.create()
self.assertEqual(self.context, s3imap._context)
s3imap._context = None
self._compare(self, fake_map, s3imap)
def test_get_by_uuid(self):
with mock.patch.object(compute, 's3_image_get_by_uuid') as get:
get.return_value = fake_map
s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context,
'fake-uuid-2')
self._compare(self, fake_map, s3imap)
def test_get_by_s3_id(self):
with mock.patch.object(compute, 's3_image_get') as get:
get.return_value = fake_map
s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1)
self._compare(self, fake_map, s3imap)
class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping):
pass
class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping):
pass
class _TestEC2Ids(object):
@mock.patch('compute.api.ec2.ec2utils.image_type')
@mock.patch('compute.api.ec2.ec2utils.glance_id_to_ec2_id')
@mock.patch('compute.api.ec2.ec2utils.id_to_ec2_inst_id')
def test_get_by_instance(self, mock_inst, mock_glance, mock_type):
mock_inst.return_value = 'fake-ec2-inst-id'
mock_glance.side_effect = ['fake-ec2-ami-id',
'fake-ec2-kernel-id',
'fake-ec2-ramdisk-id']
mock_type.side_effect = [mock.sentinel.ec2_kernel_type,
mock.sentinel.ec2_ramdisk_type]
inst = compute.Instance(uuid='fake-uuid', image_ref='fake-image-id',
kernel_id='fake-kernel-id',
ramdisk_id='fake-ramdisk-id')
result = ec2_obj.EC2Ids.get_by_instance(self.context, inst)
self.assertEqual('fake-ec2-inst-id', result.instance_id)
self.assertEqual('fake-ec2-ami-id', result.ami_id)
self.assertEqual('fake-ec2-kernel-id', result.kernel_id)
self.assertEqual('fake-ec2-ramdisk-id', result.ramdisk_id)
@mock.patch('compute.api.ec2.ec2utils.glance_id_to_ec2_id')
@mock.patch('compute.api.ec2.ec2utils.id_to_ec2_inst_id')
def test_get_by_instance_no_image_ref(self, mock_inst, mock_glance):
mock_inst.return_value = 'fake-ec2-inst-id'
mock_glance.return_value = None
inst = compute.Instance(uuid='fake-uuid', image_ref=None,
kernel_id=None, ramdisk_id=None)
result = ec2_obj.EC2Ids.get_by_instance(self.context, inst)
self.assertEqual('fake-ec2-inst-id', result.instance_id)
self.assertIsNone(result.ami_id)
self.assertIsNone(result.kernel_id)
self.assertIsNone(result.ramdisk_id)
@mock.patch('compute.api.ec2.ec2utils.image_type')
@mock.patch('compute.api.ec2.ec2utils.glance_id_to_ec2_id')
@mock.patch('compute.api.ec2.ec2utils.id_to_ec2_inst_id')
def test_get_by_instance_no_kernel_id(self, mock_inst, mock_glance,
mock_type):
mock_inst.return_value = 'fake-ec2-inst-id'
mock_glance.side_effect = ['fake-ec2-ami-id',
'fake-ec2-ramdisk-id']
mock_type.return_value = mock.sentinel.ec2_ramdisk_type
inst = compute.Instance(uuid='fake-uuid', image_ref='fake-image-id',
kernel_id=None, ramdisk_id='fake-ramdisk-id')
result = ec2_obj.EC2Ids.get_by_instance(self.context, inst)
self.assertEqual('fake-ec2-inst-id', result.instance_id)
self.assertEqual('fake-ec2-ami-id', result.ami_id)
self.assertIsNone(result.kernel_id)
self.assertEqual('fake-ec2-ramdisk-id', result.ramdisk_id)
@mock.patch('compute.api.ec2.ec2utils.image_type')
@mock.patch('compute.api.ec2.ec2utils.glance_id_to_ec2_id')
@mock.patch('compute.api.ec2.ec2utils.id_to_ec2_inst_id')
def test_get_by_instance_no_ramdisk_id(self, mock_inst, mock_glance,
mock_type):
mock_inst.return_value = 'fake-ec2-inst-id'
mock_glance.side_effect = ['fake-ec2-ami-id',
'fake-ec2-kernel-id']
mock_type.return_value = mock.sentinel.ec2_kernel_type
inst = compute.Instance(uuid='fake-uuid', image_ref='fake-image-id',
kernel_id='fake-kernel-id', ramdisk_id=None)
result = ec2_obj.EC2Ids.get_by_instance(self.context, inst)
self.assertEqual('fake-ec2-inst-id', result.instance_id)
self.assertEqual('fake-ec2-ami-id', result.ami_id)
self.assertEqual('fake-ec2-kernel-id', result.kernel_id)
self.assertIsNone(result.ramdisk_id)
class TestEC2Ids(test_objects._LocalTest, _TestEC2Ids):
pass
class TestRemoteEC2Ids(test_objects._RemoteTest, _TestEC2Ids):
pass
| {
"content_hash": "60c0e8c8845ec0604065be40baa590e4",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 79,
"avg_line_length": 37.90494296577947,
"alnum_prop": 0.6098906610492527,
"repo_name": "HybridF5/jacket",
"id": "8057440331c78507a5ffaddfbe25587acbc01d62",
"size": "10580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/unit/objects/test_ec2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from murano.dsl import dsl_exception
from murano.dsl import exceptions
from murano.tests.unit.dsl.foundation import object_model as om
from murano.tests.unit.dsl.foundation import test_case
class TestExecution(test_case.DslTestCase):
def _load(self):
return self.new_runner(
om.Object('SampleClass1', stringProperty='STRING',
classProperty=om.Object(
'SampleClass2', class2Property='ANOTHER_STRING')))
def test_load(self):
self._load()
def test_load_failure(self):
self.assertRaises(
exceptions.ContractViolationException,
self.new_runner,
om.Object('SampleClass1'))
def test_trace(self):
runner = self._load()
self.assertEqual([], self.traces)
runner.testTrace(123)
self.assertEqual([123, 'STRING', 'ANOTHER_STRING'], self.traces)
runner.testTrace(321)
self.assertEqual([123, 'STRING', 'ANOTHER_STRING',
321, 'STRING', 'ANOTHER_STRING'],
self.traces)
def test_exception(self):
class CustomException(Exception):
pass
def raise_exception():
raise CustomException()
self.register_function(raise_exception, 'raiseException')
runner = self._load()
self.assertRaises(CustomException, runner.testException)
runner.preserve_exception = True
self.assertRaises(dsl_exception.MuranoPlException,
runner.testException)
def test_return(self):
self.assertEqual(3, self._load().testReturn(3))
| {
"content_hash": "de2cb21d13c8a0283b128e50307b1cc0",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 34.25,
"alnum_prop": 0.6131386861313869,
"repo_name": "openstack/murano",
"id": "2a94c05d1f16835c8c782f1e1454825a6026d205",
"size": "2255",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "murano/tests/unit/dsl/test_execution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Python",
"bytes": "1817159"
},
{
"name": "Shell",
"bytes": "37531"
}
],
"symlink_target": ""
} |
from . import consts
from interage.api.utils.strings import remove_lead_and_trail_slash
class APIUri:
medicines = consts.MEDICINES_URI
interactions = consts.INTERACTIONS_URI
active_principles = consts.ACTIVE_PRINCIPLES_URI
obtain_token = consts.OBTAIN_TOKEN_URI
metadata = consts.METADATA
class APIInteractionsMetadata:
actions = consts.INTERACTION_ACTIONS
evidences = consts.INTERACTION_EVIDENCES
severities = consts.INTERACTION_SEVERITIES
class APISettings:
uris = APIUri
url = consts.API_URL
version = consts.VERSION
auth_keys = consts.AUTH_KEYS
interactions_metadata = APIInteractionsMetadata
@classmethod
def get_full_url(cls, uri, append_version=True):
uri = remove_lead_and_trail_slash(uri)
url = cls.url
version = cls.version
if append_version:
result = '{0}/{1}/{2}'.format(url, version, uri)
else:
result = '{0}/{1}'.format(url, uri)
if len(uri):
result += '/'
return result
@classmethod
def register_token(cls, token):
cls.token = token
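# Illustrative usage sketch (not part of the original module; the URL and
# version values are hypothetical, since they come from consts):
# with consts.API_URL == 'https://api.example.com' and consts.VERSION == 'v1',
# APISettings.get_full_url('medicines/') -> 'https://api.example.com/v1/medicines/'
# APISettings.get_full_url('medicines', append_version=False) -> 'https://api.example.com/medicines/'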
| {
"content_hash": "27f28a325630d582ccdac52e31937040",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 27.930232558139537,
"alnum_prop": 0.6178184845961698,
"repo_name": "IntMed/interage_python_sdk",
"id": "32582a07980b3689bb04d86ae235c0a8eb1992f4",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interage/api/config/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21616"
}
],
"symlink_target": ""
} |
'''
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
'''
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings.development')
application = get_wsgi_application()
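# Illustrative only (not part of the original file): a WSGI server can serve
# this callable directly, e.g. `gunicorn server.wsgi:application`, where the
# module path follows from this file's location at server/wsgi.py.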
| {
"content_hash": "c851db70cac9227067fbddef8a740905",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.0625,
"alnum_prop": 0.773067331670823,
"repo_name": "njharman/interview_PPWA",
"id": "8e8686937b34062b42df76f4d4b28086373c76be",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11841"
},
{
"name": "HTML",
"bytes": "6251"
},
{
"name": "Python",
"bytes": "28495"
}
],
"symlink_target": ""
} |
import sys
import json
from libcloud.test import MockHttp, LibcloudTestCase, unittest
from libcloud.compute import providers
from libcloud.utils.py3 import httplib
from libcloud.compute.base import NodeImage, NodeLocation, NodeAuthSSHKey
from libcloud.test.secrets import KAMATERA_PARAMS
from libcloud.compute.types import Provider, NodeState
from libcloud.common.exceptions import BaseHTTPError
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.compute.drivers.kamatera import KamateraNodeDriver
class KamateraAuthenticationTests(LibcloudTestCase):
def setUp(self):
KamateraNodeDriver.connectionCls.conn_class = KamateraMockHttp
self.driver = KamateraNodeDriver("nosuchuser", "nopwd")
def test_authentication_fails(self):
with self.assertRaises(BaseHTTPError):
self.driver.list_locations()
class KamateraNodeDriverTests(LibcloudTestCase):
def setUp(self):
KamateraTestDriver.connectionCls.conn_class = KamateraMockHttp
self.driver = KamateraTestDriver(*KAMATERA_PARAMS)
self.eu_node_location = NodeLocation(
id="EU", name="Amsterdam", country="The Netherlands", driver=self.driver
)
self.il_node_location = NodeLocation(
id="IL", name="Rosh Haayin", country="Israel", driver=self.driver
)
self.centos_8_EU_node_image = NodeImage(
id="EU:6000C2987c9641fd2619a149ba2ca01a",
name="CentOS 8.0 64-bit - Minimal Configuration",
driver=self.driver,
extra={
"datacenter": "EU",
"os": "CentOS",
"code": "8.0 64bit_minimal",
"osDiskSizeGB": 5,
"ramMBMin": {"A": 256, "B": 256, "T": 256, "D": 256},
},
)
self.small_node_size = self.driver.ex_get_size(
ramMB=4096,
diskSizeGB=30,
cpuType="B",
cpuCores=2,
monthlyTrafficPackage="t5000",
id="small",
name="small",
)
def test_creating_driver(self):
cls = providers.get_driver(Provider.KAMATERA)
self.assertIs(cls, KamateraNodeDriver)
def test_features(self):
features = self.driver.features["create_node"]
self.assertIn("password", features)
self.assertIn("generates_password", features)
self.assertIn("ssh_key", features)
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertTrue(len(locations) == 13)
self.assert_object(self.il_node_location, objects=locations)
def test_list_sizes(self):
sizes = self.driver.list_sizes(self.eu_node_location)
self.assertTrue(len(sizes) >= 1)
self.assert_object(self.small_node_size, objects=sizes)
def test_list_images(self):
images = self.driver.list_images(self.eu_node_location)
self.assertTrue(len(images) > 10)
self.assert_object(self.centos_8_EU_node_image, objects=images)
def test_ex_list_capabilities(self):
capabilities = self.driver.ex_list_capabilities(self.eu_node_location)
self.assertEqual(
{
"cpuTypes",
"defaultMonthlyTrafficPackage",
"diskSizeGB",
"monthlyTrafficPackage",
},
set(capabilities.keys()),
)
self.assertTrue(len(capabilities["cpuTypes"]), 4)
self.assertEqual(
{"id", "description", "name", "ramMB", "cpuCores"},
set(capabilities["cpuTypes"][0]),
)
def test_create_node(self):
node = self.driver.create_node(
name="test_server",
size=self.small_node_size,
image=self.centos_8_EU_node_image,
location=self.eu_node_location,
)
self.assertTrue(len(node.id) > 8)
self.assertEqual(node.name, "my-server")
self.assertEqual(node.state, NodeState.RUNNING)
self.assertTrue(len(node.public_ips) > 0)
self.assertTrue(len(node.private_ips) > 0)
self.assertEqual(node.driver, self.driver)
self.assertTrue(len(node.extra["generated_password"]) > 0)
def test_create_node_with_ssh_keys(self):
node = self.driver.create_node(
name="test_server_pubkey",
size=self.small_node_size,
image=self.centos_8_EU_node_image,
location=self.eu_node_location,
auth=NodeAuthSSHKey("publickey"),
)
self.assertTrue(len(node.id) > 8)
self.assertEqual(node.name, "my-server")
self.assertEqual(node.state, NodeState.RUNNING)
self.assertTrue(len(node.public_ips) > 0)
self.assertTrue(len(node.private_ips) > 0)
self.assertEqual(node.driver, self.driver)
self.assertFalse("generated_password" in node.extra)
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertTrue(len(nodes) >= 1)
node = nodes[0]
self.assertEqual(node.name, "test_server")
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.driver, self.driver)
def test_list_nodes_full(self):
nodes = self.driver.list_nodes(ex_full_details=True)
self.assertTrue(len(nodes) >= 1)
node = nodes[0]
self.assertEqual(node.name, "my-server")
self.assertEqual(node.state, NodeState.RUNNING)
self.assertTrue(len(node.public_ips) > 0)
self.assertTrue(len(node.private_ips) > 0)
self.assertEqual(node.driver, self.driver)
def test_reboot_node(self):
nodes = self.driver.list_nodes()
success = self.driver.reboot_node(nodes[0])
self.assertTrue(success)
def assert_object(self, expected_object, objects):
same_data = any([self.objects_equals(expected_object, obj) for obj in objects])
self.assertTrue(
same_data,
"Objects does not match ({}, {})".format(expected_object, objects[:2]),
)
def objects_equals(self, expected_obj, obj):
for name in vars(expected_obj):
expected_data = getattr(expected_obj, name)
actual_data = getattr(obj, name)
same_data = self.data_equals(expected_data, actual_data)
if not same_data:
break
return same_data
def data_equals(self, expected_data, actual_data):
if isinstance(expected_data, dict):
return self.dicts_equals(expected_data, actual_data)
else:
return expected_data == actual_data
def dicts_equals(self, d1, d2):
dict_keys_same = set(d1.keys()) == set(d2.keys())
if not dict_keys_same:
return False
for key in d1.keys():
if d1[key] != d2[key]:
return False
return True
class KamateraTestDriver(KamateraNodeDriver):
def ex_wait_command(self, *args, **kwargs):
kwargs["poll_interval_seconds"] = 0
return KamateraNodeDriver.ex_wait_command(self, *args, **kwargs)
class KamateraMockHttp(MockHttp):
fixtures = ComputeFileFixtures("kamatera")
def _service_server(self, method, url, body, headers):
client_id, secret = headers["AuthClientId"], headers["AuthSecret"]
if client_id == "nosuchuser" and secret == "nopwd":
body = self.fixtures.load("failed_auth.json")
status = httplib.UNAUTHORIZED
else:
if url == "/service/server" and json.loads(body).get("ssh-key"):
body = self.fixtures.load("create_server_sshkey.json")
else:
body = self.fixtures.load(
{
"/service/server?datacenter=1": "datacenters.json",
"/service/server?sizes=1&datacenter=EU": "sizes_datacenter_EU.json",
"/service/server?images=1&datacenter=EU": "images_datacenter_EU.json",
"/service/server?capabilities=1&datacenter=EU": "capabilities_datacenter_EU.json",
"/service/server": "create_server.json",
}[url]
)
status = httplib.OK
return status, body, {}, httplib.responses[status]
def _service_queue(self, method, url, body, headers):
if not hasattr(self, "_service_queue_call_count"):
self._service_queue_call_count = 0
self._service_queue_call_count += 1
body = self.fixtures.load(
{"/service/queue?id=12345": "queue_12345-%s.json" % self._service_queue_call_count}[url]
)
status = httplib.OK
return status, body, {}, httplib.responses[status]
def _service_server_info(self, method, url, body, headers):
body = self.fixtures.load({"/service/server/info": "server_info.json"}[url])
status = httplib.OK
return status, body, {}, httplib.responses[status]
def _service_servers(self, method, url, body, headers):
body = self.fixtures.load({"/service/servers": "servers.json"}[url])
status = httplib.OK
return status, body, {}, httplib.responses[status]
def _service_server_reboot(self, method, url, body, headers):
body = self.fixtures.load({"/service/server/reboot": "server_operation.json"}[url])
status = httplib.OK
return status, body, {}, httplib.responses[status]
if __name__ == "__main__":
sys.exit(unittest.main())
| {
"content_hash": "f838e015915b47040ed53cfa88b3656a",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 106,
"avg_line_length": 38.473684210526315,
"alnum_prop": 0.6084394401767863,
"repo_name": "apache/libcloud",
"id": "a42e8e1e83db5fcad4ad1ca65151bec1815e7268",
"size": "10286",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/test/compute/test_kamatera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9105547"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1EnvVar(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'value': 'str',
'value_from': 'V1EnvVarSource'
}
attribute_map = {
'name': 'name',
'value': 'value',
'value_from': 'valueFrom'
}
def __init__(self, name=None, value=None, value_from=None, local_vars_configuration=None): # noqa: E501
"""V1EnvVar - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._value = None
self._value_from = None
self.discriminator = None
self.name = name
if value is not None:
self.value = value
if value_from is not None:
self.value_from = value_from
@property
def name(self):
"""Gets the name of this V1EnvVar. # noqa: E501
Name of the environment variable. Must be a C_IDENTIFIER. # noqa: E501
:return: The name of this V1EnvVar. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1EnvVar.
Name of the environment variable. Must be a C_IDENTIFIER. # noqa: E501
:param name: The name of this V1EnvVar. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def value(self):
"""Gets the value of this V1EnvVar. # noqa: E501
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". # noqa: E501
:return: The value of this V1EnvVar. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1EnvVar.
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". # noqa: E501
:param value: The value of this V1EnvVar. # noqa: E501
:type: str
"""
self._value = value
@property
def value_from(self):
"""Gets the value_from of this V1EnvVar. # noqa: E501
:return: The value_from of this V1EnvVar. # noqa: E501
:rtype: V1EnvVarSource
"""
return self._value_from
@value_from.setter
def value_from(self, value_from):
"""Sets the value_from of this V1EnvVar.
:param value_from: The value_from of this V1EnvVar. # noqa: E501
:type: V1EnvVarSource
"""
self._value_from = value_from
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EnvVar):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EnvVar):
return True
return self.to_dict() != other.to_dict()
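# Illustrative usage sketch (not part of the generated model; the variable
# name and values are made-up examples):
# env = V1EnvVar(name='LOG_LEVEL', value='debug')
# env.to_dict()  # -> {'name': 'LOG_LEVEL', 'value': 'debug', 'value_from': None}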
| {
"content_hash": "1b0bc98de9e3fec2d0967054b2481857",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 529,
"avg_line_length": 33.64,
"alnum_prop": 0.5928316629862409,
"repo_name": "kubernetes-client/python",
"id": "227c6711af080ce6df24a0e7f1b053477e31a810",
"size": "5904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_env_var.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
} |
from js9 import j
from .SSHKey import SSHKey
import os
JSConfigBase = j.tools.configmanager.base_class_configs
from .AgentSSHKeys import *
class SSHKeys(JSConfigBase):
def __init__(self):
self.__jslocation__ = "j.clients.sshkey"
JSConfigBase.__init__(self, SSHKey)
self._sshagent = None
@property
def sshagent(self):
# lazily create the AgentWithName wrapper; callers invalidate the cache
# by setting _sshagent back to None
if self._sshagent is None:
self._sshagent = AgentWithName()
return self._sshagent
def key_get(self, path, load=True):
instance = j.sal.fs.getBaseName(path)
sshkey = self.get(instance, data={'path': path}, interactive=j.tools.configmanager.interactive)
if load:
sshkey.load()
return sshkey
def key_generate(self, path, passphrase="", overwrite=False, load=False, returnObj=True):
self.logger.debug("generate ssh key")
if overwrite:
j.sal.fs.remove(path)
if not j.sal.fs.exists(path):
cmd = 'ssh-keygen -t rsa -f %s -q -P "%s"' % (path, passphrase)
j.sal.process.execute(cmd, timeout=10)
j.sal.fs.chmod(path, 0o600)
# make sure key is loaded
if load:
self.key_load(path, passphrase=passphrase, returnObj=False, duration=3600)
if returnObj:
data = {}
data["path"] = path
data["passphrase_"] = passphrase
instance = j.sal.fs.getBaseName(path)
sshkeyobj = self.get(instance=instance, data=data, interactive=False)
return sshkeyobj
def key_load(self, path, passphrase="", returnObj=True, duration=3600 * 24):
"""
load the key on path
"""
if not j.sal.fs.exists(path):
raise RuntimeError("Cannot find path:%sfor sshkey (private key)" % path)
j.clients.sshkey.sshagent_check()
if not j.sal.fs.exists(path):
raise RuntimeError("sshkey not found in:'%s'" % path)
name = j.sal.fs.getBaseName(path)
if name in self.listnames():
return self.get(instance=name, data={'path': path})
path0 = j.sal.fs.pathNormalize(path) # otherwise the expect script will fail
self.logger.info("load ssh key: %s" % path0)
j.sal.fs.chmod(path, 0o600)
if passphrase:
self.logger.debug("load with passphrase")
C = """
echo "exec cat" > ap-cat.sh
chmod a+x ap-cat.sh
export DISPLAY=1
echo {passphrase} | SSH_ASKPASS=./ap-cat.sh ssh-add -t {duration} {path}
""".format(path=path0, passphrase=passphrase, duration=duration)
try:
j.sal.process.executeBashScript(content=C, showout=False)
finally:
j.sal.fs.remove("ap-cat.sh")
else:
# load without passphrase
cmd = "ssh-add -t %s %s " % (duration, path0)
j.sal.process.execute(cmd)
self._sshagent = None # to make sure it gets loaded again
if returnObj:
data = {}
data["path"] = path
return self.get(instance=name, data=data)
def sshagent_init(self):
'''
js9 'j.clients.sshkey.sshagent_init()'
'''
bashprofile_path = os.path.expanduser("~/.bash_profile")
if not j.sal.fs.exists(bashprofile_path):
j.sal.process.execute('touch %s' % bashprofile_path)
content = j.sal.fs.readFile(bashprofile_path)
out = ""
for line in content.split("\n"):
if line.find("#JSSSHAGENT") != -1:
continue
if line.find("SSH_AUTH_SOCK") != -1:
continue
out += "%s\n" % line
out += "export SSH_AUTH_SOCK=%s" % self._get_ssh_socket_path()
out = out.replace("\n\n\n", "\n\n")
out = out.replace("\n\n\n", "\n\n")
j.sal.fs.writeFile(bashprofile_path, out)
def _init_ssh_env(self, force=True):
if force or "SSH_AUTH_SOCK" not in os.environ:
os.putenv("SSH_AUTH_SOCK", self._get_ssh_socket_path())
os.environ["SSH_AUTH_SOCK"] = self._get_ssh_socket_path()
def _get_ssh_socket_path(self):
if "SSH_AUTH_SOCK" in os.environ:
return(os.environ["SSH_AUTH_SOCK"])
socketpath = "%s/sshagent_socket" % j.dirs.TMPDIR
os.environ['SSH_AUTH_SOCK'] = socketpath
return socketpath
def sshagent_check(self):
"""
will check that agent started if not will start it.
"""
if "SSH_AUTH_SOCK" not in os.environ:
self._init_ssh_env()
self.sshagent_init()
if not self.sshagent_available():
self.logger.info('Will start agent')
self.sshagent_start()
def sshkey_path_get(self, keyname, die=True):
"""
Returns Path of public key that is loaded in the agent
@param keyname: name of key loaded to agent to get its path
"""
keyname = j.sal.fs.getBaseName(keyname)
for item in j.clients.sshkey.list():
if item.endswith(keyname):
return item
if die:
raise RuntimeError(
"Did not find key with name:%s, check its loaded in ssh-agent with ssh-add -l" %
keyname)
def sshkey_pub_get(self, keyname, die=True):
"""
Returns Content of public key that is loaded in the agent
@param keyname: name of key loaded to agent to get content from
"""
keyname = j.sal.fs.getBaseName(keyname)
for name, pubkey in j.clients.sshkey.list(True):
if name.endswith(keyname):
return pubkey
if die:
raise RuntimeError(
"Did not find key with name:%s, check its loaded in ssh-agent with ssh-add -l" %
keyname)
# SHOULD NOT USE use the SSHKey
# def add(self, keyname_path):
# """
# adds sshkey to ssh-agent
# :param key: can be path or name of key
# """
# if keyname_path ==j.sal.fs.getBaseName(keyname_path):
# #is keyname
# keyname=keyname_path
# keypath = "%s/.ssh/%s"%( j.dirs.HOMEDIR,keyname)
# else:
# if not j.sal.fs.exists(keyname_path):
# raise ValueError("cannot find key with path: %s" % keyname_path)
# keyname=j.sal.fs.getBaseName(keyname_path)
# keypath=keyname_path
# if keyname in self.listnames():
# return True
# cmd = "ssh-add %s" % keypath
# return j.sal.process.executeInteractive(cmd)
def list(self, key_included=False):
"""
list ssh keys from the agent
:param key_included:
:return: list of paths
"""
# check if we can get keys, if not try to load the ssh-agent (need to check on linux)
try:
res = [item.keyname for item in self.sshagent.get_keys()]
except Exception as e:
self.sshagent_check()
res = [item.keyname for item in self.sshagent.get_keys()]
if key_included:
raise RuntimeError("not implemented yet")
return res
# if "SSH_AUTH_SOCK" not in os.environ:
# self._init_ssh_env()
# self.sshagent_check()
# cmd = "ssh-add -L"
# return_code, out, err = j.sal.process.execute(cmd, showout=False, die=False, timeout=1)
# if return_code:
# if return_code == 1 and out.find("The agent has no identities") != -1:
# return []
# raise RuntimeError("error during listing of keys :%s" % err)
# keys = [line.split()
# for line in out.splitlines() if len(line.split()) == 3]
# if key_included:
# return list(map(lambda key: [key[2], ' '.join(key[0:2])], keys))
# else:
# return list(map(lambda key: key[2], keys))
def listnames(self):
return [j.sal.fs.getBaseName(item) for item in self.list()]
def exists(self, name):
name = j.sal.fs.getBaseName(name)
return name in self.listnames()
def knownhosts_remove(self, item):
"""
:param item: is ip addr or hostname to be removed from known_hosts
"""
path = "%s/.ssh/known_hosts" % j.dirs.HOMEDIR
if j.sal.fs.exists(path):
out = ""
for line in j.sal.fs.readFile(path).split("\n"):
if line.find(item) != -1:
continue
out += "%s\n" % line
j.sal.fs.writeFile(path, out)
def sshagent_start(self):
"""
start ssh-agent, kills other agents if more than one are found
"""
socketpath = self._get_ssh_socket_path()
ssh_agents = j.sal.process.getPidsByFilter('ssh-agent')
for pid in ssh_agents:
p = j.sal.process.getProcessObject(pid)
if socketpath not in p.cmdline():
j.sal.process.kill(pid)
if not j.sal.fs.exists(socketpath):
j.sal.fs.createDir(j.sal.fs.getParent(socketpath))
# ssh-agent not loaded
self.logger.info("load ssh agent")
rc, out, err = j.sal.process.execute("ssh-agent -a %s" % socketpath,
die=False,
showout=False,
timeout=20)
if rc > 0:
raise RuntimeError("Could not start ssh-agent, \nstdout:%s\nstderr:%s\n" % (out, err))
else:
if not j.sal.fs.exists(socketpath):
err_msg = "Serious bug, ssh-agent not started while there was no error, "\
"should never get here"
raise RuntimeError(err_msg)
# get pid from out of ssh-agent being started
piditems = [item for item in out.split("\n") if item.find("pid") != -1]
# print(piditems)
if len(piditems) < 1:
self.logger.debug("results was: %s", out)
raise RuntimeError("Cannot find items in ssh-add -l")
self._init_ssh_env()
pid = int(piditems[-1].split(" ")[-1].strip("; "))
socket_path = j.sal.fs.joinPaths("/tmp", "ssh-agent-pid")
j.sal.fs.writeFile(socket_path, str(pid))
self.sshagent_init()
j.clients.sshkey._sshagent = None
return
# ssh agent should be loaded because ssh-agent socket has been
# found
if os.environ.get("SSH_AUTH_SOCK") != socketpath:
self._init_ssh_env()
j.clients.sshkey._sshagent = None
def sshagent_available(self):
"""
Check if agent available
:return: bool
"""
socket_path = self._get_ssh_socket_path()
if not j.sal.fs.exists(socket_path):
return False
if "SSH_AUTH_SOCK" not in os.environ:
self._init_ssh_env()
return_code, out, _ = j.sal.process.execute("ssh-add -l",
showout=False,
die=False)
if 'The agent has no identities.' in out:
return True
if return_code != 0:
# Remove old socket if can't connect
if j.sal.fs.exists(socket_path):
j.sal.fs.remove(socket_path)
return False
else:
return True
def sshagent_kill(self, socketpath=None):
"""
Kill all agents if more than one is found
:param socketpath: socketpath
"""
j.sal.process.killall("ssh-agent")
socketpath = self._get_ssh_socket_path() if not socketpath else socketpath
j.sal.fs.remove(socketpath)
j.sal.fs.remove(j.sal.fs.joinPaths('/tmp', "ssh-agent-pid"))
self.logger.debug("ssh-agent killed")
def test(self):
"""
js9 'j.clients.sshkey.test()'
"""
self.logger_enable()
self.logger.info("sshkeys:%s" % j.clients.sshkey.listnames())
self.sshagent_kill() # goal is to kill & make sure it get's loaded automatically
# lets generate an sshkey with a passphrase
data = {}
data["passphrase_"] = "12345"
skey = self.get(instance="test", data=data)
assert skey.passphrase == "12345"
skey.passphrase = "123456"
assert skey.passphrase == "123456"
skey.generate(reset=True)
skey.load()
assert skey.is_loaded()
if not j.core.platformtype.myplatform.isMac:
# on mac does not seem to work
skey.unload()
assert skey.is_loaded() is False
skey = self.get(instance="test2", data=data)
skey.generate()
skey.load()
assert skey.is_loaded()
skey.unload()
assert skey.is_loaded() is False
assert self.sshagent_available()
self.sshagent_kill()
assert self.sshagent_available() is False
self.sshagent_start()
assert self.sshagent_available()
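# Illustrative usage sketch (not part of the original module; the key path and
# passphrase below are made-up examples):
# key = j.clients.sshkey.key_generate('/root/.ssh/demo_key', passphrase='secret', load=True)
# j.clients.sshkey.sshagent_check()  # starts an ssh-agent if none is running
# print(j.clients.sshkey.listnames())  # basenames of the keys loaded in the agent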
| {
"content_hash": "3da0afb1e07716e5a435c3c2ec0f727f",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 103,
"avg_line_length": 34.472727272727276,
"alnum_prop": 0.538878842676311,
"repo_name": "Jumpscale/core9",
"id": "ace550d739395727bdbe44ce450c3248c2ab9cde",
"size": "13272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JumpScale9/clients/sshkey/SSHKeys.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cap'n Proto",
"bytes": "7695"
},
{
"name": "Lua",
"bytes": "31125"
},
{
"name": "Python",
"bytes": "1171144"
},
{
"name": "Shell",
"bytes": "42008"
}
],
"symlink_target": ""
} |
"""
Name: structures.py
Date Created: 1/31/2018
Last Modified: 1/31/2018
Purpose: Stores custom data structures utilized in the thesis.
"""
class PriorityQueueNode(object):
def __init__(self, priority, data):
"""
Creates a node object to be stored in a priority queue.
priority: The priority assigned to this node.
data: The data stored in this node.
"""
self.priority = priority
self.data = data
def __eq__(self, other):
if isinstance(other, PriorityQueueNode):
return self.priority == other.priority
raise TypeError
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, PriorityQueueNode):
return self.priority < other.priority
raise TypeError
def __le__(self, other):
if isinstance(other, PriorityQueueNode):
return self.priority <= other.priority
raise TypeError
def __gt__(self, other):
if isinstance(other, PriorityQueueNode):
return self.priority > other.priority
raise TypeError
def __ge__(self, other):
if isinstance(other, PriorityQueueNode):
return self.priority >= other.priority
raise TypeError
def __str__(self):
return "Priority: {0}, \n\tData: {1}".format(self.priority, self.data)
def get_priority(self):
"""
Gets the priority of this node.
returns: The priority of this node, a sortable data type.
"""
return self.priority
def get_data(self):
"""
Gets the data stored in this node.
returns: The data stored in this node.
"""
return self.data
def set_priority(self, priority):
"""
Updates the priority of this node.
priority: The new priority for this node.
"""
self.priority = priority
class PriorityQueue(object):
def __init__(self, order="ASC"):
"""
Create an empty priority queue. This priority queue allows for indexing of elements in addition to
queue insertion and deletion routines.
order: The order items are sorted in the priority queue, either ASC or DES.
"""
self.queue = []
# normalize and validate the requested sort order; fall back to "ASC"
order = order.upper()
if order not in ("ASC", "DES"):
self.order = "ASC"
else:
self.order = order
def __iter__(self):
"""
Get an iterator over the items in the priority queue.
returns: An iterator object.
"""
return self.queue.__iter__()
def __len__(self):
return self.queue.__len__()
def __str__(self):
s = ""
for iteration in range(len(self.queue)):
s += "{0}) {1}\n".format(iteration, self.queue[iteration])
return s
def __getitem__(self, key):
return self.queue.__getitem__(key)
def __delitem__(self, key):
self.queue.__delitem__(key)
def __setitem__(self, key, priority):
if 0 <= key <= len(self.queue):
if isinstance(priority, self.queue[key].get_priority()):
self.queue[key].set_priority(priority)
self.queue.sort()
else:
raise TypeError("New priority data type does not match the data type for the current priority of the item!")
else:
raise IndexError("Index exceeded bounds of the priority queue!")
def __contains__(self, data):
for item in self.queue:
if item.get_data() == data:
return True
return False
def dequeue(self):
"""
Get the item at the front of the priority queue.
returns: An item from the queue.
"""
if self.queue:
return self.queue.pop(0).get_data()
else:
raise IndexError("No items remaining in the priority queue!")
def empty(self):
"""
Determines if the priority queue is empty or not.
returns: True of the queue is empty, False otherwise.
"""
return self.queue == []
def enqueue(self, priority, data):
"""
Places an item in the priority queue with the given priority.
priority: The items priority in the queue.
data: Whatever is to be stored in the queue.
"""
self.queue.append(PriorityQueueNode(priority, data))
self.sort()
def sort(self):
"""
Sort the priority queue.
"""
self.queue.sort(reverse=False if self.order=="ASC" else True)
| {
"content_hash": "4eff8e9ed3d88bf0e67fdd5d272ed7f4",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 124,
"avg_line_length": 31.032467532467532,
"alnum_prop": 0.5503243356350701,
"repo_name": "gollum18/malmo_thesis",
"id": "bee8ee1cf5c2e4ca15121bd16ed0951eb4604246",
"size": "4779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/structures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55488"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnglobal_authenticationsamlpolicy_binding(base_resource) :
""" Binding class showing the authenticationsamlpolicy that can be bound to vpnglobal.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._secondary = False
self._groupextraction = False
self.___count = 0
@property
def priority(self) :
ur"""The priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""The name of the policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""The name of the policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def secondary(self) :
ur"""Bind the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only to a primary authentication server but also to a secondary authentication server. User groups are aggregated across both authentication servers. The user name must be exactly the same on both authentication servers, but the authentication servers can require different passwords.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
ur"""Bind the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only to a primary authentication server but also to a secondary authentication server. User groups are aggregated across both authentication servers. The user name must be exactly the same on both authentication servers, but the authentication servers can require different passwords.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def groupextraction(self) :
ur"""Bind the Authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called it primary and/or secondary authentication has succeeded.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
ur"""Bind the Authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called it primary and/or secondary authentication has succeeded.
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnglobal_authenticationsamlpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnglobal_authenticationsamlpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vpnglobal_authenticationsamlpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnglobal_authenticationsamlpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vpnglobal_authenticationsamlpolicy_binding()
deleteresource.policyname = resource.policyname
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnglobal_authenticationsamlpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
ur""" Use this API to fetch a vpnglobal_authenticationsamlpolicy_binding resources.
"""
try :
obj = vpnglobal_authenticationsamlpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
ur""" Use this API to fetch filtered set of vpnglobal_authenticationsamlpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_authenticationsamlpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
ur""" Use this API to count vpnglobal_authenticationsamlpolicy_binding resources configued on NetScaler.
"""
try :
obj = vpnglobal_authenticationsamlpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
ur""" Use this API to count the filtered set of vpnglobal_authenticationsamlpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_authenticationsamlpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Staaddresstype:
IPV4 = "IPV4"
IPV6 = "IPV6"
class vpnglobal_authenticationsamlpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnglobal_authenticationsamlpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnglobal_authenticationsamlpolicy_binding = [vpnglobal_authenticationsamlpolicy_binding() for _ in range(length)]
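# Illustrative usage sketch (not part of the generated binding class; assumes
# an authenticated nitro_service instance named `client`):
# bindings = vpnglobal_authenticationsamlpolicy_binding.get(client)
# total = vpnglobal_authenticationsamlpolicy_binding.count(client)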
| {
"content_hash": "2ca5d667ee09290aed3cbe410245d651",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 418,
"avg_line_length": 35.3421052631579,
"alnum_prop": 0.7359146190121618,
"repo_name": "benfinke/ns_python",
"id": "10e07a11269099e5d913e5e28230ab936f707c0a",
"size": "8672",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_authenticationsamlpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
'''
@author: Pierre Thibault (pierre.thibault1 -at- gmail.com)
@license: MIT
@since: 2011-04-26
'''
from __future__ import with_statement
__docformat__ = "epytext en"
import filecmp
import os
import shutil
import StringIO
import sys
import unittest
import insert_imports
class Test(unittest.TestCase):
"""
Here we copy a source as a temp dir, we process all the files of temp dir
and we verify we have the result expected by comparing with a result dir.
"""
_temp_dir_created = False
_SOURCE_DIR = "source_dir"
_TMP_DIR = "source_dir~"
_RESULT_DIR = "result_dir"
def setUp(self):
if not self._temp_dir_created:
self.__delete_tmp_dir()
shutil.copytree(self._SOURCE_DIR, self._TMP_DIR)
self._temp_dir_created = True
def tearDown(self):
self.__delete_tmp_dir()
def test_main(self):
for (dirpath, dirnames, filenames) in os.walk(self._TMP_DIR): #@UnusedVariable
filenames = ["%s/%s" % (self._TMP_DIR, f) \
for f in filenames if f.endswith(".py")]
insert_imports.main(filenames)
dircmp = filecmp.dircmp(self._TMP_DIR, self._RESULT_DIR)
self.assertTrue(dircmp.left_only == dircmp.right_only == [], \
"Some files missing in copy \n Left:\n%s\nRight:\n%s" % \
(str(dircmp.left_only), str(dircmp.right_only)))
self.assertTrue(dircmp.diff_files == [], \
"Some files not identical: " + str(dircmp.diff_files))
def test_help(self):
for param in ("-h", "--help"):
string_file = StringIO.StringIO()
try:
sys.stdout = string_file
try:
insert_imports.main((param,)) # Will do a system exit
except:
pass
self.assertEqual(insert_imports.__doc__+"\n", \
string_file.getvalue())
finally:
string_file.close()
sys.stdout = sys.__stdout__
def test_bad_flag(self):
string_file = StringIO.StringIO()
try:
sys.stdout = string_file
try:
insert_imports.main(("--bad_flag",)) # Will do a system exit
except:
pass
self.assertEqual(self.__invalid_arguments_message(), \
string_file.getvalue())
finally:
string_file.close()
sys.stdout = sys.__stdout__
def test_bad_file(self):
string_file = StringIO.StringIO()
try:
sys.stdout = string_file
try:
insert_imports.main(("xxx",)) # Will do a system exit
except:
pass
self.assertTrue(string_file.getvalue() \
.startswith('Unable to process source_file "%s": ' % ("xxx",)))
finally:
string_file.close()
sys.stdout = sys.__stdout__
def test_zero_arg(self):
string_file = StringIO.StringIO()
try:
sys.stdout = string_file
try:
insert_imports.main(()) # Will do a system exit
except:
pass
self.assertEqual(self.__invalid_arguments_message(), \
string_file.getvalue())
finally:
string_file.close()
sys.stdout = sys.__stdout__
def __invalid_arguments_message(self):
return "".join(("Invalid argument(s).", "\n", insert_imports.__doc__, \
"\n"))
def __delete_tmp_dir(self):
if os.path.exists(self._TMP_DIR):
shutil.rmtree(self._TMP_DIR)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | {
"content_hash": "6048be2fce81cde26db1c033d00fc1c1",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 86,
"avg_line_length": 31.54471544715447,
"alnum_prop": 0.5152061855670103,
"repo_name": "Pierre-Thibault/neo-insert-imports",
"id": "4a339566d32bfada1aa105d03688a2e7ebbc7855",
"size": "3904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_insert_imports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67069"
}
],
"symlink_target": ""
} |
import pytest
from awx.main import models
from awx.main.analytics import collectors
@pytest.mark.django_db
def test_empty():
assert collectors.counts(None) == {
"active_user_sessions": 0,
"active_anonymous_sessions": 0,
"active_sessions": 0,
"active_host_count": 0,
"credential": 0,
"custom_inventory_script": 0,
"custom_virtualenvs": 0, # dev env ansible3
"host": 0,
"inventory": 0,
"inventories": {"normal": 0, "smart": 0},
"job_template": 0,
"notification_template": 0,
"organization": 0,
"project": 0,
"running_jobs": 0,
"schedule": 0,
"team": 0,
"user": 0,
"workflow_job_template": 0,
"unified_job": 0,
"pending_jobs": 0,
}
@pytest.mark.django_db
def test_database_counts(
organization_factory, job_template_factory, workflow_job_template_factory
):
objs = organization_factory("org", superusers=["admin"])
jt = job_template_factory(
"test",
organization=objs.organization,
inventory="test_inv",
project="test_project",
credential="test_cred",
)
workflow_job_template_factory("test")
models.Team(organization=objs.organization).save()
models.Host(inventory=jt.inventory).save()
models.Schedule(
rrule="DTSTART;TZID=America/New_York:20300504T150000",
unified_job_template=jt.job_template,
).save()
models.CustomInventoryScript(organization=objs.organization).save()
counts = collectors.counts(None)
for key in (
"organization",
"team",
"user",
"inventory",
"credential",
"project",
"job_template",
"workflow_job_template",
"host",
"schedule",
"custom_inventory_script",
):
assert counts[key] == 1
@pytest.mark.django_db
def test_inventory_counts(organization_factory, inventory_factory):
(inv1, inv2, inv3) = [inventory_factory(f"inv-{i}") for i in range(3)]
s1 = inv1.inventory_sources.create(name="src1", source="ec2")
s2 = inv1.inventory_sources.create(name="src2", source="file")
s3 = inv1.inventory_sources.create(name="src3", source="gce")
s1.hosts.create(name="host1", inventory=inv1)
s1.hosts.create(name="host2", inventory=inv1)
s1.hosts.create(name="host3", inventory=inv1)
s2.hosts.create(name="host4", inventory=inv1)
s2.hosts.create(name="host5", inventory=inv1)
s3.hosts.create(name="host6", inventory=inv1)
s1 = inv2.inventory_sources.create(name="src1", source="ec2")
s1.hosts.create(name="host1", inventory=inv2)
s1.hosts.create(name="host2", inventory=inv2)
s1.hosts.create(name="host3", inventory=inv2)
inv_counts = collectors.inventory_counts(None)
assert {
inv1.id: {
"name": "inv-0",
"kind": "",
"hosts": 6,
"sources": 3,
"source_list": [
{"name": "src1", "source": "ec2", "num_hosts": 3},
{"name": "src2", "source": "file", "num_hosts": 2},
{"name": "src3", "source": "gce", "num_hosts": 1},
],
},
inv2.id: {
"name": "inv-1",
"kind": "",
"hosts": 3,
"sources": 1,
"source_list": [{"name": "src1", "source": "ec2", "num_hosts": 3}],
},
inv3.id: {
"name": "inv-2",
"kind": "",
"hosts": 0,
"sources": 0,
"source_list": [],
},
} == inv_counts
| {
"content_hash": "975fdca1e9889395a3a907eeb49e716c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 79,
"avg_line_length": 29.422764227642276,
"alnum_prop": 0.5520862116606797,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "877f21badadfc6a0e0505ffd3073dd6e1869d225",
"size": "3619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/tests/functional/analytics/test_counts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from django.http import HttpResponse
import json
from kwue.DB_functions.tag_db_functions import *
from kwue.helper_functions.semantic_tag_helpers import get_semantic_tags
from django.views.decorators.csrf import csrf_exempt
def search_semantic_tags(req):
semantic_tags = get_semantic_tags(req.GET.dict()['tag_name'])
return HttpResponse(json.dumps(semantic_tags), content_type='application/json')
@csrf_exempt
def tag_food(req):
data = req.POST.dict()
data['generic_id'] = data['tagged_food_id']
data["type"] = "Food"
is_success = False
if db_insert_tag(data):
is_success = True
return HttpResponse(json.dumps({'is_success': is_success}), content_type='application/json')
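# Illustrative request sketch (not part of the original module; the endpoint
# URL depends on the project's urlconf): a client POSTs form fields including
# tagged_food_id (plus whatever tag fields db_insert_tag expects) to the route
# wired to tag_food, and receives {"is_success": true/false} as JSON.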
@csrf_exempt
def tag_user(req):
data = req.POST.dict()
data['generic_id'] = data['tagged_user_id']
data["type"] = "User"
is_success = False
if db_insert_tag(data):
is_success = True
return HttpResponse(json.dumps({'is_success': is_success}), content_type='application/json') | {
"content_hash": "796e774b9a83070a94205109ac8f1f96",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 33.13333333333333,
"alnum_prop": 0.6951710261569416,
"repo_name": "bounswe/bounswe2016group4",
"id": "82e77b1ced250193183fcce02da28b402c901592",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kwueBackend/kwue/controllers/tag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35468"
},
{
"name": "HTML",
"bytes": "341255"
},
{
"name": "Java",
"bytes": "41773"
},
{
"name": "JavaScript",
"bytes": "83823"
},
{
"name": "Python",
"bytes": "86442"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the exit status and error output if an SConstruct file
throws a NameError (tries to reference a Python variable that
doesn't exist).
"""
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
test.write('SConstruct', """\
a == 1
""")
test.run(status = 2, stderr = """\
NameError: [^\n]*
File ".+SConstruct", line 1:
a == 1
""")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "a5ca7ee566f84b76f199920dd0b440ec",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 61,
"avg_line_length": 19.413793103448278,
"alnum_prop": 0.6589698046181173,
"repo_name": "andrewyoung1991/scons",
"id": "0281253d3d61ed02ff85a19a22b274e68213e6fb",
"size": "1665",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/Errors/NameError.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "746"
},
{
"name": "C++",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "44714"
},
{
"name": "Python",
"bytes": "7385906"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52194"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
"""Utility methods for working with logs."""
import os
import time
REQUEST_KEY = 'REQUEST_ID_HASH'
def RequestID():
"""Returns the ID of the current request assigned by App Engine."""
return os.environ.get(REQUEST_KEY, None)
def ParseLogEntry(entry):
"""Parses a single log entry emitted by app_logging.AppLogsHandler.
Parses a log entry of the form LOG <level> <timestamp> <message> where the
level is in the range [0, 4]. If the entry is not of that form, take the whole
entry to be the message. Null characters in the entry are replaced by
newlines.
Args:
entry: The log entry to parse.
Returns:
A (timestamp, level, message) tuple.
"""
split = entry.split(' ', 3)
if len(split) == 4 and split[0] == 'LOG':
level = split[1]
timestamp = split[2]
message = split[3]
try:
message = str(message)
timestamp = int(timestamp)
level = int(level)
except ValueError:
pass
else:
if 0 <= level <= 4:
return timestamp, level, message.replace('\0', '\n')
usec = int(time.time() * 1e6)
return usec, 3, entry.replace('\0', '\n')
def ParseLogs(logs):
"""Parses a str containing newline separated log entries.
Parses a series of log entries in the form LOG <level> <timestamp> <message>
where the level is in the range [0, 4]. Null characters in the entry are
replaced by newlines.
Args:
logs: A string containing the log entries.
Returns:
A list of (timestamp, level, message) tuples.
"""
return [ParseLogEntry(line) for line in logs.split('\n') if line]
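# A minimal sketch of the expected wire format (sample values are made up):
# ParseLogEntry('LOG 2 1300000000000000 deployed ok')
#   -> (1300000000000000, 2, 'deployed ok')
# ParseLogs('LOG 0 1 first\nplain text line\n')
#   -> [(1, 0, 'first'), (<current usec>, 3, 'plain text line')]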
| {
"content_hash": "70dccc46c85d24a245fce2c0d63f518c",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 80,
"avg_line_length": 25.5,
"alnum_prop": 0.6590765338393422,
"repo_name": "adviti/melange",
"id": "1ac10f56826407c3681e69aac072bd92b48d3333",
"size": "2186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/api/logservice/logsutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
__author__ = 'Daniel Lindsley'
__license__ = 'BSD'
__version__ = (1, 4, 0)
from .resources import Resource
| {
"content_hash": "7ecdcf9c433b55812f2515c828b29535",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 31,
"avg_line_length": 18.166666666666668,
"alnum_prop": 0.6146788990825688,
"repo_name": "rpedigoni/restless",
"id": "8a175ce8f9f3af5eb0fd689bc51c9def3a5717f0",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restless/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "86815"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
"""A pure-Python Python bytecode interpreter."""
# Based on:
# pyvm2 by Paul Swartz (z3p), from http://www.twistedmatrix.com/users/z3p/
from __future__ import print_function, division
import dis
import inspect
import linecache
import logging
import operator
import sys
import six
from six.moves import reprlib
PY3, PY2 = six.PY3, not six.PY3
from .pyobj import Cell, Frame, Block, Method, Function, Generator
log = logging.getLogger(__name__)
if six.PY3:
byteint = lambda b: b
else:
byteint = ord
# Create a repr that won't overflow.
repr_obj = reprlib.Repr()
repr_obj.maxother = 120
repper = repr_obj.repr
import symex
class VirtualMachineError(Exception):
"""For raising errors in the operation of the VM."""
pass
class VirtualMachine(object):
def __init__(self, symbolic_on=False):
# The call stack of frames.
self.frames = []
# The current frame.
self.frame = None
self.return_value = None
self.last_exception = None
self.symbolic_on = symbolic_on
self.interesting_paths = {} # code obj -> list(path)
self._cur_interesting_path = []
self._co_to_decls = {}
self._co_to_envs = {}
def get_decl(self, code_obj):
return self._co_to_decls[code_obj]
def get_env(self, code_obj):
return self._co_to_envs[code_obj].copy()
def set_co_to_decls(self, co_to_decls):
self._co_to_decls.update(co_to_decls)
def set_co_to_envs(self, co_to_envs):
self._co_to_envs.update(co_to_envs)
@property
def cur_interesting_path(self):
return self._cur_interesting_path[-1]
def top(self):
"""Return the value at the top of the stack, with no changes."""
return self.frame.stack[-1]
def pop(self, i=0):
"""Pop a value from the stack.
Default to the top of the stack, but `i` can be a count from the top
instead.
"""
return self.frame.stack.pop(-1-i)
def push(self, *vals):
"""Push values onto the value stack."""
self.frame.stack.extend(vals)
def popn(self, n):
"""Pop a number of values from the value stack.
A list of `n` values is returned, the deepest value first.
"""
if n:
ret = self.frame.stack[-n:]
self.frame.stack[-n:] = []
return ret
else:
return []
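# Sketch: with a stack of [1, 2, 3], popn(2) returns [2, 3] (deepest value
# first) and leaves [1] behind; popn(0) returns [] without touching the stack.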
def peek(self, n):
"""Get a value `n` entries down in the stack, without changing the stack."""
return self.frame.stack[-n]
def jump(self, jump):
"""Move the bytecode pointer to `jump`, so it will execute next."""
self.frame.f_lasti = jump
def push_block(self, type, handler=None, level=None):
if level is None:
level = len(self.frame.stack)
self.frame.block_stack.append(Block(type, handler, level))
def pop_block(self):
return self.frame.block_stack.pop()
def make_frame(self, code, callargs={}, f_globals=None, f_locals=None):
log.info("make_frame: code=%r, callargs=%s" % (code, repper(callargs)))
if f_globals is not None:
f_globals = f_globals  # no-op: keep the caller-supplied globals as-is
if f_locals is None:
f_locals = f_globals
elif self.frames:
f_globals = self.frame.f_globals
f_locals = {}
else:
f_globals = f_locals = {
'__builtins__': __builtins__,
'__name__': '__main__',
'__doc__': None,
'__package__': None,
}
f_locals.update(callargs)
frame = Frame(code, f_globals, f_locals, self.frame)
return frame
def push_frame(self, frame):
self.frames.append(frame)
self.frame = frame
def pop_frame(self):
self.frames.pop()
if self.frames:
self.frame = self.frames[-1]
else:
self.frame = None
def print_frames(self):
"""Print the call stack, for debugging."""
for f in self.frames:
filename = f.f_code.co_filename
lineno = f.line_number()
print(' File "%s", line %d, in %s' % (
filename, lineno, f.f_code.co_name
))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
print(' ' + line.strip())
def resume_frame(self, frame):
frame.f_back = self.frame
val = self.run_frame(frame)
frame.f_back = None
return val
def add_interesting_path(self, code, path):
if code not in self.interesting_paths:
self.interesting_paths[code] = []
self.interesting_paths[code].append(path)
def fork(self, code, f_globals=None, f_locals=None):
newVM = VirtualMachine(self.symbolic_on)
newVM.interesting_paths = self.interesting_paths.copy()
newVM._co_to_decls = self._co_to_decls.copy()
newVM._co_to_envs = self._co_to_envs.copy()
newVM.frame = self.frame
val = newVM.run_code(code, f_globals=f_globals)
return val
def run_code(self, code, f_globals=None, f_locals=None):
frame = self.make_frame(code, f_globals=f_globals, f_locals=f_locals)
if self.symbolic_on:
paths = self.interesting_paths.get(code, [])
path = paths.pop(0) if paths else None
val = None
if path:
self._cur_interesting_path.append(path[1:]) # skip entry block
val = self.run_frame(frame)
# Check some invariants
if self.frames: # pragma: no cover
raise VirtualMachineError("Frames left over!")
if self.frame and self.frame.stack: # pragma: no cover
raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
return val
else:
val = self.run_frame(frame)
# Check some invariants
if self.frames: # pragma: no cover
raise VirtualMachineError("Frames left over!")
if self.frame and self.frame.stack: # pragma: no cover
raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
return val
def unwind_block(self, block):
if block.type == 'except-handler':
offset = 3
else:
offset = 0
while len(self.frame.stack) > block.level + offset:
self.pop()
if block.type == 'except-handler':
tb, value, exctype = self.popn(3)
self.last_exception = exctype, value, tb
def parse_byte_and_args(self):
""" Parse 1 - 3 bytes of bytecode into
an instruction and optionally arguments."""
f = self.frame
opoffset = f.f_lasti
byteCode = byteint(f.f_code.co_code[opoffset])
f.f_lasti += 1
byteName = dis.opname[byteCode]
arg = None
arguments = []
if byteCode >= dis.HAVE_ARGUMENT:
arg = f.f_code.co_code[f.f_lasti:f.f_lasti+2]
f.f_lasti += 2
intArg = byteint(arg[0]) + (byteint(arg[1]) << 8)
if byteCode in dis.hasconst:
arg = f.f_code.co_consts[intArg]
elif byteCode in dis.hasfree:
if intArg < len(f.f_code.co_cellvars):
arg = f.f_code.co_cellvars[intArg]
else:
var_idx = intArg - len(f.f_code.co_cellvars)
arg = f.f_code.co_freevars[var_idx]
elif byteCode in dis.hasname:
arg = f.f_code.co_names[intArg]
elif byteCode in dis.hasjrel:
arg = f.f_lasti + intArg
elif byteCode in dis.hasjabs:
arg = intArg
elif byteCode in dis.haslocal:
arg = f.f_code.co_varnames[intArg]
else:
arg = intArg
arguments = [arg]
return byteName, arguments, opoffset
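# Sketch (assumes pre-3.6 CPython bytecode, where an oparg is two
# little-endian bytes): for co_code = b'\x64\x05\x00' the opcode 0x64 is
# LOAD_CONST, intArg = 5 + (0 << 8) = 5, and arg = f.f_code.co_consts[5].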
def log(self, byteName, arguments, opoffset):
""" Log arguments, block stack, and data stack for each opcode."""
op = "%d: %s" % (opoffset, byteName)
if arguments:
op += " %r" % (arguments[0],)
indent = " "*(len(self.frames)-1)
stack_rep = repper(self.frame.stack)
block_stack_rep = repper(self.frame.block_stack)
log.info(" %sdata: %s" % (indent, stack_rep))
log.info(" %sblks: %s" % (indent, block_stack_rep))
log.info("%s%s" % (indent, op))
def dispatch(self, byteName, arguments):
""" Dispatch by bytename to the corresponding methods.
Exceptions are caught and set on the virtual machine."""
print('%s %s' % (byteName, arguments))  # opcode trace left enabled in this symbolic-execution fork
why = None
try:
if byteName.startswith('UNARY_'):
self.unaryOperator(byteName[6:])
elif byteName.startswith('BINARY_'):
self.binaryOperator(byteName[7:])
elif byteName.startswith('INPLACE_'):
self.inplaceOperator(byteName[8:])
elif 'SLICE+' in byteName:
self.sliceOperator(byteName)
else:
# dispatch
bytecode_fn = getattr(self, 'byte_%s' % byteName, None)
if not bytecode_fn: # pragma: no cover
raise VirtualMachineError(
"unknown bytecode type: %s" % byteName
)
why = bytecode_fn(*arguments)
except:
# deal with exceptions encountered while executing the op.
self.last_exception = sys.exc_info()[:2] + (None,)
log.exception("Caught exception during execution")
why = 'exception'
return why
def manage_block_stack(self, why):
""" Manage a frame's block stack.
Manipulate the block stack and data stack for looping,
exception handling, or returning."""
assert why != 'yield'
block = self.frame.block_stack[-1]
if block.type == 'loop' and why == 'continue':
self.jump(self.return_value)
why = None
return why
self.pop_block()
self.unwind_block(block)
if block.type == 'loop' and why == 'break':
why = None
self.jump(block.handler)
return why
if PY2:
if (
block.type == 'finally' or
(block.type == 'setup-except' and why == 'exception') or
block.type == 'with'
):
if why == 'exception':
exctype, value, tb = self.last_exception
self.push(tb, value, exctype)
else:
if why in ('return', 'continue'):
self.push(self.return_value)
self.push(why)
why = None
self.jump(block.handler)
return why
elif PY3:
if (
why == 'exception' and
block.type in ['setup-except', 'finally']
):
self.push_block('except-handler')
exctype, value, tb = self.last_exception
self.push(tb, value, exctype)
# PyErr_Normalize_Exception goes here
self.push(tb, value, exctype)
why = None
self.jump(block.handler)
return why
elif block.type == 'finally':
if why in ('return', 'continue'):
self.push(self.return_value)
self.push(why)
why = None
self.jump(block.handler)
return why
return why
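# Sketch of the unwinding above: a 'break' inside a 'loop' block pops and
# unwinds the block, then jumps to block.handler (the first instruction after
# the loop); a 'continue' jumps to the target stashed in return_value without
# popping the block, so the loop stays on the block stack.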
def run_frame(self, frame):
"""Run a frame until it returns (somehow).
Exceptions are raised, the return value is returned.
"""
self.push_frame(frame)
while True:
byteName, arguments, opoffset = self.parse_byte_and_args()
if log.isEnabledFor(logging.INFO):
self.log(byteName, arguments, opoffset)
# When unwinding the block stack, we need to keep track of why we
# are doing it.
why = self.dispatch(byteName, arguments)
if why == 'exception':
# TODO: ceval calls PyTraceBack_Here, not sure what that does.
pass
if why == 'reraise':
why = 'exception'
if why != 'yield':
while why and frame.block_stack:
# Deal with any block management we need to do.
why = self.manage_block_stack(why)
if why:
break
# TODO: handle generator exception state
self.pop_frame()
if why == 'exception':
six.reraise(*self.last_exception)
return self.return_value
## Stack manipulation
def byte_LOAD_CONST(self, const):
self.push(const)
def byte_POP_TOP(self):
self.pop()
def byte_DUP_TOP(self):
self.push(self.top())
def byte_DUP_TOPX(self, count):
items = self.popn(count)
for _ in range(2):  # push the popped items back twice to duplicate them
self.push(*items)
def byte_DUP_TOP_TWO(self):
# Py3 only
a, b = self.popn(2)
self.push(a, b, a, b)
def byte_ROT_TWO(self):
a, b = self.popn(2)
self.push(b, a)
def byte_ROT_THREE(self):
a, b, c = self.popn(3)
self.push(c, a, b)
def byte_ROT_FOUR(self):
a, b, c, d = self.popn(4)
self.push(d, a, b, c)
## Names
def byte_LOAD_NAME(self, name):
frame = self.frame
if name in frame.f_locals:
val = frame.f_locals[name]
elif name in frame.f_globals:
val = frame.f_globals[name]
elif name in frame.f_builtins:
val = frame.f_builtins[name]
else:
raise NameError("name '%s' is not defined" % name)
self.push(val)
def byte_STORE_NAME(self, name):
self.frame.f_locals[name] = self.pop()
def byte_DELETE_NAME(self, name):
del self.frame.f_locals[name]
def byte_LOAD_FAST(self, name):
if name in self.frame.f_locals:
val = self.frame.f_locals[name]
else:
raise UnboundLocalError(
"local variable '%s' referenced before assignment" % name
)
self.push(val)
def byte_STORE_FAST(self, name):
self.frame.f_locals[name] = self.pop()
def byte_DELETE_FAST(self, name):
del self.frame.f_locals[name]
def byte_LOAD_GLOBAL(self, name):
f = self.frame
if name in f.f_globals:
val = f.f_globals[name]
elif name in f.f_builtins:
val = f.f_builtins[name]
else:
raise NameError("global name '%s' is not defined" % name)
self.push(val)
def byte_LOAD_DEREF(self, name):
self.push(self.frame.cells[name].get())
def byte_STORE_DEREF(self, name):
self.frame.cells[name].set(self.pop())
def byte_LOAD_LOCALS(self):
self.push(self.frame.f_locals)
## Operators
UNARY_OPERATORS = {
'POSITIVE': operator.pos,
'NEGATIVE': operator.neg,
'NOT': operator.not_,
'CONVERT': repr,
'INVERT': operator.invert,
}
def unaryOperator(self, op):
x = self.pop()
self.push(self.UNARY_OPERATORS[op](x))
BINARY_OPERATORS = {
'POWER': pow,
'MULTIPLY': operator.mul,
'DIVIDE': getattr(operator, 'div', lambda x, y: None),
'FLOOR_DIVIDE': operator.floordiv,
'TRUE_DIVIDE': operator.truediv,
'MODULO': operator.mod,
'ADD': operator.add,
'SUBTRACT': operator.sub,
'SUBSCR': operator.getitem,
'LSHIFT': operator.lshift,
'RSHIFT': operator.rshift,
'AND': operator.and_,
'XOR': operator.xor,
'OR': operator.or_,
}
def binaryOperator(self, op):
x, y = self.popn(2)
self.push(self.BINARY_OPERATORS[op](x, y))
def inplaceOperator(self, op):
x, y = self.popn(2)
if op == 'POWER':
x **= y
elif op == 'MULTIPLY':
x *= y
elif op in ['DIVIDE', 'FLOOR_DIVIDE']:
x //= y
elif op == 'TRUE_DIVIDE':
x /= y
elif op == 'MODULO':
x %= y
elif op == 'ADD':
x += y
elif op == 'SUBTRACT':
x -= y
elif op == 'LSHIFT':
x <<= y
elif op == 'RSHIFT':
x >>= y
elif op == 'AND':
x &= y
elif op == 'XOR':
x ^= y
elif op == 'OR':
x |= y
else: # pragma: no cover
raise VirtualMachineError("Unknown in-place operator: %r" % op)
self.push(x)
def sliceOperator(self, op):
start = 0
end = None  # None means "slice through to the end of the sequence"
op, count = op[:-2], int(op[-1])
if count == 1:
start = self.pop()
elif count == 2:
end = self.pop()
elif count == 3:
end = self.pop()
start = self.pop()
l = self.pop()
if end is None:
end = len(l)
if op.startswith('STORE_'):
l[start:end] = self.pop()
elif op.startswith('DELETE_'):
del l[start:end]
else:
self.push(l[start:end])
COMPARE_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
#lambda x, y: x in y,
symex.symbolic_in,
lambda x, y: x not in y,
#lambda x, y: x is y,
symex.symbolic_is,
lambda x, y: x is not y,
lambda x, y: issubclass(x, Exception) and issubclass(x, y),
]
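# Note on the substitutions above: COMPARE_OP's opnum indexes this table, so
# 'in' (opnum 6) and 'is' (opnum 8) are routed through symex instead of the
# stock lambdas, e.g. byte_COMPARE_OP(8) pushes symex.symbolic_is(x, y),
# letting symbolic values record the comparison (symex's API is assumed from
# its usage in this fork).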
def byte_COMPARE_OP(self, opnum):
x, y = self.popn(2)
self.push(self.COMPARE_OPERATORS[opnum](x, y))
## Attributes and indexing
def byte_LOAD_ATTR(self, attr):
obj = self.pop()
val = getattr(obj, attr)
self.push(val)
def byte_STORE_ATTR(self, name):
val, obj = self.popn(2)
setattr(obj, name, val)
def byte_DELETE_ATTR(self, name):
obj = self.pop()
delattr(obj, name)
def byte_STORE_SUBSCR(self):
val, obj, subscr = self.popn(3)
obj[subscr] = val
def byte_DELETE_SUBSCR(self):
obj, subscr = self.popn(2)
del obj[subscr]
## Building
def byte_BUILD_TUPLE(self, count):
elts = self.popn(count)
self.push(tuple(elts))
def byte_BUILD_LIST(self, count):
elts = self.popn(count)
self.push(elts)
def byte_BUILD_SET(self, count):
# TODO: Not documented in Py2 docs.
elts = self.popn(count)
self.push(set(elts))
def byte_BUILD_MAP(self, size):
# size is ignored.
self.push({})
def byte_STORE_MAP(self):
the_map, val, key = self.popn(3)
the_map[key] = val
self.push(the_map)
def byte_UNPACK_SEQUENCE(self, count):
seq = self.pop()
for x in reversed(seq):
self.push(x)
def byte_BUILD_SLICE(self, count):
if count == 2:
x, y = self.popn(2)
self.push(slice(x, y))
elif count == 3:
x, y, z = self.popn(3)
self.push(slice(x, y, z))
else: # pragma: no cover
raise VirtualMachineError("Strange BUILD_SLICE count: %r" % count)
def byte_LIST_APPEND(self, count):
val = self.pop()
the_list = self.peek(count)
the_list.append(val)
def byte_SET_ADD(self, count):
val = self.pop()
the_set = self.peek(count)
the_set.add(val)
def byte_MAP_ADD(self, count):
val, key = self.popn(2)
the_map = self.peek(count)
the_map[key] = val
## Printing
if 0: # Only used in the interactive interpreter, not in modules.
def byte_PRINT_EXPR(self):
print(self.pop())
def byte_PRINT_ITEM(self):
item = self.pop()
self.print_item(item)
def byte_PRINT_ITEM_TO(self):
to = self.pop()
item = self.pop()
self.print_item(item, to)
def byte_PRINT_NEWLINE(self):
self.print_newline()
def byte_PRINT_NEWLINE_TO(self):
to = self.pop()
self.print_newline(to)
def print_item(self, item, to=None):
if to is None:
to = sys.stdout
if to.softspace:
print(" ", end="", file=to)
to.softspace = 0
print(item, end="", file=to)
if isinstance(item, str):
if (not item) or (not item[-1].isspace()) or (item[-1] == " "):
to.softspace = 1
else:
to.softspace = 1
def print_newline(self, to=None):
if to is None:
to = sys.stdout
print("", file=to)
to.softspace = 0
## Jumps
def byte_JUMP_FORWARD(self, jump):
self.jump(jump)
def byte_JUMP_ABSOLUTE(self, jump):
self.jump(jump)
if 0: # Not in py2.7
def byte_JUMP_IF_TRUE(self, jump):
val = self.top()
if val:
self.jump(jump)
def byte_JUMP_IF_FALSE(self, jump):
val = self.top()
if not val:
self.jump(jump)
def byte_POP_JUMP_IF_TRUE_SYM(self, jump):
val = self.pop()
if isinstance(val, symex.SymbolicVar):
branch_cond = self.cur_interesting_path.pop(0)[1]
if branch_cond == 'TRUE':
val.isTrue()
self.jump(jump)
elif branch_cond == 'FALSE':
val.isFalse()
else:
import ipdb
ipdb.set_trace()
pass
else:
if val:
self.jump(jump)
def byte_POP_JUMP_IF_TRUE(self, jump):
if self.symbolic_on:
self.byte_POP_JUMP_IF_TRUE_SYM(jump)
else:
val = self.pop()
if val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE_SYM(self, jump):
val = self.pop()
if isinstance(val, symex.SymbolicVar):
branch_cond = self.cur_interesting_path.pop(0)[1]
if branch_cond == 'TRUE':
val.isTrue()
elif branch_cond == 'FALSE':
val.isFalse()
self.jump(jump)
else:
import ipdb
ipdb.set_trace()
pass
else:
if not val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE(self, jump):
if self.symbolic_on:
self.byte_POP_JUMP_IF_FALSE_SYM(jump)
else:
val = self.pop()
if not val:
self.jump(jump)
def byte_JUMP_IF_TRUE_OR_POP(self, jump):
val = self.top()
if val:
self.jump(jump)
else:
self.pop()
def byte_JUMP_IF_FALSE_OR_POP(self, jump):
val = self.top()
if not val:
self.jump(jump)
else:
self.pop()
## Blocks
def byte_SETUP_LOOP(self, dest):
self.push_block('loop', dest)
def byte_GET_ITER(self):
self.push(iter(self.pop()))
def byte_FOR_ITER(self, jump):
iterobj = self.top()
try:
v = next(iterobj)
self.push(v)
except StopIteration:
self.pop()
self.jump(jump)
def byte_BREAK_LOOP(self):
return 'break'
def byte_CONTINUE_LOOP(self, dest):
# This is a trick with the return value.
# While unrolling blocks, continue and return both have to preserve
# state as the finally blocks are executed. For continue, it's
# where to jump to, for return, it's the value to return. It gets
# pushed on the stack for both, so continue puts the jump destination
# into return_value.
self.return_value = dest
return 'continue'
def byte_SETUP_EXCEPT(self, dest):
self.push_block('setup-except', dest)
def byte_SETUP_FINALLY(self, dest):
self.push_block('finally', dest)
def byte_END_FINALLY(self):
v = self.pop()
if isinstance(v, str):
why = v
if why in ('return', 'continue'):
self.return_value = self.pop()
if why == 'silenced': # PY3
block = self.pop_block()
assert block.type == 'except-handler'
self.unwind_block(block)
why = None
elif v is None:
why = None
elif issubclass(v, BaseException):
exctype = v
val = self.pop()
tb = self.pop()
self.last_exception = (exctype, val, tb)
why = 'reraise'
else: # pragma: no cover
raise VirtualMachineError("Confused END_FINALLY")
return why
def byte_POP_BLOCK(self):
self.pop_block()
if PY2:
def byte_RAISE_VARARGS(self, argc):
# NOTE: the dis docs are completely wrong about the order of the
# operands on the stack!
exctype = val = tb = None
if argc == 0:
exctype, val, tb = self.last_exception
elif argc == 1:
exctype = self.pop()
elif argc == 2:
val = self.pop()
exctype = self.pop()
elif argc == 3:
tb = self.pop()
val = self.pop()
exctype = self.pop()
# There are a number of forms of "raise", normalize them somewhat.
if isinstance(exctype, BaseException):
val = exctype
exctype = type(val)
self.last_exception = (exctype, val, tb)
if tb:
return 'reraise'
else:
return 'exception'
elif PY3:
def byte_RAISE_VARARGS(self, argc):
cause = exc = None
if argc == 2:
cause = self.pop()
exc = self.pop()
elif argc == 1:
exc = self.pop()
return self.do_raise(exc, cause)
def do_raise(self, exc, cause):
if exc is None: # reraise
exc_type, val, tb = self.last_exception
if exc_type is None:
return 'exception' # error
else:
return 'reraise'
elif type(exc) == type:
# As in `raise ValueError`
exc_type = exc
val = exc() # Make an instance.
elif isinstance(exc, BaseException):
# As in `raise ValueError('foo')`
exc_type = type(exc)
val = exc
else:
return 'exception' # error
# If you reach this point, you're guaranteed that
# val is a valid exception instance and exc_type is its class.
# Now do a similar thing for the cause, if present.
if cause:
if type(cause) == type:
cause = cause()
elif not isinstance(cause, BaseException):
return 'exception' # error
val.__cause__ = cause
self.last_exception = exc_type, val, val.__traceback__
return 'exception'
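# Sketch: do_raise(ValueError, None) instantiates ValueError() and stores
# (ValueError, instance, traceback) in last_exception before unwinding;
# do_raise(None, None) re-raises whatever last_exception already holds,
# mirroring a bare `raise`.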
def byte_POP_EXCEPT(self):
block = self.pop_block()
if block.type != 'except-handler':
raise Exception("popped block is not an except handler")
self.unwind_block(block)
def byte_SETUP_WITH(self, dest):
ctxmgr = self.pop()
self.push(ctxmgr.__exit__)
ctxmgr_obj = ctxmgr.__enter__()
if PY2:
self.push_block('with', dest)
elif PY3:
self.push_block('finally', dest)
self.push(ctxmgr_obj)
def byte_WITH_CLEANUP(self):
# The code here does some weird stack manipulation: the exit function
# is buried in the stack, and where depends on what's on top of it.
# Pull out the exit function, and leave the rest in place.
v = w = None
u = self.top()
if u is None:
exit_func = self.pop(1)
elif isinstance(u, str):
if u in ('return', 'continue'):
exit_func = self.pop(2)
else:
exit_func = self.pop(1)
u = None
elif issubclass(u, BaseException):
if PY2:
w, v, u = self.popn(3)
exit_func = self.pop()
self.push(w, v, u)
elif PY3:
w, v, u = self.popn(3)
tp, exc, tb = self.popn(3)
exit_func = self.pop()
self.push(tp, exc, tb)
self.push(None)
self.push(w, v, u)
block = self.pop_block()
assert block.type == 'except-handler'
self.push_block(block.type, block.handler, block.level-1)
else: # pragma: no cover
raise VirtualMachineError("Confused WITH_CLEANUP")
exit_ret = exit_func(u, v, w)
err = (u is not None) and bool(exit_ret)
if err:
# An error occurred, and was suppressed
if PY2:
self.popn(3)
self.push(None)
elif PY3:
self.push('silenced')
## Functions
def byte_MAKE_FUNCTION(self, argc):
if PY3:
name = self.pop()
else:
name = None
code = self.pop()
defaults = self.popn(argc)
globs = self.frame.f_globals
fn = Function(name, code, globs, defaults, None, self)
self.push(fn)
def byte_LOAD_CLOSURE(self, name):
self.push(self.frame.cells[name])
def byte_MAKE_CLOSURE(self, argc):
if PY3:
# TODO: the py3 docs don't mention this change.
name = self.pop()
else:
name = None
closure, code = self.popn(2)
defaults = self.popn(argc)
globs = self.frame.f_globals
fn = Function(name, code, globs, defaults, closure, self)
self.push(fn)
def byte_CALL_FUNCTION(self, arg):
return self.call_function(arg, [], {})
def byte_CALL_FUNCTION_VAR(self, arg):
args = self.pop()
return self.call_function(arg, args, {})
def byte_CALL_FUNCTION_KW(self, arg):
kwargs = self.pop()
return self.call_function(arg, [], kwargs)
def byte_CALL_FUNCTION_VAR_KW(self, arg):
args, kwargs = self.popn(2)
return self.call_function(arg, args, kwargs)
def call_function(self, arg, args, kwargs):
lenKw, lenPos = divmod(arg, 256)
namedargs = {}
for i in range(lenKw):
key, val = self.popn(2)
namedargs[key] = val
namedargs.update(kwargs)
posargs = self.popn(lenPos)
posargs.extend(args)
func = self.pop()
frame = self.frame
ffunc = func
if hasattr(func, 'im_func'):
# Methods get self as an implicit first parameter.
if func.im_self:
posargs.insert(0, func.im_self)
# The first parameter must be the correct type.
if not isinstance(posargs[0], func.im_class):
raise TypeError(
'unbound method %s() must be called with %s instance '
'as first argument (got %s instance instead)' % (
func.im_func.func_name,
func.im_class.__name__,
type(posargs[0]).__name__,
)
)
func = func.im_func
if self.symbolic_on:
if isinstance(func, Function):
func = func._func
if not hasattr(func, 'func_code'):
import ipdb
ipdb.set_trace()
retval = func(*posargs, **namedargs)
elif func.func_code in self.interesting_paths:
import ipdb
ipdb.set_trace()
# setup env from posargs and namedargs
decl = self.get_decl(func.func_code)
env = self.get_env(decl.parent_module.code_object)
func_args = inspect.getargspec(func)
argnames = func_args.args[:]
posargs_copy = posargs[:]
namedargs_copy = namedargs.copy()
defaults = func_args.defaults
defaults_copy = list(defaults) if defaults is not None else None
for name, var in zip(func_args.args, posargs):
env[name] = var
argnames.pop(0)
posargs_copy.pop(0)
# if all posargs were used up, use kwargs
for argname in argnames:
if argname in namedargs:
env[argname] = namedargs[argname]
namedargs_copy.pop(argname)
else:
env[argname] = defaults_copy.pop(0)
if func_args.varargs:
env[func_args.varargs] = []
for var in posargs_copy:
env[func_args.varargs].append(var)
if func_args.keywords:
env[func_args.keywords] = {}
for name, val in namedargs_copy.iteritems():
env[func_args.keywords][name] = val
# XXX(soh): handles closure
closures = func.func_closure or []
for closure in closures:
import ipdb
ipdb.set_trace()
cell_contents = closure.cell_contents
if not self.frame.cells:
self.frame.cells = {}
for var in func.func_code.co_freevars:
cell = Cell(cell_contents)
self.frame.cells[var] = cell
import ipdb
ipdb.set_trace()
retval = self.fork(func.func_code, f_globals=env)
else:
retval = func(*posargs, **namedargs)
else:
retval = func(*posargs, **namedargs)
self.push(retval)
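# Sketch of the oparg decoding above (pre-3.6 CPython assumed):
# CALL_FUNCTION packs counts as (num_keyword << 8) | num_positional, so for
# f(1, 2, x=3) the compiler emits arg = 0x0102 and divmod(arg, 256) yields
# lenKw = 1, lenPos = 2.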
def byte_RETURN_VALUE(self):
self.return_value = self.pop()
if self.frame.generator:
self.frame.generator.finished = True
return "return"
def byte_YIELD_VALUE(self):
self.return_value = self.pop()
return "yield"
def byte_YIELD_FROM(self):
u = self.pop()
x = self.top()
try:
if not isinstance(x, Generator) or u is None:
# Call next on iterators.
retval = next(x)
else:
retval = x.send(u)
self.return_value = retval
except StopIteration as e:
self.pop()
self.push(e.value)
else:
# YIELD_FROM decrements f_lasti, so that it will be called
# repeatedly until a StopIteration is raised.
self.jump(self.frame.f_lasti - 1)
# Returning "yield" prevents the block stack cleanup code
# from executing, suspending the frame in its current state.
return "yield"
## Importing
def byte_IMPORT_NAME(self, name):
level, fromlist = self.popn(2)
frame = self.frame
self.push(
__import__(name, frame.f_globals, frame.f_locals, fromlist, level)
)
def byte_IMPORT_STAR(self):
# TODO: this doesn't use __all__ properly.
mod = self.pop()
for attr in dir(mod):
if attr[0] != '_':
self.frame.f_locals[attr] = getattr(mod, attr)
def byte_IMPORT_FROM(self, name):
mod = self.top()
self.push(getattr(mod, name))
## And the rest...
def byte_EXEC_STMT(self):
stmt, globs, locs = self.popn(3)
six.exec_(stmt, globs, locs)
if PY2:
def byte_BUILD_CLASS(self):
name, bases, methods = self.popn(3)
self.push(type(name, bases, methods))
elif PY3:
def byte_LOAD_BUILD_CLASS(self):
# New in py3
self.push(__build_class__)
def byte_STORE_LOCALS(self):
self.frame.f_locals = self.pop()
if 0: # Not in py2.7
def byte_SET_LINENO(self, lineno):
self.frame.f_lineno = lineno
| {
"content_hash": "0b80a2f47dc3b29be53a3e45bdc81633",
"timestamp": "",
"source": "github",
"line_count": 1190,
"max_line_length": 90,
"avg_line_length": 31.180672268907564,
"alnum_prop": 0.507478776445223,
"repo_name": "sukwon0709/byterun",
"id": "6409204288890fc245c7b4c13d73e0c86fda6b3f",
"size": "37105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byterun/pyvm2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "15958"
},
{
"name": "C",
"bytes": "888870"
},
{
"name": "C#",
"bytes": "806343"
},
{
"name": "C++",
"bytes": "13803449"
},
{
"name": "Makefile",
"bytes": "12960"
},
{
"name": "OCaml",
"bytes": "529503"
},
{
"name": "Objective-C",
"bytes": "3397"
},
{
"name": "Python",
"bytes": "576986"
},
{
"name": "SMT",
"bytes": "14968"
},
{
"name": "Shell",
"bytes": "26991"
}
],
"symlink_target": ""
} |
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
glutils.py
Pythonesque wrappers around certain OpenGL functions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from OpenGL import GL
import numpy
from contextlib import contextmanager
from functools import reduce  # not a builtin on Python 3
import weakref
from OpenGL.GL import framebufferobjects as FBO
import sys
class gl(object):
@classmethod
def ResetGL(cls):
DisplayList.destroyAllLists()
@classmethod
@contextmanager
def glPushMatrix(cls, matrixmode):
try:
GL.glMatrixMode(matrixmode)
GL.glPushMatrix()
yield
finally:
GL.glMatrixMode(matrixmode)
GL.glPopMatrix()
@classmethod
@contextmanager
def glPushAttrib(cls, *attribs):
allAttribs = reduce(lambda a, b: a | b, attribs)
try:
GL.glPushAttrib(allAttribs)
yield
finally:
GL.glPopAttrib()
@classmethod
@contextmanager
def glPushClientAttrib(cls, *attribs):
allAttribs = reduce(lambda a, b: a | b, attribs)
try:
GL.glPushClientAttrib(allAttribs)
yield
finally:
GL.glPopClientAttrib()
@classmethod
@contextmanager
def glBegin(cls, type):
try:
GL.glBegin(type)
yield
finally:
GL.glEnd()
@classmethod
@contextmanager
def glEnable(cls, *enables):
try:
GL.glPushAttrib(GL.GL_ENABLE_BIT)
for e in enables:
GL.glEnable(e)
yield
finally:
GL.glPopAttrib()
@classmethod
@contextmanager
def glEnableClientState(cls, *enables):
try:
GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)
for e in enables:
GL.glEnableClientState(e)
yield
finally:
GL.glPopClientAttrib()
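# Usage sketch for the context managers above (GL constants from PyOpenGL):
# with gl.glPushMatrix(GL.GL_MODELVIEW):
#     GL.glTranslatef(1.0, 0.0, 0.0)
#     ...  # the matrix is popped even if the body raises
# with gl.glEnable(GL.GL_BLEND, GL.GL_DEPTH_TEST):
#     ...  # enables are restored on exit via glPopAttrib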
listCount = 0
@classmethod
def glGenLists(cls, n):
cls.listCount += n
return GL.glGenLists(n)
@classmethod
def glDeleteLists(cls, base, n):
cls.listCount -= n
return GL.glDeleteLists(base, n)
allDisplayLists = []
class DisplayList(object):
def __init__(self, drawFunc=None):
self.drawFunc = drawFunc
self._list = None
self.dirty = True
def _delete(r):
allDisplayLists.remove(r)
allDisplayLists.append(weakref.ref(self, _delete))
@classmethod
def destroyAllLists(cls):
allLists = []
for listref in allDisplayLists:
list = listref()
if list:
list.destroy()
allLists.append(listref)
allDisplayLists[:] = allLists
def invalidate(self):
self.dirty = True
def destroy(self):
if self._list is not None:
GL.glDeleteLists(self._list, 1)
self._list = None
self.dirty = True
def compile(self, drawFunc):
if not self.dirty and self._list is not None:
return
self._compile(drawFunc)
def _compile(self, drawFunc):
drawFunc = (drawFunc or self.drawFunc)
if drawFunc is None:
return
if self._list is None:
l = gl.glGenLists(1)
self._list = numpy.array([l], 'uintc')
l = self._list[0]
GL.glNewList(l, GL.GL_COMPILE)
drawFunc()
#try:
GL.glEndList()
#except GL.GLError:
# print "Error while compiling display list. Retrying display list code to pinpoint error"
# self.drawFunc()
self.dirty = False
def getList(self, drawFunc=None):
self.compile(drawFunc)
return self._list
if "-debuglists" in sys.argv:
def call(self, drawFunc=None):
drawFunc = (drawFunc or self.drawFunc)
if drawFunc is None:
return
drawFunc()
else:
def call(self, drawFunc=None):
self.compile(drawFunc)
GL.glCallLists(self._list)
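# Usage sketch: wrap a draw function once and call it each frame; the GL
# list compiles lazily and is rebuilt only after invalidate().
# dl = DisplayList(drawFunc=lambda: debugDrawPoint((0.0, 0.0, 0.0)))
# dl.call()        # compiles on first use, then replays the list
# dl.invalidate()  # forces a recompile on the next call()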
class Texture(object):
allTextures = []
defaultFilter = GL.GL_NEAREST
def __init__(self, textureFunc=None, minFilter=None, magFilter=None, maxLOD=4):
# maxLOD setting of 4 ensures 16x16 textures reduce to 1x1 and no smaller
self.minFilter = minFilter or self.defaultFilter
self.magFilter = magFilter or self.defaultFilter
if textureFunc is None:
textureFunc = lambda: None
self.textureFunc = textureFunc
self._texID = GL.glGenTextures(1)
self.dirty = True
self.maxLOD = maxLOD
def load(self):
if not self.dirty:
return
self.dirty = False
def _delete(r):
Texture.allTextures.remove(r)
self.allTextures.append(weakref.ref(self, _delete))
self.bind()
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, self.minFilter)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, self.magFilter)
self.textureFunc()
if self.minFilter in (GL.GL_LINEAR_MIPMAP_LINEAR,
GL.GL_LINEAR_MIPMAP_NEAREST,
GL.GL_NEAREST_MIPMAP_LINEAR,
GL.GL_NEAREST_MIPMAP_NEAREST):
if bool(GL.glGenerateMipmap):
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LOD, self.maxLOD)
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
def dispose(self):
if self._texID is not None:
GL.glDeleteTextures(self._texID)
self._texID = None
def bind(self):
self.load()
GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
def invalidate(self):
self.dirty = True
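# Note on maxLOD above: each mip level halves the texture, so a 16x16
# texture reaches 1x1 after log2(16) = 4 reductions, hence the default of 4.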
class FramebufferTexture(Texture):
def __init__(self, width, height, drawFunc):
tex = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA8, width, height, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, None)
self.enabled = False
self._texID = tex
if bool(FBO.glGenFramebuffers) and "Intel" not in GL.glGetString(GL.GL_VENDOR):
buf = FBO.glGenFramebuffers(1)
depthbuffer = FBO.glGenRenderbuffers(1)
FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, buf)
FBO.glBindRenderbuffer(FBO.GL_RENDERBUFFER, depthbuffer)
FBO.glRenderbufferStorage(FBO.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, width, height)
FBO.glFramebufferRenderbuffer(FBO.GL_FRAMEBUFFER, FBO.GL_DEPTH_ATTACHMENT, FBO.GL_RENDERBUFFER, depthbuffer)
FBO.glFramebufferTexture2D(FBO.GL_FRAMEBUFFER, FBO.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, tex, 0)
status = FBO.glCheckFramebufferStatus(FBO.GL_FRAMEBUFFER)
if status != FBO.GL_FRAMEBUFFER_COMPLETE:
print ("glCheckFramebufferStatus: " + str(status))
self.enabled = False
return
FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, buf)
with gl.glPushAttrib(GL.GL_VIEWPORT_BIT):
GL.glViewport(0, 0, width, height)
drawFunc()
FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, 0)
FBO.glDeleteFramebuffers(1, [buf])
FBO.glDeleteRenderbuffers(1, [depthbuffer])
self.enabled = True
else:
GL.glReadBuffer(GL.GL_BACK)
GL.glPushAttrib(GL.GL_VIEWPORT_BIT | GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT | GL.GL_STENCIL_TEST | GL.GL_STENCIL_BUFFER_BIT)
GL.glDisable(GL.GL_STENCIL_TEST)
GL.glViewport(0, 0, width, height)
GL.glScissor(0, 0, width, height)
with gl.glEnable(GL.GL_SCISSOR_TEST):
drawFunc()
GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
GL.glReadBuffer(GL.GL_BACK)
GL.glCopyTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)
GL.glPopAttrib()
def debugDrawPoint(point):
GL.glColor(1.0, 1.0, 0.0, 1.0)
GL.glPointSize(9.0)
with gl.glBegin(GL.GL_POINTS):
GL.glVertex3f(*point)
| {
"content_hash": "2dd8a0d03d3daa15d2255c1550df9d88",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 145,
"avg_line_length": 30.082781456953644,
"alnum_prop": 0.606274078150798,
"repo_name": "Rubisk/mcedit2",
"id": "683f4bc06462352bff5c79263362f6aeaeec4e29",
"size": "9085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mcedit2/util/glutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "8578"
},
{
"name": "Makefile",
"bytes": "156"
},
{
"name": "Python",
"bytes": "1198213"
}
],
"symlink_target": ""
} |
"""
This example shows how to communicate with Thorlabs
KST101, KCube Stepper Motor.
"""
import os
from pprint import pprint
from msl.equipment import (
EquipmentRecord,
ConnectionRecord,
Backend,
)
from msl.equipment.resources.thorlabs import MotionControl
# ensure that the Kinesis folder is available on PATH
os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'
record = EquipmentRecord(
manufacturer='Thorlabs',
model='KST101',
serial='26000908', # update for your device
connection=ConnectionRecord(
backend=Backend.MSL,
address='SDK::Thorlabs.MotionControl.KCube.StepperMotor.dll',
),
)
def wait():
motor.clear_message_queue()
while True:
status = motor.convert_message(*motor.wait_for_message())['id']
if status == 'Homed' or status == 'Moved':
break
position = motor.get_position()
real = motor.get_real_value_from_device_unit(position, 'DISTANCE')
print(' at position {} [device units] {:.3f} [real-world units]'.format(position, real))
# avoid the FT_DeviceNotFound error
MotionControl.build_device_list()
# connect to the KCube Stepper Motor
motor = record.connect()
print('Connected to {}'.format(motor))
# load the configuration settings, so that we can call
# the get_real_value_from_device_unit() method
motor.load_settings()
# start polling at 200 ms
motor.start_polling(200)
# home the device
print('Homing...')
motor.home()
wait()
print('Homing done. At position {} [device units]'.format(motor.get_position()))
# move to position 100000
print('Moving to 100000...')
motor.move_to_position(100000)
wait()
print('Moving done. At position {} [device units]'.format(motor.get_position()))
# move by a relative amount of -5000
print('Moving by -5000...')
motor.move_relative(-5000)
wait()
print('Moving done. At position {} [device units]'.format(motor.get_position()))
# jog forwards
print('Jogging forwards by {} [device units]'.format(motor.get_jog_step_size()))
motor.move_jog('Forwards')
wait()
print('Jogging done. At position {} [device units]'.format(motor.get_position()))
# stop polling and close the connection
motor.stop_polling()
motor.disconnect()
# you can access the default settings for the motor to pass to the set_*() methods
print('\nThe default motor settings are:')
pprint(motor.settings)
| {
"content_hash": "aaa79f8aa9aee95135a3bdce6aec071d",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 97,
"avg_line_length": 28.202380952380953,
"alnum_prop": 0.7057830308146897,
"repo_name": "MSLNZ/msl-equipment",
"id": "26c1d3aeb6a4dabcec1b404afc1408d3fecc72e6",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "msl/examples/equipment/thorlabs/kst101.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2490464"
}
],
"symlink_target": ""
} |
from __future__ import division
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import log_loss
from sklearn.ensemble import RandomForestClassifier as RF
import argparse
import logging
import numpy as np
import time
def train_predict(train_file, test_file, predict_valid_file, predict_test_file,
n_est, depth, n_fold=5):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG, filename='rf_{}_{}.log'.format(
n_est, depth
))
logging.info('Loading training and test data...')
X, y = load_svmlight_file(train_file)
X_tst, _ = load_svmlight_file(test_file)
clf = RF(n_estimators=n_est, max_depth=depth, random_state=2015)
cv = StratifiedKFold(y, n_folds=n_fold, shuffle=True, random_state=2015)
logging.info('Cross validation...')
p_val = np.zeros_like(y)
lloss = 0.
for i_trn, i_val in cv:
clf.fit(X[i_trn], y[i_trn])
p_val[i_val] = clf.predict_proba(X[i_val])[:, 1]
lloss += log_loss(y[i_val], p_val[i_val])
logging.info('Log Loss (cumulative across folds) = {:.4f}'.format(lloss))
logging.info('Retraining with 100% data...')
clf.fit(X.todense(), y)
p_tst = clf.predict_proba(X_tst.todense())[:, 1]
logging.info('Saving predictions...')
np.savetxt(predict_valid_file, p_val, fmt='%.6f')
np.savetxt(predict_test_file, p_tst, fmt='%.6f')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-file', required=True, dest='train_file')
parser.add_argument('--test-file', required=True, dest='test_file')
parser.add_argument('--predict-valid-file', required=True,
dest='predict_valid_file')
parser.add_argument('--predict-test-file', required=True,
dest='predict_test_file')
parser.add_argument('--n-est', default=100, type=int, dest='n_est')
parser.add_argument('--depth', default=None, type=int, dest='depth')
args = parser.parse_args()
start = time.time()
train_predict(train_file=args.train_file,
test_file=args.test_file,
predict_valid_file=args.predict_valid_file,
predict_test_file=args.predict_test_file,
n_est=args.n_est,
depth=args.depth)
logging.info('finished ({:.2f} min elasped)'.format((time.time() - start) /
60))
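# Usage sketch (file names are placeholders):
# python train_predict_rf.py --train-file train.sps --test-file test.sps \
#     --predict-valid-file rf.val.txt --predict-test-file rf.tst.txt \
#     --n-est 400 --depth 20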
| {
"content_hash": "d68ccbec47baab859938306d8e9711f7",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 38.31884057971015,
"alnum_prop": 0.5847201210287444,
"repo_name": "drivendata/countable-care-3rd-place",
"id": "01dc52d63f466dddb3f2bfc19aec9ed1b4703ef0",
"size": "2667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/train_predict_rf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "114813"
},
{
"name": "Shell",
"bytes": "1980"
}
],
"symlink_target": ""
} |
'''
Copyright (c) OS-Networks, http://os-networks.net
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the HWIOS Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
from django.db import models
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms import ModelForm
from datetime import datetime
SERVICE_TYPES = (
(0, 'StandAlone Vanilla'),
(1, 'StandAlone ModRex'),
(2, 'ROBUST Service'),
(3, 'User Service'),
(4, 'Grid Service'),
(5, 'ModRex Service'),
(6, 'Vanilla Simulator'),
(7, 'ModRex Simulator'),
(8, 'Maps Service'),
(9, 'Unknown'),
)
class BackendOnlineServiceIniForm(forms.Form):
user_settings = forms.CharField(widget=forms.Textarea())
restart_service = forms.BooleanField(initial=True)
class BackendOfflineServiceIniForm(forms.Form):
user_settings = forms.CharField(widget=forms.Textarea())
| {
"content_hash": "af176a620aedf70e684045322b302791",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 43.372549019607845,
"alnum_prop": 0.7581374321880651,
"repo_name": "Knygar/hwios",
"id": "02f83b1ec0083012254ef2e659a4212100e5237a",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/web_ui/forms/teknon.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import convert_mjpeg
import ytupload
import random
import logging
import os
from oauth2client.tools import argparser
import config
import util
config = util.get_config()  # note: rebinds the imported config module name to the parsed settings
tmp_movie_path = config.get('treasurecolumn', 'tmp_movie_path')
def run(url, frames=20, rate=-1,
google_secret=os.getcwd() + '/client_secrets.json',
google_credentials=os.getcwd() + '/credentials.json'):
logging.debug("Trying %s", url)
if url.endswith('.mpg') or url.endswith('.mp4'):
convert_mjpeg.convert_mp4(url, tmp_movie_path)
else:
convert_mjpeg.convert(url, tmp_movie_path, frames, rate)
# NOTE: `args` here is the module-level namespace parsed in the __main__
# block below, so run() is only safe to call after argument parsing.
ytupload.upload_video(args, tmp_movie_path, google_secret, google_credentials)
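# Usage sketch (URL is a placeholder): MJPEG streams are sampled frame by
# frame, while .mpg/.mp4 URLs are converted whole before upload.
# run('http://example.com/cam.mjpg', frames=500, rate=5)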
if __name__ == '__main__':
argparser.add_argument("--randomize", action='store_true', default=False)
argparser.add_argument("--url", "-u", required=False, help="Video URL", default=None)
argparser.add_argument("--urlsfile", "-U", required=False, help="Video URL File, one per line", default=[])
argparser.add_argument("--frames", "-f", required=False, type=int, help="Frames", default=2000)
argparser.add_argument("--rate", "-r", required=False, type=int, help="Rate", default=-1)
argparser.add_argument("--debug", "-d", action='store_true', default=False)
argparser.add_argument("--googlesecret", "-s", type=str, default=os.getcwd() + '/client_secrets.json')
argparser.add_argument("--googlecredentials", "-c", type=str, default=os.getcwd() + '/credentials.json')
args = argparser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
if args.randomize and random.randint(0,100) > 10:
exit()
if args.url is None and len(args.urlsfile) == 0:
raise Exception("Specifying either --url or --urlsfile is required")
if args.urlsfile:
if not os.path.exists(args.urlsfile):
raise Exception("Couldn't find %s" % args.urlsfile)
urls = open(args.urlsfile, 'r').readlines()
url = random.choice(urls).strip()
else:
url = args.url
run(url, args.frames, args.rate, google_secret=args.googlesecret,
google_credentials=args.googlecredentials)
| {
"content_hash": "bda4658875f554a652f486ccf549a0df",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 108,
"avg_line_length": 41.74,
"alnum_prop": 0.7163392429324389,
"repo_name": "lysol/treasurecolumn",
"id": "be28d5a17095e39fbb56d801cccc557e1014dbcf",
"size": "2087",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "treasurecolumn/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18736"
},
{
"name": "Shell",
"bytes": "8766"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class KeyVaultKeyReference(Model):
"""Describes a reference to Key Vault Key.
:param source_vault: Fully qualified resource Id for the Key Vault.
:type source_vault: :class:`ResourceId
<azure.mgmt.batchai.models.ResourceId>`
:param key_url: The URL referencing a key in a Key Vault.
:type key_url: str
"""
_validation = {
'source_vault': {'required': True},
'key_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'ResourceId'},
'key_url': {'key': 'keyUrl', 'type': 'str'},
}
def __init__(self, source_vault, key_url):
self.source_vault = source_vault
self.key_url = key_url
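# Usage sketch (IDs and URLs are placeholders; ResourceId comes from
# azure.mgmt.batchai.models):
# ref = KeyVaultKeyReference(
#     source_vault=ResourceId(id='/subscriptions/.../vaults/myvault'),
#     key_url='https://myvault.vault.azure.net/keys/mykey/0123456789abcdef')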
| {
"content_hash": "d4f57f8836ee566bd89e7c1d6ada99c1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 29.23076923076923,
"alnum_prop": 0.6078947368421053,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "2e691cc6bf1ae144ae512751ef1012cdff1927f0",
"size": "1234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-batchai/azure/mgmt/batchai/models/key_vault_key_reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import tornado.web
from handlers.static_handler import StaticHandler
import os
class IndexHandler(StaticHandler):
def get(self):
pid = os.getpid()
self.render('home.html', proccess=pid)
| {
"content_hash": "a74748812fdd125e4714f830bf1188cf",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 25.875,
"alnum_prop": 0.714975845410628,
"repo_name": "antsankov/cufcq-new",
"id": "0bdb99d3bf52cd12da150387a08f473bc8617ff7",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/index_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "812340"
},
{
"name": "HTML",
"bytes": "1893431"
},
{
"name": "JavaScript",
"bytes": "3306545"
},
{
"name": "PHP",
"bytes": "7944"
},
{
"name": "Python",
"bytes": "85926"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
} |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import sys
from posixpath import join as urljoin
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from django.utils import timezone
from django.db.models import Prefetch
import rest_framework.exceptions
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveAPIView
from rest_framework.views import APIView
from gwells.documents import MinioClient
from gwells.urls import app_root
from gwells.pagination import APILimitOffsetPagination
from wells.permissions import (
WellsEditPermissions, WellsSubmissionPermissions, WellsSubmissionViewerPermissions)
from gwells.models import ProvinceStateCode
from gwells.models.lithology import (
LithologyColourCode, LithologyHardnessCode,
LithologyMaterialCode, LithologyMoistureCode, LithologyDescriptionCode)
from gwells.serializers import ProvinceStateCodeSerializer
from gwells.settings.base import get_env_variable
from gwells.views import AuditCreateMixin
from wells.models import (
ActivitySubmission,
CasingCode,
CasingMaterialCode,
CoordinateAcquisitionCode,
DecommissionMaterialCode,
DecommissionMethodCode,
DevelopmentMethodCode,
DrillingMethodCode,
WellDisinfectedCode,
WellOrientationCode,
BoundaryEffectCode,
DriveShoeCode,
FilterPackMaterialCode,
FilterPackMaterialSizeCode,
GroundElevationMethodCode,
IntendedWaterUseCode,
LandDistrictCode,
LicencedStatusCode,
LinerMaterialCode,
ObsWellStatusCode,
ScreenIntakeMethodCode,
SurfaceSealMaterialCode,
SurfaceSealMethodCode,
SurficialMaterialCode,
ScreenTypeCode,
ScreenMaterialCode,
ScreenOpeningCode,
ScreenBottomCode,
ScreenAssemblyTypeCode,
WaterQualityCharacteristic,
WaterQualityColour,
Well,
WellClassCode,
WellSubclassCode,
WellStatusCode,
WellPublicationStatusCode,
YieldEstimationMethodCode,
AquiferLithologyCode,
)
from submissions.models import WellActivityCode
from wells.serializers import (
CasingCodeSerializer,
CasingMaterialSerializer,
)
from submissions.serializers import (
AlterationSubmissionDisplaySerializer,
CoordinateAcquisitionCodeSerializer,
ConstructionSubmissionDisplaySerializer,
DecommissionMaterialCodeSerializer,
DecommissionMethodCodeSerializer,
DecommissionSubmissionDisplaySerializer,
DevelopmentMethodCodeSerializer,
DrillingMethodCodeSerializer,
WellDisinfectedCodeSerializer,
WellOrientationCodeSerializer,
BoundaryEffectCodeSerializer,
DriveShoeCodeSerializer,
FilterPackMaterialCodeSerializer,
FilterPackMaterialSizeCodeSerializer,
GroundElevationMethodCodeSerializer,
IntendedWaterUseCodeSerializer,
LandDistrictSerializer,
LegacyWellDisplaySerializer,
LinerMaterialCodeSerializer,
LithologyHardnessSerializer,
LithologyColourSerializer,
LithologyDescriptionCodeSerializer,
LithologyMaterialSerializer,
LithologyMoistureSerializer,
ObservationWellStatusCodeSerializer,
ScreenIntakeMethodSerializer,
SurfaceSealMaterialCodeSerializer,
SurfaceSealMethodCodeSerializer,
SurficialMaterialCodeSerializer,
ScreenTypeCodeSerializer,
ScreenMaterialCodeSerializer,
ScreenOpeningCodeSerializer,
ScreenBottomCodeSerializer,
ScreenAssemblyTypeCodeSerializer,
WaterQualityCharacteristicSerializer,
WaterQualityColourSerializer,
WellConstructionSubmissionSerializer,
WellAlterationSubmissionSerializer,
WellDecommissionSubmissionSerializer,
WellSubmissionListSerializer,
WellActivityCodeSerializer,
WellClassCodeSerializer,
WellStatusCodeSerializer,
WellPublicationStatusCodeSerializer,
WellSubclassCodeSerializer,
YieldEstimationMethodCodeSerializer,
WellStaffEditSubmissionSerializer,
AquiferLithologySerializer,
LicencedStatusCodeSerializer,
)
logger = logging.getLogger(__name__)
def get_submission_queryset(qs):
return qs.select_related(
"well_class",
"well_subclass",
"intended_water_use",
"person_responsible",
'company_of_person_responsible',
"owner_province_state",
"ground_elevation_method",
"surface_seal_material",
"surface_seal_method",
"liner_material",
) \
.prefetch_related(
"water_quality_characteristics",
"lithologydescription_set",
"linerperforation_set",
"casing_set",
"screen_set",
"decommission_description_set",
"drilling_methods"
) \
.order_by("filing_number")
class SubmissionGetAPIView(RetrieveAPIView):
"""Get a submission"""
permission_classes = (WellsSubmissionViewerPermissions,)
queryset = ActivitySubmission.objects.all()
model = ActivitySubmission
lookup_field = 'filing_number'
def get_serializer(self, *args, **kwargs):
serializer_class = WellSubmissionListSerializer
kwargs['context'] = self.get_serializer_context()
# this method is called with the first argument being the ActivitySubmission object to be serialized
if len(args) == 0:
return serializer_class(*args, **kwargs)
data = args[0]
activity = data.well_activity_type.code
# There are different serializers; which one is used depends on well_activity_type
if activity and activity == WellActivityCode.types.construction().code:
serializer_class = ConstructionSubmissionDisplaySerializer
elif activity and activity == WellActivityCode.types.alteration().code:
serializer_class = AlterationSubmissionDisplaySerializer
elif activity and activity == WellActivityCode.types.decommission().code:
serializer_class = DecommissionSubmissionDisplaySerializer
elif activity and activity == WellActivityCode.types.legacy().code:
serializer_class = LegacyWellDisplaySerializer
return serializer_class(*args, **kwargs)
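# For illustration only: the branching in get_serializer above is equivalent to
# a lookup table keyed on the activity-type code. This sketch restates it using
# the serializers already imported in this module; it is not the view's code.
#
#     DISPLAY_SERIALIZERS = {
#         WellActivityCode.types.construction().code: ConstructionSubmissionDisplaySerializer,
#         WellActivityCode.types.alteration().code: AlterationSubmissionDisplaySerializer,
#         WellActivityCode.types.decommission().code: DecommissionSubmissionDisplaySerializer,
#         WellActivityCode.types.legacy().code: LegacyWellDisplaySerializer,
#     }
#     serializer_class = DISPLAY_SERIALIZERS.get(activity, WellSubmissionListSerializer)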
class SubmissionListAPIView(ListAPIView):
"""List submissions
get: returns a list of well activity submissions
"""
permission_classes = (WellsSubmissionViewerPermissions,)
model = ActivitySubmission
queryset = ActivitySubmission.objects.all()
pagination_class = APILimitOffsetPagination
serializer_class = WellSubmissionListSerializer
def get_queryset(self):
return get_submission_queryset(self.queryset)
def list(self, request, **kwargs):
""" List activity submissions with pagination """
queryset = self.get_queryset()
filtered_queryset = self.filter_queryset(queryset)
page = self.paginate_queryset(filtered_queryset)
if page is not None:
serializer = WellSubmissionListSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = WellSubmissionListSerializer(filtered_queryset, many=True)
return Response(serializer.data)
class SubmissionBase(AuditCreateMixin, ListCreateAPIView):
""" Base class for mutating data that has detailed error logging.
"""
def post(self, request, *args, **kwargs):
try:
return self.create(request, *args, **kwargs)
except rest_framework.exceptions.APIException as error:
try:
logger.warning(('Problem encountered handling POST; '
'user:{request.user.profile.username}; '
'user.is_authenticated:{request.user.is_authenticated}; '
'path:{request.path}; method:{request.method}; status_code:{error.status_code}; '
'request: {request.data}; '
'response: {error.detail}').format(
request=request,
error=error))
            except Exception:  # never let a logging failure mask the original error
logger.error('Error logging error!', exc_info=sys.exc_info())
raise
        except Exception:
try:
logger.warning(('Problem encountered handling POST; '
'user:{request.user.profile.username}; '
'user.is_authenticated:{request.user.is_authenticated}; '
'path:{request.path}; method:{request.method};'
'request: {request.data}; '
'detail: {detail}').format(
request=request,
detail=sys.exc_info()))
            except Exception:  # never let a logging failure mask the original error
logger.error('Error logging error!', exc_info=sys.exc_info())
raise
class SubmissionConstructionAPIView(SubmissionBase):
"""Create a construction submission"""
model = ActivitySubmission
serializer_class = WellConstructionSubmissionSerializer
permission_classes = (WellsSubmissionPermissions,)
queryset = ActivitySubmission.objects.all()
def get_queryset(self):
return get_submission_queryset(self.queryset)\
.filter(well_activity_type=WellActivityCode.types.construction())
class SubmissionAlterationAPIView(SubmissionBase):
"""Create an alteration submission"""
model = ActivitySubmission
serializer_class = WellAlterationSubmissionSerializer
permission_classes = (WellsSubmissionPermissions,)
queryset = ActivitySubmission.objects.all()
def get_queryset(self):
return get_submission_queryset(self.queryset)\
.filter(well_activity_type=WellActivityCode.types.alteration())
class SubmissionDecommissionAPIView(SubmissionBase):
"""Create a decommission submission"""
model = ActivitySubmission
serializer_class = WellDecommissionSubmissionSerializer
permission_classes = (WellsSubmissionPermissions,)
queryset = ActivitySubmission.objects.all()
def get_queryset(self):
return get_submission_queryset(self.queryset)\
.filter(well_activity_type=WellActivityCode.types.decommission())
class SubmissionStaffEditAPIView(SubmissionBase):
""" Create a staff edit submission"""
model = ActivitySubmission
serializer_class = WellStaffEditSubmissionSerializer
permission_classes = (WellsEditPermissions,)
queryset = ActivitySubmission.objects.all()
def post(self, request, *args, **kwargs):
# ground_elevation is a decimal so we swap empty string with null value
if 'ground_elevation' in request.data:
if request.data['ground_elevation'] == '':
request.data['ground_elevation'] = None
return self.create(request, *args, **kwargs)
def get_queryset(self):
return get_submission_queryset(self.queryset)\
.filter(well_activity_type=WellActivityCode.types.staff_edit())
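# A minimal sketch of generalizing the empty-string-to-null normalization done
# in SubmissionStaffEditAPIView.post; the helper name and the field tuple are
# illustrative assumptions, not part of this module.
#
#     def blank_decimals_to_none(data, fields=('ground_elevation',)):
#         for field in fields:
#             if data.get(field) == '':
#                 data[field] = None
#         return data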
class SubmissionsOptions(APIView):
"""Options required for submitting activity report forms"""
swagger_schema = None
def get(self, request, **kwargs):
options = {}
now = timezone.now()
province_codes = ProvinceStateCodeSerializer(
instance=ProvinceStateCode.objects.all(), many=True)
activity_codes = WellActivityCodeSerializer(
instance=WellActivityCode.objects.all(), many=True)
well_class_codes = WellClassCodeSerializer(
instance=WellClassCode.objects.prefetch_related(Prefetch('wellsubclasscode_set',
queryset=WellSubclassCode.objects.filter(expiry_date__gt=now),
to_attr='all_well_subclass_codes')).filter(expiry_date__gt=now), many=True)
intended_water_use_codes = IntendedWaterUseCodeSerializer(
instance=IntendedWaterUseCode.objects.all(), many=True)
casing_codes = CasingCodeSerializer(
instance=CasingCode.objects.all(), many=True)
casing_material = CasingMaterialSerializer(
instance=CasingMaterialCode.objects.all(), many=True)
decommission_materials = DecommissionMaterialCodeSerializer(
instance=DecommissionMaterialCode.objects.all(), many=True)
decommission_methods = DecommissionMethodCodeSerializer(
instance=DecommissionMethodCode.objects.all(), many=True)
well_disinfected_codes = WellDisinfectedCodeSerializer(
instance=WellDisinfectedCode.objects.all(), many=True)
well_orientation_codes = WellOrientationCodeSerializer(
instance=WellOrientationCode.objects.all(), many=True)
boundary_effect_codes = BoundaryEffectCodeSerializer(
instance=BoundaryEffectCode.objects.all(), many=True)
drive_shoe_codes = DriveShoeCodeSerializer(
instance=DriveShoeCode.objects.all(), many=True)
filter_pack_material = FilterPackMaterialCodeSerializer(
instance=FilterPackMaterialCode.objects.all(), many=True)
filter_pack_material_size = FilterPackMaterialSizeCodeSerializer(
instance=FilterPackMaterialSizeCode.objects.all(), many=True)
land_district_codes = LandDistrictSerializer(
instance=LandDistrictCode.objects.all(), many=True)
liner_material_codes = LinerMaterialCodeSerializer(
instance=LinerMaterialCode.objects.all(), many=True)
ground_elevation_method_codes = GroundElevationMethodCodeSerializer(
instance=GroundElevationMethodCode.objects.all(), many=True)
drilling_method_codes = DrillingMethodCodeSerializer(
instance=DrillingMethodCode.objects.all(), many=True)
surface_seal_method_codes = SurfaceSealMethodCodeSerializer(
instance=SurfaceSealMethodCode.objects.all(), many=True)
surface_seal_material_codes = SurfaceSealMaterialCodeSerializer(
instance=SurfaceSealMaterialCode.objects.all(), many=True)
surficial_material_codes = SurficialMaterialCodeSerializer(
instance=SurficialMaterialCode.objects.all(), many=True)
screen_intake_methods = ScreenIntakeMethodSerializer(
instance=ScreenIntakeMethodCode.objects.all(), many=True)
screen_types = ScreenTypeCodeSerializer(instance=ScreenTypeCode.objects.all(), many=True)
screen_materials = ScreenMaterialCodeSerializer(instance=ScreenMaterialCode.objects.all(), many=True)
screen_openings = ScreenOpeningCodeSerializer(instance=ScreenOpeningCode.objects.all(), many=True)
screen_bottoms = ScreenBottomCodeSerializer(instance=ScreenBottomCode.objects.all(), many=True)
screen_assemblies = ScreenAssemblyTypeCodeSerializer(
instance=ScreenAssemblyTypeCode.objects.all(), many=True)
development_methods = DevelopmentMethodCodeSerializer(
instance=DevelopmentMethodCode.objects.all(), many=True)
yield_estimation_methods = YieldEstimationMethodCodeSerializer(
instance=YieldEstimationMethodCode.objects.all(), many=True)
water_quality_characteristics = WaterQualityCharacteristicSerializer(
instance=WaterQualityCharacteristic.objects.all(), many=True)
water_quality_colours = WaterQualityColourSerializer(
instance=WaterQualityColour.objects.all(), many=True)
well_status_codes = WellStatusCodeSerializer(
instance=WellStatusCode.objects.all(), many=True
)
well_publication_status_codes = WellPublicationStatusCodeSerializer(
instance=WellPublicationStatusCode.objects.all(), many=True
)
coordinate_acquisition_codes = CoordinateAcquisitionCodeSerializer(
instance=CoordinateAcquisitionCode.objects.all(), many=True)
observation_well_status = ObservationWellStatusCodeSerializer(
instance=ObsWellStatusCode.objects.all(), many=True
)
aquifer_lithology = AquiferLithologySerializer(instance=AquiferLithologyCode.objects.all(), many=True)
lithology_hardness = LithologyHardnessSerializer(
instance=LithologyHardnessCode.objects.all(), many=True)
lithology_colours = LithologyColourSerializer(instance=LithologyColourCode.objects.all(), many=True)
lithology_materials = LithologyMaterialSerializer(
instance=LithologyMaterialCode.objects.all(), many=True)
lithology_moisture = LithologyMoistureSerializer(
instance=LithologyMoistureCode.objects.all(), many=True)
lithology_descriptors = LithologyDescriptionCodeSerializer(
instance=LithologyDescriptionCode.objects.all(), many=True)
licenced_status_codes = LicencedStatusCodeSerializer(
instance=LicencedStatusCode.objects.all(), many=True)
        # posixpath.join (imported above) is assumed for joining the path
        # segments here, since urllib.parse.urljoin accepts only two arguments.
        root = posixpath.join('/', app_root, 'api/v2/')
for item in activity_codes.data:
            # Note: ('LEGACY') without a trailing comma is a plain string, so the
            # original `not in` check was a substring test; compare directly instead.
            if item['code'] != 'LEGACY':
item['path'] = reverse(item['code'], kwargs={'version': 'v2'})[len(root):]
options["province_codes"] = province_codes.data
options["activity_types"] = activity_codes.data
options["coordinate_acquisition_codes"] = coordinate_acquisition_codes.data
options["well_classes"] = well_class_codes.data
options["intended_water_uses"] = intended_water_use_codes.data
options["casing_codes"] = casing_codes.data
options["casing_materials"] = casing_material.data
options["decommission_materials"] = decommission_materials.data
options["decommission_methods"] = decommission_methods.data
options["well_disinfected_codes"] = well_disinfected_codes.data
options["well_orientation_codes"] = well_orientation_codes.data
options["boundary_effect_codes"] = boundary_effect_codes.data
options["drive_shoe_codes"] = drive_shoe_codes.data
options["filter_pack_material"] = filter_pack_material.data
options["filter_pack_material_size"] = filter_pack_material_size.data
options["land_district_codes"] = land_district_codes.data
options["liner_material_codes"] = liner_material_codes.data
options["screen_intake_methods"] = screen_intake_methods.data
options["ground_elevation_methods"] = ground_elevation_method_codes.data
options["drilling_methods"] = drilling_method_codes.data
options["surface_seal_methods"] = surface_seal_method_codes.data
options["surface_seal_materials"] = surface_seal_material_codes.data
options["surficial_material_codes"] = surficial_material_codes.data
options["screen_types"] = screen_types.data
options["screen_materials"] = screen_materials.data
options["screen_openings"] = screen_openings.data
options["screen_bottoms"] = screen_bottoms.data
options["screen_assemblies"] = screen_assemblies.data
options["development_methods"] = development_methods.data
options["yield_estimation_methods"] = yield_estimation_methods.data
options["water_quality_characteristics"] = water_quality_characteristics.data
options["water_quality_colours"] = water_quality_colours.data
options["aquifer_lithology_codes"] = aquifer_lithology.data
options["lithology_hardness_codes"] = lithology_hardness.data
options["lithology_colours"] = lithology_colours.data
options["lithology_materials"] = lithology_materials.data
options["lithology_moisture_codes"] = lithology_moisture.data
options["lithology_descriptors"] = lithology_descriptors.data
options["well_status_codes"] = well_status_codes.data
options["well_publication_status_codes"] = well_publication_status_codes.data
options["observation_well_status"] = observation_well_status.data
options["licenced_status_codes"] = licenced_status_codes.data
return Response(options)
class PreSignedDocumentKey(RetrieveAPIView):
"""
Get a pre-signed document key to upload into an S3 compatible document store
    get: obtain a URL that is pre-signed to allow client-side uploads
"""
queryset = ActivitySubmission.objects.all()
permission_classes = (WellsSubmissionPermissions,)
def get(self, request, submission_id, **kwargs):
submission = get_object_or_404(self.queryset, pk=submission_id)
client = MinioClient(
request=request, disable_private=False)
object_name = request.GET.get("filename")
filename = client.format_object_name(object_name, int(submission.well.well_tag_number), "well")
bucket_name = get_env_variable("S3_ROOT_BUCKET")
is_private = False
if request.GET.get("private") == "true":
is_private = True
bucket_name = get_env_variable("S3_PRIVATE_ROOT_BUCKET")
url = client.get_presigned_put_url(
filename, bucket_name=bucket_name, private=is_private)
return JsonResponse({"object_name": object_name, "url": url})
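# For illustration, a client could consume the presigned response roughly as
# below; `presign_endpoint`, `auth_headers` and the filename are assumptions,
# not values defined in this module.
#
#     import requests
#     resp = requests.get(presign_endpoint, params={"filename": "report.pdf"}, headers=auth_headers)
#     put_url = resp.json()["url"]
#     with open("report.pdf", "rb") as f:
#         requests.put(put_url, data=f)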
| {
"content_hash": "5611f0e04d5e68cac57a834871092eb5",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 144,
"avg_line_length": 44.05858585858586,
"alnum_prop": 0.6986106653216562,
"repo_name": "bcgov/gwells",
"id": "08c5b12e0ab4b9035daa7c25ccdfac1a1aa24f7a",
"size": "21809",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "app/backend/submissions/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "519"
},
{
"name": "Dockerfile",
"bytes": "4104"
},
{
"name": "Groovy",
"bytes": "89156"
},
{
"name": "HTML",
"bytes": "10079"
},
{
"name": "JavaScript",
"bytes": "271010"
},
{
"name": "Makefile",
"bytes": "807"
},
{
"name": "Python",
"bytes": "1550542"
},
{
"name": "SCSS",
"bytes": "7409"
},
{
"name": "Shell",
"bytes": "46319"
},
{
"name": "Vue",
"bytes": "833800"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Migrate users with no group to alumni group."""
users = orm['auth.User'].objects.filter(groups__isnull=True)
alumni_group = orm['auth.Group'].objects.get(name='Alumni')
for user in users:
user.groups.add(alumni_group)
def backwards(self, orm):
"""Remove all users from the Alumni Group."""
users = orm['auth.User'].objects.filter(groups__name='Alumni')
for user in users:
user.groups.clear()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
'profiles.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': "orm['auth.User']"}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'date_left_program': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'first_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'is_rotm_nominee': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_voting_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'second_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'unavailability_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'profiles.userstatus': {
'Meta': {'ordering': "['-expected_date', '-created_on']", 'object_name': 'UserStatus'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': "orm['auth.User']"}),
'return_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['profiles']
symmetrical = True
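# For illustration: with South installed, this data migration would typically be
# applied (and reverted) via manage.py, using the app label from complete_apps.
# The migration number comes from this file's name; the rollback target below
# is an assumption for the example.
#
#     python manage.py migrate profiles 0053
#     python manage.py migrate profiles 0052   # runs backwards()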
| {
"content_hash": "4cfddeada91d3aa10951cb6c3ce31156",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 198,
"avg_line_length": 84.83333333333333,
"alnum_prop": 0.5534023932845151,
"repo_name": "johngian/remo",
"id": "75888f4a23326462cb6659cae87ba43db758241c",
"size": "11222",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/profiles/migrations/0053_migrate_users_to_alumni_group.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "Batchfile",
"bytes": "4531"
},
{
"name": "CSS",
"bytes": "372453"
},
{
"name": "HTML",
"bytes": "373393"
},
{
"name": "JavaScript",
"bytes": "606422"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Puppet",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "7472017"
},
{
"name": "Shell",
"bytes": "3221"
},
{
"name": "Smarty",
"bytes": "215"
},
{
"name": "TeX",
"bytes": "1525"
}
],
"symlink_target": ""
} |
import re
# NOTE: When get/get_all/check_update from database with default fields,
# all following fields should be included in output dict.
{
'project': {
'name': str,
'group': str,
'status': str,
'script': str,
# 'config': str,
'comments': str,
# 'priority': int,
'rate': int,
'burst': int,
'updatetime': int,
}
}
class ProjectDB(object):
status_str = [
'TODO',
'STOP',
'CHECKING',
'DEBUG',
'RUNNING',
]
def insert(self, name, obj={}):
raise NotImplementedError
def update(self, name, obj={}, **kwargs):
raise NotImplementedError
def get_all(self, fields=None):
raise NotImplementedError
def get(self, name, fields):
raise NotImplementedError
def drop(self, name):
raise NotImplementedError
def check_update(self, timestamp, fields=None):
raise NotImplementedError
def split_group(self, group, lower=True):
        return re.split(r"\W+", (group or '').lower())
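# Illustration only: a minimal in-memory implementation of the interface above,
# showing the expected semantics of get/get_all/update. The project's real
# database-backed implementations live elsewhere; nothing here is part of them.
class InMemoryProjectDB(ProjectDB):
    def __init__(self):
        self.projects = {}

    def insert(self, name, obj={}):
        project = dict(obj)
        project['name'] = name
        project.setdefault('updatetime', 0)
        self.projects[name] = project

    def update(self, name, obj={}, **kwargs):
        project = self.projects.setdefault(name, {'name': name})
        project.update(obj)
        project.update(kwargs)

    def get_all(self, fields=None):
        return [self.get(name, fields) for name in self.projects]

    def get(self, name, fields=None):
        project = self.projects.get(name)
        if project is None or fields is None:
            return project
        return dict((k, v) for k, v in project.items() if k in fields)

    def drop(self, name):
        self.projects.pop(name, None)

    def check_update(self, timestamp, fields=None):
        return [self.get(p['name'], fields) for p in self.projects.values()
                if p.get('updatetime', 0) > timestamp]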
| {
"content_hash": "771e0110ec622f511fd5cc983c959762",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 72,
"avg_line_length": 22.26530612244898,
"alnum_prop": 0.5582034830430798,
"repo_name": "jttoday/spider",
"id": "f6bd4666bc2a02c859f1d3278cdfef9c38ed0b02",
"size": "1276",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyspider/database/base/projectdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24278"
},
{
"name": "HTML",
"bytes": "24449"
},
{
"name": "JavaScript",
"bytes": "48122"
},
{
"name": "Python",
"bytes": "450714"
}
],
"symlink_target": ""
} |
import contextlib
import json
import os
from nose.tools import istest, assert_equal
from catchy import NoCachingStrategy
from whack.tempdir import create_temporary_dir
from whack.files import sh_script_description, plain_file, read_file
from whack.sources import PackageSource
from whack.builder import Builder
from whack.packagerequests import create_package_request
from whack.errors import FileNotFoundError
from whack.downloads import Downloader
@istest
def build_uses_params_as_environment_variables_in_build():
with _package_source("echo $VERSION > $1/version", {}) as package_source:
with create_temporary_dir() as target_dir:
build(create_package_request(package_source, {"version": "42"}), target_dir)
assert_equal("42\n", read_file(os.path.join(target_dir, "version")))
@istest
def build_uses_default_value_for_param_if_param_not_explicitly_set():
description = {"defaultParams": {"version": "42"}}
with _package_source("echo $VERSION > $1/version", description) as package_source:
with create_temporary_dir() as target_dir:
build(create_package_request(package_source, {}), target_dir)
assert_equal("42\n", read_file(os.path.join(target_dir, "version")))
@istest
def explicit_params_override_default_params():
description = {"defaultParams": {"version": "42"}}
with _package_source("echo $VERSION > $1/version", description) as package_source:
with create_temporary_dir() as target_dir:
build(create_package_request(package_source, {"version": "43"}), target_dir)
assert_equal("43\n", read_file(os.path.join(target_dir, "version")))
@istest
def error_is_raised_if_build_script_is_missing():
files = [
plain_file("whack/whack.json", json.dumps({})),
]
with create_temporary_dir(files) as package_source_dir:
package_source = PackageSource.local(package_source_dir)
request = create_package_request(package_source, {})
with create_temporary_dir() as target_dir:
assert_raises(
FileNotFoundError,
("whack/build script not found in package source {0}".format(package_source_dir), ),
lambda: build(request, target_dir),
)
@contextlib.contextmanager
def _package_source(build_script, description):
files = [
plain_file("whack/whack.json", json.dumps(description)),
sh_script_description("whack/build", build_script),
]
with create_temporary_dir(files) as package_source_dir:
yield PackageSource.local(package_source_dir)
def assert_raises(error_class, args, func):
try:
func()
raise AssertionError("Expected exception {0}".format(error_class.__name__))
except error_class as error:
assert_equal(error.args, args)
def build(*args, **kwargs):
cacher = NoCachingStrategy()
builder = Builder(Downloader(cacher))
return builder.build(*args, **kwargs)
| {
"content_hash": "1d4658e348f4651bb2675c81b7418c6d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 100,
"avg_line_length": 37.848101265822784,
"alnum_prop": 0.6779264214046823,
"repo_name": "mwilliamson/whack",
"id": "f144fcf50518d9bfdecbac96e4fef153f0650892",
"size": "2990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/builder_tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "530"
},
{
"name": "Python",
"bytes": "106682"
}
],
"symlink_target": ""
} |
import logging
import sys
import arcpy
import pulp
from pyspatialopt.analysis import arcpy_analysis
from pyspatialopt.models import utilities
from pyspatialopt.models import covering
if __name__ == "__main__":
# Initialize a logger so we get formatted output
logger = logging.getLogger()
logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
# setup stream handler to console output
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
# Read the demand polygon layer
    # Demand polygon shapefile has 212 polygons, where each feature has a demand (population) and a unique identifier (GEOID10)
demand_polygon_fl = arcpy.MakeFeatureLayer_management(r"../sample_data/demand_polygon.shp").getOutput(0)
# Read the facility service area layer
# Facility service area polygon layer has 8 polygons, where each feature has a unique identifier (ORIG_ID)
facility_service_areas_fl = arcpy.MakeFeatureLayer_management(
r"../sample_data/facility_service_areas.shp").getOutput(0)
# Create binary coverage (polygon) dictionary structure
# Use population of each polygon as demand,
# Use GEOID as the unique field
    # Use ORIG_ID as the unique id for the facilities
binary_coverage_polygon = arcpy_analysis.generate_binary_coverage(demand_polygon_fl, facility_service_areas_fl,
"Population",
"GEOID10", "ORIG_ID")
# Create the mclp model
# Maximize the total coverage (binary polygon) using at most 5 out of 8 facilities
logger.info("Creating MCLP model...")
mclp = covering.create_mclp_model(binary_coverage_polygon, {"total": 5})
# Solve the model using GLPK
logger.info("Solving MCLP...")
mclp.solve(pulp.GLPK())
# Get the unique ids of the 5 facilities chosen
logger.info("Extracting results")
ids = utilities.get_ids(mclp, "facility_service_areas")
# Generate a query that could be used as a definition query or selection in arcpy
select_query = arcpy_analysis.generate_query(ids, unique_field_name="ORIG_ID")
logger.info("Output query to use to generate maps is: {}".format(select_query))
# Determine how much demand is covered by the results
facility_service_areas_fl.definitionQuery = select_query
total_coverage = arcpy_analysis.get_covered_demand(demand_polygon_fl, "Population", "binary",
facility_service_areas_fl)
logger.info("{0:.2f}% of demand is covered".format((100 * total_coverage) / binary_coverage_polygon["totalDemand"]))
| {
"content_hash": "8e3d535c70b92f04dd99eabc30cce2da",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 129,
"avg_line_length": 52.64150943396226,
"alnum_prop": 0.6770609318996416,
"repo_name": "apulverizer/pyspatialopt",
"id": "87fbff6d2c39de39dc6e8bb02d83339923e7f27d",
"size": "2814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/binary_mclp_arcpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116578"
}
],
"symlink_target": ""
} |
from testagent.parser.parser import Parser
from testagent.parser.collector_parser import CollectorParser
from testagent.parser.testcase_parser import TestCaseParser
from testagent.parser.testinstance_parser import TestInstanceParser
| {
"content_hash": "462cfa4e3d48e3c9b61f823209306f1a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 67,
"avg_line_length": 58.25,
"alnum_prop": 0.8841201716738197,
"repo_name": "patriziotufarolo/testagent",
"id": "b691058f33162f9731b944044c198ac45cb1fd31",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testagent/parser/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1704"
},
{
"name": "Python",
"bytes": "97165"
}
],
"symlink_target": ""
} |
bl_info = {
"name": "Simple 3D-Coat Applink",
"author": "Kalle-Samuli Riihikoski (haikalle), Mifth",
"version": (0, 4, 0),
"blender": (2, 93, 0),
"location": "3D Viewport",
"description": "Transfer data between 3D-Coat/Blender",
"warning": "",
"wiki_url": "https://3dcoat.com/forum/index.php?/topic/15481-blender-28-applink-simple3dcoat-fork-of-the-official/",
"tracker_url": "",
"category": "Import-Export"}
from . import simple_coat
import bpy
from bpy.props import *
classes = (
simple_coat,
)
def register():
for cls in classes:
cls.register()
def unregister():
for cls in classes:
cls.unregister()
if __name__ == "__main__":
register()
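# For illustration, the add-on can also be enabled from Blender's Python
# console; the module name below is assumed from this file's folder.
#
#     import bpy
#     bpy.ops.preferences.addon_enable(module="io_simple_3dcoat")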
| {
"content_hash": "ee356479ca785634427486dcab2dd64b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 124,
"avg_line_length": 22.02857142857143,
"alnum_prop": 0.5667963683527886,
"repo_name": "mifth/mifthtools",
"id": "e5dc39200fec558dbfb370e91f44086ec8127d89",
"size": "1554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blender/addons/2.8/io_simple_3dcoat/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "MAXScript",
"bytes": "193"
},
{
"name": "Python",
"bytes": "1365496"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.core.validators import RegexValidator
from models import *
from serializers import *
from util import *
class LoginForm(forms.Form):
username = forms.CharField(required=True)
password = forms.CharField(required=True)
platform = forms.CharField(required=False)
device_identifier = forms.CharField(required=False)
def handle(self, request=None):
try:
username = self.cleaned_data['username']
password = self.cleaned_data['password']
platform = self.cleaned_data['platform']
device_identifier = self.cleaned_data['device_identifier']
create_device = True
if platform:
platform = platform.upper()
if not platform or platform not in ['AND', 'IOS', 'WPH'] or not device_identifier or len(device_identifier) < 1:
create_device = False
token = obtain_token(username, password)
if not token:
return base_response(False, 'Fail to login, verify your credentials.')
user = Token.objects.get(key=token).user
user_serializer = UserSerializer(user)
if create_device:
device_user = Device.objects.filter(user=user, platform=platform, device_identifier=device_identifier).first()
if not device_user:
device_user = Device(user=user, platform=platform, device_identifier=device_identifier)
device_user.save()
response = base_response(True, 'Success to login.')
response['user'] = user_serializer.data
response['user']['token'] = token
return response
except Exception as e:
return base_response(False, 'Fail to login, verify your data.')
class CreateAccountForm(forms.Form):
firstname = forms.CharField(required=True)
lastname = forms.CharField(required=True)
username = forms.CharField(required=True)
email = forms.CharField(required=True)
password = forms.CharField(required=True)
    def handle(self, request=None):
        # initialized up front so the except block below can always reference it
        delete_error = False
        try:
firstname = self.cleaned_data['firstname']
lastname = self.cleaned_data['lastname']
username = self.cleaned_data['username']
email = self.cleaned_data['email']
password = self.cleaned_data['password']
# Try to get User
user = find_user(username, email)
if user:
return base_response(False, 'This account exists, try other credentials.')
delete_error = True
current_user = User.objects.create_user(username=username, email=email, password=password, first_name=firstname, last_name=lastname, is_staff=True)
current_user.save()
# If you need to permit user login in admin, I think that its necessary to add the created user to a group
# group = Group.objects.filter(name='<groupname>').first()
# if group: group.user_set.add(current_user)
            return base_response(True, ' '.join(['Welcome', firstname]))  # join is a method call, not a subscription
except Exception as e:
if delete_error:
try:
current_user.delete()
except:
pass
print str(e)
return base_response(False, 'Fail to create account.')
class TeamsPagination(forms.Form):
token = forms.CharField(required=True)
username = forms.CharField(required=True)
init = forms.IntegerField(required=False)
threshould = forms.IntegerField(required=False)
def clean(self):
cleaned_data = super(TeamsPagination, self).clean()
for key, value in cleaned_data.items():
if not value:
cleaned_data[key] = self.initial[key]
return cleaned_data
def handle(self, request=None):
token = self.cleaned_data['token']
username = self.cleaned_data['username']
init = self.cleaned_data['init']
threshould = self.cleaned_data['threshould']
valid_user = Token.objects.filter(key=token, user__username=username).first()
if not valid_user:
return base_response(False, 'Invalid credentials.')
teams = Team.objects.all().order_by('id')[init:init+threshould]
teams_serializer = TeamSerializer(teams, many=True)
response = base_response(True, 'Success on list teams')
response['teams'] = teams_serializer.data
return response
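# Illustration of the intended call pattern for these handle() forms; the view
# wiring and the initial values are assumptions, not part of this module.
#
#     form = TeamsPagination(request.POST, initial={'init': 0, 'threshould': 20})
#     if form.is_valid():
#         response_dict = form.handle(request)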
| {
"content_hash": "c5f91afa8efe5dfffde154b5741f45d3",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 150,
"avg_line_length": 31.69047619047619,
"alnum_prop": 0.721763085399449,
"repo_name": "brunogabriel/mobile-rest-web",
"id": "83592a0a0910118d0e1a05a11aa50288b550d502",
"size": "4018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrestweb/webapp/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19260"
}
],
"symlink_target": ""
} |
from zerver.lib.test_classes import WebhookTestCase
class PivotalV3HookTests(WebhookTestCase):
STREAM_NAME = 'pivotal'
URL_TEMPLATE = "/api/v1/external/pivotal?stream={stream}&api_key={api_key}"
def test_accepted(self) -> None:
expected_topic = 'My new Feature story'
expected_message = 'Leo Franchi accepted "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573).'
self.check_webhook(
"accepted", expected_topic, expected_message, content_type="application/xml"
)
def test_commented(self) -> None:
expected_topic = 'Comment added'
expected_message = 'Leo Franchi added comment: "FIX THIS NOW" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573).'
self.check_webhook(
"commented", expected_topic, expected_message, content_type="application/xml"
)
def test_created(self) -> None:
expected_topic = 'My new Feature story'
expected_message = 'Leo Franchi added "My new Feature story" \
(unscheduled feature):\n\n~~~ quote\nThis is my long description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573).'
self.check_webhook(
"created", expected_topic, expected_message, content_type="application/xml"
)
def test_delivered(self) -> None:
expected_topic = 'Another new story'
expected_message = 'Leo Franchi delivered "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289).'
self.check_webhook(
"delivered", expected_topic, expected_message, content_type="application/xml"
)
def test_finished(self) -> None:
expected_topic = 'Another new story'
expected_message = 'Leo Franchi finished "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289).'
self.check_webhook(
"finished", expected_topic, expected_message, content_type="application/xml"
)
def test_moved(self) -> None:
expected_topic = 'My new Feature story'
expected_message = 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573).'
self.check_webhook(
"moved", expected_topic, expected_message, content_type="application/xml"
)
def test_rejected(self) -> None:
expected_topic = 'Another new story'
expected_message = 'Leo Franchi rejected "Another new story" with comments: \
"Not good enough, sorry" [(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289).'
self.check_webhook(
"rejected", expected_topic, expected_message, content_type="application/xml"
)
def test_started(self) -> None:
expected_topic = 'Another new story'
expected_message = 'Leo Franchi started "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289).'
self.check_webhook(
"started", expected_topic, expected_message, content_type="application/xml"
)
def test_created_estimate(self) -> None:
expected_topic = 'Another new story'
expected_message = 'Leo Franchi added "Another new story" \
(unscheduled feature worth 2 story points):\n\n~~~ quote\nSome loong description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289).'
self.check_webhook(
"created_estimate", expected_topic, expected_message, content_type="application/xml"
)
def test_type_changed(self) -> None:
expected_topic = 'My new Feature story'
expected_message = 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573).'
self.check_webhook(
"type_changed", expected_topic, expected_message, content_type="application/xml"
)
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data('pivotal', fixture_name, file_type='xml')
class PivotalV5HookTests(WebhookTestCase):
STREAM_NAME = 'pivotal'
URL_TEMPLATE = "/api/v1/external/pivotal?stream={stream}&api_key={api_key}"
def test_accepted(self) -> None:
expected_topic = '#63486316: Story of the Year'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **unstarted** to **accepted**"""
self.check_webhook(
"accepted", expected_topic, expected_message, content_type="application/xml"
)
def test_commented(self) -> None:
expected_topic = '#63486316: Story of the Year'
expected_message = """Leo Franchi added a comment to [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
~~~quote
A comment on the story
~~~"""
self.check_webhook(
"commented", expected_topic, expected_message, content_type="application/xml"
)
def test_created(self) -> None:
expected_topic = '#63495662: Story that I created'
expected_message = """Leo Franchi created bug: [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story that I created](http://www.pivotaltracker.com/story/show/63495662)
* State is **unscheduled**
* Description is
> What a description"""
self.check_webhook(
"created", expected_topic, expected_message, content_type="application/xml"
)
def test_delivered(self) -> None:
expected_topic = '#63486316: Story of the Year'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **accepted** to **delivered**"""
self.check_webhook(
"delivered", expected_topic, expected_message, content_type="application/xml"
)
def test_finished(self) -> None:
expected_topic = '#63486316: Story of the Year'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **delivered** to **accepted**"""
self.check_webhook(
"finished", expected_topic, expected_message, content_type="application/xml"
)
def test_moved(self) -> None:
expected_topic = '#63496066: Pivotal Test'
expected_message = """Leo Franchi moved [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066) from **unstarted** to **unscheduled**."""
self.check_webhook(
"moved", expected_topic, expected_message, content_type="application/xml"
)
def test_rejected(self) -> None:
expected_topic = '#63486316: Story of the Year'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* Comment added:
~~~quote
Try again next time
~~~
* state changed from **delivered** to **rejected**"""
self.check_webhook(
"rejected", expected_topic, expected_message, content_type="application/xml"
)
def test_started(self) -> None:
expected_topic = '#63495972: Fresh Story'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Fresh Story](http://www.pivotaltracker.com/story/show/63495972):
* state changed from **unstarted** to **started**"""
self.check_webhook(
"started", expected_topic, expected_message, content_type="application/xml"
)
def test_created_estimate(self) -> None:
expected_topic = '#63496066: Pivotal Test'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate is now **3 points**"""
self.check_webhook(
"created_estimate", expected_topic, expected_message, content_type="application/xml"
)
def test_type_changed(self) -> None:
expected_topic = '#63496066: Pivotal Test'
expected_message = """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate changed from 3 to **0 points**
* type changed from **feature** to **bug**"""
self.check_webhook(
"type_changed", expected_topic, expected_message, content_type="application/xml"
)
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data('pivotal', f"v5_{fixture_name}", file_type='json')
| {
"content_hash": "f829bb434b0348857046be27e421fea1",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 218,
"avg_line_length": 48.6951871657754,
"alnum_prop": 0.6634087414891281,
"repo_name": "showell/zulip",
"id": "40c087d500e3c0ca8803973ecb492bd32b1bdf7e",
"size": "9106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/webhooks/pivotal/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import re
import sys
from pathlib import Path
import yaml
from rich.console import Console
if __name__ not in ("__main__", "__mp_main__"):
raise SystemExit(
"This file is intended to be executed as an executable program. You cannot use it as a module."
f"To run this script, run the ./{__file__} command [FILE] ..."
)
console = Console(color_system="standard", width=200)
AIRFLOW_SOURCES_ROOT = Path(__file__).parents[3].resolve()
EXAMPLE_DAGS_URL_MATCHER = re.compile(
r"^(.*)(https://github.com/apache/airflow/tree/(.*)/airflow/providers/(.*)/example_dags)(/?>.*)$"
)
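# An illustrative line the matcher above is meant to catch (URL invented for
# the example); group 3 captures the ref and group 4 the provider path:
#
#   `Example DAGs <https://github.com/apache/airflow/tree/main/airflow/providers/amazon/example_dags>`__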
def get_provider_and_version(url_path: str) -> tuple[str, str]:
candidate_folders = url_path.split("/")
while candidate_folders:
try:
with open(
(AIRFLOW_SOURCES_ROOT / "airflow" / "providers").joinpath(*candidate_folders)
/ "provider.yaml"
) as f:
provider_info = yaml.safe_load(f)
version = provider_info["versions"][0]
provider = "-".join(candidate_folders)
while provider.endswith("-"):
provider = provider[:-1]
return provider, version
except FileNotFoundError:
candidate_folders = candidate_folders[:-1]
console.print(
f"[red]Bad example path: {url_path}. Missing "
f"provider.yaml in any of the 'airflow/providers/{url_path}' folders. [/]"
)
sys.exit(1)
def replace_match(file: Path, line: str) -> str | None:
match = EXAMPLE_DAGS_URL_MATCHER.match(line)
if match:
url_path_to_dir = match.group(4)
folders = url_path_to_dir.split("/")
example_dags_folder = (AIRFLOW_SOURCES_ROOT / "airflow" / "providers").joinpath(
*folders
) / "example_dags"
provider, version = get_provider_and_version(url_path_to_dir)
proper_system_tests_url = (
f"https://github.com/apache/airflow/tree/providers-{provider}/{version}"
f"/tests/system/providers/{url_path_to_dir}"
)
if not example_dags_folder.exists():
if proper_system_tests_url in file.read_text():
console.print(f'[yellow] Removing from {file}[/]\n{line.strip()}')
return None
else:
new_line = re.sub(EXAMPLE_DAGS_URL_MATCHER, r"\1" + proper_system_tests_url + r"\5", line)
if new_line != line:
console.print(f'[yellow] Replacing in {file}[/]\n{line.strip()}\n{new_line.strip()}')
return new_line
return line
def find_matches(_file: Path):
new_lines = []
lines = _file.read_text().splitlines(keepends=True)
    for line in lines:
new_line = replace_match(_file, line)
if new_line is not None:
new_lines.append(new_line)
_file.write_text("".join(new_lines))
if __name__ == '__main__':
for file in sys.argv[1:]:
find_matches(Path(file))
| {
"content_hash": "63a30736cde73a8892e5826670d25bc7",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 106,
"avg_line_length": 34.91954022988506,
"alnum_prop": 0.5829493087557603,
"repo_name": "cfei18/incubator-airflow",
"id": "efd8d5878e6897f70c8ee0161bc3bc880263afa3",
"size": "3845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/ci/pre_commit/pre_commit_update_example_dags_paths.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from celery import shared_task
from django.core.mail import send_mail
from backend import settings
from backend.utils import hash_email
from v1.models import *
import tempfile
import json
from django.template.loader import render_to_string
from django.template import Context
# TODO: send HTML alternative as well..
# http://stackoverflow.com/questions/2809547/creating-email-templates-with-django
def _get_title(poll):
max_title_length = 128
return (lambda q: q[:max_title_length] + '...' * (len(q) >= max_title_length))(poll.question)
def _build_ctx(user_ctx):
ctx = {
'footer': """Thanks,
SimpleSTV - https://github.com/szborows/simplestv
(this email was generated automatically. please don't reply to it)"""
}
ctx.update(user_ctx)
return Context(ctx)
def _send_email_to_poll_author(poll, num_recipients):
title = _get_title(poll)
ctx = _build_ctx({
'title': title,
'description': poll.description,
'num_recipients': num_recipients,
'deadline': str(poll.deadline),
'url': '{0}/#/p/results/{1}'.format(settings.SIMPLESTV_URL, poll.secret)})
body = render_to_string('poll_dashboard.txt', ctx)
send_mail('Poll created: ' + title, body, settings.DEFAULT_FROM_EMAIL, [poll.author_email], fail_silently=False)
def _send_email_to_poll_recipient(poll, recipient):
title = _get_title(poll)
ctx = _build_ctx({
'title': title,
'description': poll.description,
'deadline': poll.deadline,
'author_email': poll.author_email, # TODO: add author displayName
'url': '{0}/#/p/{1}/{2}'.format(
settings.SIMPLESTV_URL,
poll.hash_id,
poll.allowed_hashes.get(value=hash_email(recipient)))})
body = render_to_string('poll_invitation.txt', ctx)
send_mail('Poll invitation: ' + title, body, settings.DEFAULT_FROM_EMAIL, [recipient], fail_silently=False)
@shared_task
def send_emails(poll, recipients):
_send_email_to_poll_author(poll, len(recipients))
for recipient in recipients:
_send_email_to_poll_recipient(poll, recipient)
if not poll.sent_emails_json:
sent_emails = []
else:
sent_emails = json.loads(poll.sent_emails_json)
sent_emails.append(recipient)
poll.sent_emails_json = json.dumps(sent_emails)
poll.save()
def _send_poll_close_email_to_author(poll, deadline):
    # `deadline` acts as a flag: True when the poll closed because its deadline
    # passed, False when it closed because everyone voted (see the callers below)
title = _get_title(poll)
ctx = _build_ctx({
'title': title,
'description': poll.description,
'deadline': deadline,
'url': '{0}/#/p/results/{1}'.format(settings.SIMPLESTV_URL, poll.secret)})
body = render_to_string('poll_closed.txt', ctx)
send_mail('Poll closed: ' + title, body, settings.DEFAULT_FROM_EMAIL, [poll.author_email], fail_silently=False)
def _send_poll_failed_email_to_author(poll):
title = _get_title(poll)
ctx = _build_ctx({
'title': title,
'description': poll.description,
'url': '{0}/#/p/results/{1}'.format(settings.SIMPLESTV_URL, poll.secret)})
body = render_to_string('poll_failed.txt', ctx)
send_mail('Poll failed: ' + title, body, settings.DEFAULT_FROM_EMAIL, [poll.author_email], fail_silently=False)
@shared_task
def send_final_email_due_to_deadline(poll):
if poll.num_invited == len(poll.allowed_hashes.all()):
_send_poll_failed_email_to_author(poll)
else:
_send_poll_close_email_to_author(poll, True)
@shared_task
def send_final_email_due_to_voter_turnover(poll):
_send_poll_close_email_to_author(poll, False)
def getWinnersFromOpenStvOutput(output, choices):
lines = output.split('\n')
if len(lines) < 3:
return None
winner_indices = [int(x.strip()) for x in lines[-2].split(',')]
return [{'id': choices[index].id, 'value': choices[index].value} for index in winner_indices]
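# getWinnersFromOpenStvOutput expects the wrapped solver to print the winning
# candidate indices, comma-separated, on the second-to-last line of its output.
# Illustrative shape (values assumed, not taken from a real run):
#
#   ... solver log ...
#   0, 2
#   <trailing empty line>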
@shared_task
def send_reminder(poll):
title = _get_title(poll)
ctx = _build_ctx({
'datetime_created': poll.datetime_created,
'title': title,
'description': poll.description,
'deadline': poll.deadline,
'author_email': poll.author_email, # TODO: add author displayName
'url': '{0}/#/p/results/{1}'.format(settings.SIMPLESTV_URL, poll.secret)})
body = render_to_string('poll_reminder.txt', ctx)
all_ = json.loads(poll.recipients_json)
voted = json.loads(poll.voted_json)
for r in all_:
if r not in voted:
            # the reminder goes to the recipient who has not voted, not the author
            send_mail('Poll reminder: ' + title, body, settings.DEFAULT_FROM_EMAIL, [r], fail_silently=False)
@shared_task
def send_last_reminder(poll):
title = _get_title(poll)
ctx = _build_ctx({
'title': title,
'description': poll.description,
        'deadline': poll.deadline,  # a bare `deadline` is undefined in this scope
'author_email': poll.author_email, # TODO: add author displayName
'url': '{0}/#/p/results/{1}'.format(settings.SIMPLESTV_URL, poll.secret)})
body = render_to_string('poll_last_reminder.txt', ctx)
all_ = json.loads(poll.recipients_json)
voted = json.loads(poll.voted_json)
for r in all_:
if r not in voted:
send_mail('Poll reminder: ' + title, body, settings.DEFAULT_FROM_EMAIL, [r], fail_silently=False)
@shared_task
def run_election(poll):
def write_blt_file(poll):
# TODO: someone might try to hack SimpleSTV here by preparing OpenSTV BLT file!
# an analysis of how to avoid this is needed!
fd, path = tempfile.mkstemp(prefix='simplestv', suffix='.blt')
fp = open(fd, 'w')
fp.write('{0} {1}\n'.format(len(poll.choices.all()), poll.num_seats))
possible_choices = [c.id for c in poll.choices.all()]
for ballot in Vote.objects.filter(poll=poll):
preference = json.loads(ballot.choices_json)
preference = [possible_choices.index(p) + 1 for p in preference]
fp.write('1 {} 0\n'.format(' '.join(map(str, preference))))
fp.write('0\n')
for candidate in poll.choices.all():
fp.write('"{}"\n'.format(candidate.value))
fp.write('"{}"\n'.format(poll.question))
fp.close()
return path
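# For reference (added, illustrative; not part of the original file): a BLT
# file produced above for three choices and one seat with two ballots would
# look roughly like
#   3 1
#   1 2 1 3 0
#   1 1 0
#   0
#   "Choice A"
#   "Choice B"
#   "Choice C"
#   "Question text"
# i.e. header, weighted ballots terminated by 0, an end-of-ballots marker,
# the quoted candidate names, and the quoted election title.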
blt_path = write_blt_file(poll)
with open(blt_path) as fp:
content = fp.read()
_, path = tempfile.mkstemp(prefix='simplestv', suffix='.out')
from openstv.openstv.wrapped3 import run
run(blt_path, path, poll.num_seats)
with open(path) as fp:
output = fp.read()
winners = getWinnersFromOpenStvOutput(output, poll.choices.all())
return json.dumps({
'blt': content,
'output': output,
'winners': winners
})
@shared_task
def run_final_election(poll):
result = json.loads(run_election(poll))
output = result['output']
poll.output = output
poll.winners = [
poll.choices.get(id=id_) for id_ in [
int(x['id']) for x in result['winners']
]
]
poll.save()
@shared_task
def test_celery():
return 997
| {
"content_hash": "12b6fe95b96018a9785f35b3ef5c1006",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 125,
"avg_line_length": 35.95918367346939,
"alnum_prop": 0.6322360953461975,
"repo_name": "szborows/simplestv",
"id": "7fe1f6590cbacbb64845bb7528ff80ab389c442d",
"size": "7048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/v1/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35769"
},
{
"name": "JavaScript",
"bytes": "54588"
},
{
"name": "Python",
"bytes": "32353"
},
{
"name": "Shell",
"bytes": "3170"
}
],
"symlink_target": ""
} |
import httplib
import json
import requests
from framework.config import settings
from framework.testbase import BaseTestCase
class RestJsonExample(BaseTestCase):
def setUp(self):
"""test setup"""
self.remove_books()
def tearDown(self):
"""test cleanup"""
self.remove_books()
@staticmethod
def create_book(filename):
with open(filename) as f:
book = json.load(f)
return requests.post(settings["books_url"], json=book, timeout=float(settings["http_timeout"]))
@staticmethod
def remove_books():
requests.delete(settings["books_url"], timeout=float(settings["http_timeout"]))
def test_book_creation(self):
"""verify book creation using httpsim.py in python-test"""
r = self.create_book("resources/requests/books/request1.json")
self.assertEqual(r.status_code, httplib.CREATED)
| {
"content_hash": "0163a140a30cdf6a24327977fbdf372f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 103,
"avg_line_length": 25.82857142857143,
"alnum_prop": 0.6626106194690266,
"repo_name": "bpuderer/python_test_env",
"id": "78863d8a022901411c67abb51b2e05e0787eb3d5",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rest_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12364"
}
],
"symlink_target": ""
} |
"""Basic route-based demonstration application.
Applications can be as simple or as complex and layered as your needs dictate.
"""
from web.dialect.route import route
class Root(object):
__dispatch__ = 'route'
@route('/')
def index(self):
return "Root handler."
@route('/page/{name}')
def page(self, name):
return "Page handler for: " + name
if __name__ == '__main__':
from web.core.application import Application
from marrow.server.http import HTTPServer
HTTPServer('127.0.0.1', 8080, application=Application(Root)).start()
| {
"content_hash": "53c035c75e1102d6abd242ed938acaf8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 23.6,
"alnum_prop": 0.6491525423728813,
"repo_name": "marrow/WebCore",
"id": "536cf5b9941fb91a2024f0bd97c2dcea765eca8e",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "example/future/routing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "747"
},
{
"name": "Makefile",
"bytes": "901"
},
{
"name": "Python",
"bytes": "133793"
}
],
"symlink_target": ""
} |
import abc
import logging
import sys
import six
import ujson
LOGGER = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseBackend(object):
"""Base abstract backend class.
Backend implementation should inherit and implement ``get_raw`` method.
:param parser: Callable that accept a string and parse it, default: ``ujson.loads``.
:param logger: :class:`logging.Logger`` instance.
"""
def __init__(self, parser=ujson.loads, logger=LOGGER):
self.__callbacks = []
self.__parser = parser
self._logger = logger
@abc.abstractmethod
def get_raw(self, path):
"""Get path value from backend as it is.
:path: key in the backend.
:return: path value as saved in backend or None if not found in backend.
"""
def get(self, path):
"""Get parsed path value in backend.
:path: key in the backend.
:return: path value parsed.
"""
data = self.get_raw(path)
return self._parse_raw_data(data)
def _parse_raw_data(self, data):
if data is None:
return {}
return self.__parser(data)
def add_listener(self, callback):
"""Add callback to be called when data change in the backend.
If the same callback is added more than once, then it will be notified more than once.
That is, no check is made to ensure uniqueness.
:param callback: Callable that accept one argument the new data.
"""
self.__callbacks.append(callback)
def remove_listener(self, callback):
"""Remove previously added callback.
If callback had been added more than once, then only the first occurrence will be removed.
:param callback: Callable as with ``:meth: add_listener``.
:raise ValueError: In case callback was not previously registered.
"""
self.__callbacks.remove(callback)
def _notify_listeners(self, value):
self._logger.debug('Notify listeners of new value: %r', value)
value = self._parse_raw_data(value)
last_exc = None
for callback in self.__callbacks:
try:
callback(value)
except Exception:
last_exc = sys.exc_info()
if last_exc:
self._logger.exception('Notify listeners raised an exception', exc_info=last_exc)
try:
six.reraise(*last_exc)
finally:
del last_exc
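# --- Added illustration (not part of distconfig) ---
# A minimal sketch of a concrete backend, assuming only the contract
# documented above: ``get_raw`` returns the stored raw string, or None when
# the path is unknown. The class name and the ``set`` helper are invented
# for this example.
class DictBackend(BaseBackend):
    """In-memory backend, useful for tests and demonstrations."""

    def __init__(self, data=None, **kwargs):
        super(DictBackend, self).__init__(**kwargs)
        self._data = data or {}

    def get_raw(self, path):
        # Return the raw (unparsed) value as stored, or None if missing.
        return self._data.get(path)

    def set(self, path, raw_value):
        # Simulate a change event: store the raw value and notify listeners,
        # which receive the parsed form.
        self._data[path] = raw_value
        self._notify_listeners(raw_value)
# Example: DictBackend({'cfg': '{"a": 1}'}).get('cfg') == {'a': 1}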
| {
"content_hash": "1232c5c6a7a3f9aa2df8671871b8b781",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 98,
"avg_line_length": 29.654761904761905,
"alnum_prop": 0.6085909273384184,
"repo_name": "mouadino/distconfig",
"id": "248e087bccfec9b7a8fa0ea80110ef4ce55be136",
"size": "2491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "distconfig/backends/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37158"
},
{
"name": "Shell",
"bytes": "1148"
}
],
"symlink_target": ""
} |
__revision__ = "src/engine/SCons/Script/Interactive.py 2014/07/05 09:42:21 garyo"
__doc__ = """
SCons interactive mode
"""
# TODO:
#
# This has the potential to grow into something with a really big life
# of its own, which might or might not be a good thing. Nevertheless,
# here are some enhancements that will probably be requested some day
# and are worth keeping in mind (assuming this takes off):
#
# - A command to re-read / re-load the SConscript files. This may
# involve allowing people to specify command-line options (e.g. -f,
# -I, --no-site-dir) that affect how the SConscript files are read.
#
# - Additional command-line options on the "build" command.
#
# Of the supported options that seemed to make sense (after a quick
# pass through the list), the ones that seemed likely enough to be
# used are listed in the man page and have explicit test scripts.
#
# These had code changed in Script/Main.py to support them, but didn't
# seem likely to be used regularly, so had no test scripts added:
#
# build --diskcheck=*
# build --implicit-cache=*
# build --implicit-deps-changed=*
# build --implicit-deps-unchanged=*
#
# These look like they should "just work" with no changes to the
# existing code, but like those above, look unlikely to be used and
# therefore had no test scripts added:
#
# build --random
#
# These I'm not sure about. They might be useful for individual
# "build" commands, and may even work, but they seem unlikely enough
# that we'll wait until they're requested before spending any time on
# writing test scripts for them, or investigating whether they work.
#
# build -q [??? is there a useful analog to the exit status?]
# build --duplicate=
# build --profile=
# build --max-drift=
# build --warn=*
# build --Y
#
# - Most of the SCons command-line options that the "build" command
# supports should be settable as default options that apply to all
# subsequent "build" commands. Maybe a "set {option}" command that
# maps to "SetOption('{option}')".
#
# - Need something in the 'help' command that prints the -h output.
#
# - A command to run the configure subsystem separately (must see how
# this interacts with the new automake model).
#
# - Command-line completion of target names; maybe even of SCons options?
# Completion is something that's supported by the Python cmd module,
# so this should be doable without too much trouble.
#
import cmd
import copy
import os
import re
import shlex
import sys
try:
import readline
except ImportError:
pass
class SConsInteractiveCmd(cmd.Cmd):
"""\
build [TARGETS] Build the specified TARGETS and their dependencies.
'b' is a synonym.
clean [TARGETS] Clean (remove) the specified TARGETS and their
dependencies. 'c' is a synonym.
exit Exit SCons interactive mode.
help [COMMAND] Prints help for the specified COMMAND. 'h' and
'?' are synonyms.
shell [COMMANDLINE] Execute COMMANDLINE in a subshell. 'sh' and '!'
are synonyms.
version Prints SCons version information.
"""
synonyms = {
'b' : 'build',
'c' : 'clean',
'h' : 'help',
'scons' : 'build',
'sh' : 'shell',
}
def __init__(self, **kw):
cmd.Cmd.__init__(self)
for key, val in kw.items():
setattr(self, key, val)
if sys.platform == 'win32':
self.shell_variable = 'COMSPEC'
else:
self.shell_variable = 'SHELL'
def default(self, argv):
print "*** Unknown command: %s" % argv[0]
def onecmd(self, line):
line = line.strip()
if not line:
print self.lastcmd
return self.emptyline()
self.lastcmd = line
if line[0] == '!':
line = 'shell ' + line[1:]
elif line[0] == '?':
line = 'help ' + line[1:]
if os.sep == '\\':
line = line.replace('\\', '\\\\')
argv = shlex.split(line)
argv[0] = self.synonyms.get(argv[0], argv[0])
if not argv[0]:
return self.default(line)
else:
try:
func = getattr(self, 'do_' + argv[0])
except AttributeError:
return self.default(argv)
return func(argv)
def do_build(self, argv):
"""\
build [TARGETS] Build the specified TARGETS and their
dependencies. 'b' is a synonym.
"""
import SCons.Node
import SCons.SConsign
import SCons.Script.Main
options = copy.deepcopy(self.options)
options, targets = self.parser.parse_args(argv[1:], values=options)
SCons.Script.COMMAND_LINE_TARGETS = targets
if targets:
SCons.Script.BUILD_TARGETS = targets
else:
# If the user didn't specify any targets on the command line,
# use the list of default targets.
SCons.Script.BUILD_TARGETS = SCons.Script._build_plus_default
nodes = SCons.Script.Main._build_targets(self.fs,
options,
targets,
self.target_top)
if not nodes:
return
# Call each of the Node's alter_targets() methods, which may
# provide additional targets that ended up as part of the build
# (the canonical example being a VariantDir() when we're building
# from a source directory) and which we therefore need their
# state cleared, too.
x = []
for n in nodes:
x.extend(n.alter_targets()[0])
nodes.extend(x)
# Clean up so that we can perform the next build correctly.
#
# We do this by walking over all the children of the targets,
# and clearing their state.
#
# We currently have to re-scan each node to find their
# children, because built nodes have already been partially
# cleared and don't remember their children. (In scons
# 0.96.1 and earlier, this wasn't the case, and we didn't
# have to re-scan the nodes.)
#
# Because we have to re-scan each node, we can't clear the
# nodes as we walk over them, because we may end up rescanning
# a cleared node as we scan a later node. Therefore, only
# store the list of nodes that need to be cleared as we walk
# the tree, and clear them in a separate pass.
#
# XXX: Someone more familiar with the inner workings of scons
# may be able to point out a more efficient way to do this.
SCons.Script.Main.progress_display("scons: Clearing cached node information ...")
seen_nodes = {}
def get_unseen_children(node, parent, seen_nodes=seen_nodes):
def is_unseen(node, seen_nodes=seen_nodes):
return node not in seen_nodes
return list(filter(is_unseen, node.children(scan=1)))
def add_to_seen_nodes(node, parent, seen_nodes=seen_nodes):
seen_nodes[node] = 1
# If this file is in a VariantDir and has a
# corresponding source file in the source tree, remember the
# node in the source tree, too. This is needed in
# particular to clear cached implicit dependencies on the
# source file, since the scanner will scan it if the
# VariantDir was created with duplicate=0.
try:
rfile_method = node.rfile
except AttributeError:
return
else:
rfile = rfile_method()
if rfile != node:
seen_nodes[rfile] = 1
for node in nodes:
walker = SCons.Node.Walker(node,
kids_func=get_unseen_children,
eval_func=add_to_seen_nodes)
n = walker.get_next()
while n:
n = walker.get_next()
for node in seen_nodes.keys():
# Call node.clear() to clear most of the state
node.clear()
# node.clear() doesn't reset node.state, so call
# node.set_state() to reset it manually
node.set_state(SCons.Node.no_state)
node.implicit = None
# Debug: Uncomment to verify that all Taskmaster reference
# counts have been reset to zero.
#if node.ref_count != 0:
# from SCons.Debug import Trace
# Trace('node %s, ref_count %s !!!\n' % (node, node.ref_count))
SCons.SConsign.Reset()
SCons.Script.Main.progress_display("scons: done clearing node information.")
def do_clean(self, argv):
"""\
clean [TARGETS] Clean (remove) the specified TARGETS
and their dependencies. 'c' is a synonym.
"""
return self.do_build(['build', '--clean'] + argv[1:])
def do_EOF(self, argv):
print
self.do_exit(argv)
def _do_one_help(self, arg):
try:
# If help_<arg>() exists, then call it.
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
func = getattr(self, 'do_' + arg)
except AttributeError:
doc = None
else:
doc = self._doc_to_help(func)
if doc:
sys.stdout.write(doc + '\n')
sys.stdout.flush()
else:
doc = self._strip_initial_spaces(func())
if doc:
sys.stdout.write(doc + '\n')
sys.stdout.flush()
def _doc_to_help(self, obj):
doc = obj.__doc__
if doc is None:
return ''
return self._strip_initial_spaces(doc)
def _strip_initial_spaces(self, s):
#lines = s.split('\n')
lines = s.split('\n')
spaces = re.match(' *', lines[0]).group(0)
#def strip_spaces(l):
# if l.startswith(spaces):
# l = l[len(spaces):]
# return l
#return '\n'.join([ strip_spaces(l) for l in lines ])
def strip_spaces(l, spaces=spaces):
if l[:len(spaces)] == spaces:
l = l[len(spaces):]
return l
lines = list(map(strip_spaces, lines))
return '\n'.join(lines)
def do_exit(self, argv):
"""\
exit Exit SCons interactive mode.
"""
sys.exit(0)
def do_help(self, argv):
"""\
help [COMMAND] Prints help for the specified COMMAND. 'h'
and '?' are synonyms.
"""
if argv[1:]:
for arg in argv[1:]:
if self._do_one_help(arg):
break
else:
# If bare 'help' is called, print this class's doc
# string (if it has one).
doc = self._doc_to_help(self.__class__)
if doc:
sys.stdout.write(doc + '\n')
sys.stdout.flush()
def do_shell(self, argv):
"""\
shell [COMMANDLINE] Execute COMMANDLINE in a subshell. 'sh' and
'!' are synonyms.
"""
import subprocess
argv = argv[1:]
if not argv:
argv = os.environ[self.shell_variable]
try:
# Per "[Python-Dev] subprocess insufficiently platform-independent?"
# http://mail.python.org/pipermail/python-dev/2008-August/081979.html
# Doing the right thing with an argument list currently
# requires different shell= values on Windows and Linux.
p = subprocess.Popen(argv, shell=(sys.platform=='win32'))
except EnvironmentError, e:
sys.stderr.write('scons: %s: %s\n' % (argv[0], e.strerror))
else:
p.wait()
def do_version(self, argv):
"""\
version Prints SCons version information.
"""
sys.stdout.write(self.parser.version + '\n')
def interact(fs, parser, options, targets, target_top):
c = SConsInteractiveCmd(prompt = 'scons>>> ',
fs = fs,
parser = parser,
options = options,
targets = targets,
target_top = target_top)
c.cmdloop()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "d7c57235730059240b404a24e36c39ae",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 89,
"avg_line_length": 35.687845303867405,
"alnum_prop": 0.5458626828701912,
"repo_name": "Bforartists/scons",
"id": "b8008821807495cf0ecdb0257d76ae83e2836759",
"size": "14030",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scons-local/SCons/Script/Interactive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1914323"
}
],
"symlink_target": ""
} |
import numpy as np
from superbol.fit_blackbody import (bb_flux_integrated, bb_total_flux,
dbb_flux_integrated_dT, dbb_total_flux_dT)
def integrate_fqbol(wavelengths, fluxes, flux_uncertainties):
"""Calculate the trapezoidal rule integral of the observed `fluxes`.
The trapezoidal rule integrates the data by assuming the function is linear between observed points, and then integrates under those line segments.
The numpy function `trapz` is used to perform the integration, but the uncertainty in the integral due to uncertainties in the observed flux is calculated by hand using standard error propagation techniques.
Args:
wavelengths (list): List of wavelengths at which the flux was observed.
fluxes (list): List of observed fluxes.
flux_uncertainties (list): List of uncertainties in each observed flux.
Returns:
tuple: 2-tuple of floats.
* The value of the integral
* The uncertainty in the integral due to uncertainties in the fluxes.
(fqbol, fqbol_uncertainty)
"""
fqbol = np.trapz(fluxes, wavelengths)
quad_terms = np.array([])
for i, uncertainty in enumerate(flux_uncertainties):
if i == 0:
term = 0.5 * (wavelengths[i + 1] - wavelengths[i]) * uncertainty
quad_terms = np.append(quad_terms, term)
elif i == len(flux_uncertainties) - 1:
term = 0.5 * (wavelengths[i] - wavelengths[i - 1]) * uncertainty
quad_terms = np.append(quad_terms, term)
else:
term = 0.5 * (
wavelengths[i + 1] - wavelengths[i - 1]) * uncertainty
quad_terms = np.append(quad_terms, term)
fqbol_uncertainty = np.sqrt(np.sum(quad_terms ** 2))
return fqbol, fqbol_uncertainty
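# Added worked example (invented values, not part of the module): for three
# equally spaced points the endpoint uncertainties are weighted by half the
# adjacent spacing and the interior point by half the full span, so
#   integrate_fqbol([1000.0, 2000.0, 3000.0], [1.0, 2.0, 1.5],
#                   [0.1, 0.1, 0.1])
# returns fqbol = 3250.0 with uncertainty
#   sqrt((500 * 0.1)**2 + (1000 * 0.1)**2 + (500 * 0.1)**2) ~= 122.47.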
def ir_correction(temperature, T_err, angular_radius, rad_err, longest_wl):
"""Apply correction for unobserved flux in the IR.
After the temperature and angular radius has been found through fitting a
blackbody to the observed fluxes, this function takes those values and
integrates under the fitted blackbody function from the longest observed
wavelength out to :math:`\\lambda = \\infty`.
Args:
temperature (float): Best fit blackbody temperature in Kelvin
T_err (float): Uncertainty in best fit blackbody temperature in Kelvin
angular_radius (float): Best fit blackbody angular radius
rad_err (float): Uncertainty in best fit blackbody angular radius
longest_wl (float): Longest observed wavelength
Returns:
tuple: 2-tuple
* (float): The IR correction in :math:`erg \\; s^{-1} cm^{-2}`
* (float): The uncertainty in the IR correction in the same units
"""
ir_correction = bb_total_flux(temperature,
angular_radius) - bb_flux_integrated(
longest_wl, temperature, angular_radius)
T_errterm = (dbb_total_flux_dT(temperature, angular_radius) -
dbb_flux_integrated_dT(longest_wl, temperature,
angular_radius)) * T_err
rad_errterm = 2 * ir_correction / angular_radius * rad_err
ir_corr_err = np.sqrt(T_errterm**2 + rad_errterm**2)
return ir_correction, ir_corr_err
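# Added note: the factor of 2 in rad_errterm above is consistent with the
# blackbody flux scaling as the square of the angular radius; for
# F = theta**2 * g(T), dF/dtheta = 2 * theta * g(T) = 2 * F / theta, hence
# the term 2 * ir_correction / angular_radius * rad_err.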
def uv_correction_blackbody(temperature, T_err, angular_radius, rad_err,
shortest_wl):
"""Apply correction for unobserved flux in the UV using the blackbody fit.
After the temperature and angular radius have been found through fitting a
blackbody to the observed fluxes, this function takes those values and
integrates under the fitted blackbody from the shortest observed wavelength
down to :math:`\\lambda = 0`.
Args:
temperature (float): Best fit blackbody temperature in Kelvin
T_err (float): Uncertainty in best fit blackbody temperature in Kelvin
angular_radius (float): Best fit blackbody angular radius
rad_err (float): Uncertainty in best fit blackbody angular radius
shortest_wl (float): Shortest observed wavelength
Returns:
tuple: 2-tuple
* (float): The UV correction in :math:`erg \\; s^{-1} cm^{-2}`
* (float): The uncertainty in the UV correction in the same units
"""
uv_correction = bb_flux_integrated(shortest_wl, temperature,
angular_radius)
T_errterm = dbb_flux_integrated_dT(shortest_wl, temperature,
angular_radius) * T_err
rad_errterm = 2 * uv_correction / angular_radius * rad_err
uv_corr_err = np.sqrt(T_errterm**2 + rad_errterm**2)
return uv_correction, uv_corr_err
def uv_correction_linear(shortest_wl, shortest_flux, shortest_flux_err):
"""Apply correction for unobserved flux in the UV using a linear function.
This function integrates under a straight line from the shortest observed
wavelength down to :math:`f(\\lambda) = 0` at :math:`\\lambda = 2000`
Angstroms. This approximates the effects of line blanketing in the UV as in
Bersten & Hamuy (2009).
Args:
shortest_wl (float): Shortest observed wavelength
shortest_flux (float): Flux at shortest observed wavelength
shortest_flux_err (float): Uncertainty in the shortest observed flux
Returns:
tuple: 2-tuple
* (float): The UV correction in :math:`erg \\; s^{-1} cm^{-2}`
* (float): The uncertainty in the UV correction in the same units
"""
fluxes = [0.0, shortest_flux]
wavelengths = [2000.0, shortest_wl]
uv_correction = np.trapz(fluxes, wavelengths)
uv_correction_err = 0.5 * (shortest_wl - 2000.0) * shortest_flux_err
return uv_correction, uv_correction_err
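# Added worked example (invented values, not part of the module): the linear
# correction is just the area of a triangle with base (shortest_wl - 2000)
# and height shortest_flux, so
#   uv_correction_linear(3000.0, 4.0e-15, 1.0e-16)
# returns 0.5 * 1000 * 4.0e-15 = 2.0e-12 with uncertainty
# 0.5 * 1000 * 1.0e-16 = 5.0e-14.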
| {
"content_hash": "2b25a9e56b4eb84df3aebcab81a40260",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 211,
"avg_line_length": 41.61702127659574,
"alnum_prop": 0.6511588275391956,
"repo_name": "JALusk/SuperBoL",
"id": "c30d7f852d1a4a5f9c471acea886c6210bca4697",
"size": "5868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superbol/fbol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "94"
},
{
"name": "Python",
"bytes": "76175"
}
],
"symlink_target": ""
} |
from amazonia.classes.hosted_zone import HostedZone
from nose.tools import *
from troposphere import Template, ec2, Ref
template = vpc = None
def setup_resources():
"""
Create generic testing data
"""
global template
global vpc
template = Template()
vpc = template.add_resource(ec2.VPC('MyVPC',
CidrBlock='10.0.0.0/16',
EnableDnsSupport='true',
EnableDnsHostnames='true'))
def create_hosted_zone(domain, vpcs=None):
"""
Creates a hosted zone using the Amazonia class for testing.
:param domain: the domain to give the hosted zone
:param vpcs: a list of vpcs to attach the hosted zone to (making it private)
:return: A hosted zone object created from the amazonia class
"""
return HostedZone(template=template, domain=domain, vpcs=vpcs)
@with_setup(setup_resources)
def test_public_hosted_zone():
"""
Tests creation of a public hosted zone.
"""
domain = 'public.domain.'
hz = create_hosted_zone(domain)
assert_equals(hz.trop_hosted_zone.Name, domain)
@with_setup(setup_resources)
def test_private_hosted_zone():
"""
Tests creation of a private hosted zone.
"""
global vpc
domain = 'private.domain.'
hz = create_hosted_zone(domain, [Ref(vpc)])
assert_equals(type(hz.trop_hosted_zone.VPCs), type([]))
assert_equals(hz.trop_hosted_zone.Name, domain)
| {
"content_hash": "959fbc56ba7c6214769c2e58b9bfaf59",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 24.95,
"alnum_prop": 0.6279225116900468,
"repo_name": "GeoscienceAustralia/amazonia",
"id": "5a2a2032c48d8cf94d19c7202a000a5ae33d751b",
"size": "1517",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/unit_tests/test_hosted_zone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3123"
},
{
"name": "Makefile",
"bytes": "380"
},
{
"name": "Python",
"bytes": "423575"
},
{
"name": "Shell",
"bytes": "3478"
}
],
"symlink_target": ""
} |
"""A collection of classes, metaclasses and functions for Plugins
As a plugin developer: Subclass from one of the JB_Plugin classes and implement the abstract functions.
The plugin managers load and hold plugins.
"""
import abc
import inspect
import os
import sys
import traceback
from collections import OrderedDict
from jukeboxcore.log import get_logger
log = get_logger(__name__)
from jukeboxcore import errors
from jukeboxcore.constants import PLUGIN_CONFIG_DIR, CONFIG_EXT, BUILTIN_PLUGIN_PATH
from jukeboxcore.iniconf import load_config, get_core_config
class JB_Plugin(object):
"""Abstract Base Class for jukebox plugins.
Subclass this to create your own types of plugins. The name of the subclass will be the name
of the plugin itself, so be sure to pick a unique one.
Otherwise you will override an existing plugin (maybe that is your intent, in which case go ahead).
If you write a plugin, always subclass from a subclass of JB_Plugin
but not JB_Plugin directly!
For subclassing: you have to implement **init** and **uninit**!
Metadata:
This class has a few public attributes. Override them to supply metadata for your plugin.
User Config:
Every Plugin can have its own userpreference file.
The user preferences are ini-files that lie in the config folder
inside the pipeline user directory.
As a plugin developer, create a configspec file in the same folder as your plugin module.
Do this only if you need to use get_config().
"""
__metaclass__ = abc.ABCMeta
__UNLOADED = False
__LOADED = True
required = ()
"""The plugins required to run this one successfully.
Set this to a list of strings with the required classnames."""
author = None
"""The author of the plugin."""
copyright = None
"""Copyright information."""
license = None
""" License information."""
version = None
"""The version of the plugin."""
description = None
"""A descriptive text for the plugin."""
def __init__(self):
"""Constructs a new Plugin
:returns: None
:rtype: None
:raises: None
"""
self.__status = self.__UNLOADED
def __str__(self, ):
"""Return the plugin name
:returns: the plugin name
:rtype: str
:raises: None
"""
return self.name
def _load(self, ):
"""Loads the plugin
:raises: errors.PluginInitError
"""
try:
self.init()
except Exception as e:
log.exception("Load failed!")
raise errors.PluginInitError('%s' % e)
self.__status = self.__LOADED
def _unload(self, ):
"""Unloads the plugin
:raises: errors.PluginUninitError
"""
try:
self.uninit()
except Exception as e:
log.exception("Unload failed!")
raise errors.PluginUninitError('%s' % e)
self.__status = self.__UNLOADED
@abc.abstractmethod
def init(self, ):
"""Initialize the plugin
This function gets called when the plugin is loaded by the plugin manager.
It is abstract and has to be implemented in a subclass
:returns:
:rtype:
:raises:
"""
pass
@abc.abstractmethod
def uninit(self, ):
"""Uninitialize the plugin
This function gets called when the plugin is unloaded by the plugin manager.
It is abstract and has to be implemented in a subclass
:returns:
:rtype:
:raises:
"""
pass
def is_loaded(self, ):
"""Return True if the plugin is loaded
:returns: Returns False if the plugin is not loaded
:rtype: bool
:raises: None
"""
return self.__status
def get_config(self, ):
"""Return the user config for this plugin
You have to provide a configspec,
put the configspec file in the same folder as your plugin.
Name it like your class and put 'ini' as extension.
"""
# get the module of the plugin class
mod = sys.modules[self.__module__]
# get the file from where it was imported
modfile = mod.__file__
# get the module directory
specdir = os.path.dirname(modfile)
# get the classname
cname = self.__class__.__name__
# add the extension
confname = os.extsep.join((cname, CONFIG_EXT))
specpath = os.path.join(specdir, confname)
if not os.path.exists(specpath):
return None
confpath = os.path.join(PLUGIN_CONFIG_DIR, confname)
return load_config(confpath, specpath)
@property
def name(self, ):
"""Return the name of the plugin. Equivalent quering __class__.__name__
:returns: The name of the plugin
:rtype: str
:raises: None
"""
return self.__class__.__name__
class JB_StandalonePlugin(JB_Plugin):
"""Abstract plugin class for standalone addons.
Standalone addons feature a special run method and
can be run with the jukebox launcher.
The launcher will first initialize the plugin and then
call the run method.
For subclassing: you have to implement **init**, **uninit** and **run**!
"""
@abc.abstractmethod
def run(self, ):
"""Start the plugin. This method is also called by
the jukebox launcher.
:returns: None
:rtype: None
:raises: None
"""
pass
class JB_StandaloneGuiPlugin(JB_StandalonePlugin):
"""Abstract plugin class for standalone addons that need a gui.
Standalone addons feature a special run method and
can be run with the jukebox launcher.
The launcher will first initialize the plugin and then
call the run method.
The launcher will also initialize the gui before running the plugin.
For subclassing: you have to implement **init**, **uninit** and **run**!
"""
pass
class JB_CorePlugin(JB_Plugin):
"""Core plugin class
Core plugins should be loadable at all times and not require a
specific software to run.
For subclassing: you have to implement **init** and **uninit**!
"""
pass
class JB_CoreStandalonePlugin(JB_StandalonePlugin, JB_CorePlugin):
"""Core plugin for standalone addons.
Standalone addons feature a special run method and
can be run with the jukebox launcher.
The launcher will first initialize the plugin and then
call the run method.
For subclassing: you have to implement **init**, **uninit** and **run**!
"""
pass
class JB_CoreStandaloneGuiPlugin(JB_StandaloneGuiPlugin, JB_CoreStandalonePlugin):
"""Core plugin for standalone addons that also need a gui.
Standalone addons feature a special run method and
can be run with the jukebox launcher.
The launcher will first initialize the plugin and then
call the run method.
For subclassing: you have to implement **init**, **uninit** and **run**!
"""
pass
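# --- Added illustration (not shipped with jukebox-core) ---
# A hedged sketch of the smallest useful plugin. The class name doubles as
# the plugin name; an optional configspec would live next to the module as
# ``HelloPlugin.ini`` (see JB_Plugin.get_config above). All names below are
# invented for demonstration.
class HelloPlugin(JB_CorePlugin):
    """Example plugin that only logs on load/unload."""

    author = 'example author'
    version = '0.1.0'
    description = 'Minimal demonstration plugin.'

    def init(self):
        log.info('HelloPlugin initialized')

    def uninit(self):
        log.info('HelloPlugin uninitialized')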
class PluginManager(object):
"""Loads and unloads core plugins.
A plugin manager scans the plugin directories for plugins.
Only plugin types that are supported can be loaded.
If you need special plugins for a software, subclass JB_Plugin.
Then create a subclass of this plugin manager and override
supportedTypes. Core plugins should always be supported.
The gathering of plugins is done during initialisation.
To load the plugins, call load_plugins(). This will load
all found plugins.
"""
instance = None
"""PluginManager instance when using PluginManager.get() """
supportedTypes = [JB_CorePlugin, JB_CoreStandalonePlugin, JB_CoreStandaloneGuiPlugin]
""" A list of plugin classes, the manager can load.
Override this list in a subclass if you want to support more than just core plugins,
e.g. plugins that are meant for a specific software.
"""
@classmethod
def get(cls):
"""Return a PluginManager Instance.
This will always return the same instance. If the instance is not available
it will be created and returned.
There should only be one pluginmanager at a time. If you create a PluginManager with get()
and use get() on for example a MayaPluginManager,
the PluginManager instance is returned (not a MayaPluginManager).
:returns: always the same PluginManager
:rtype: PluginManager
:raises: None
"""
if not cls.instance:
PluginManager.instance = cls()
return cls.instance
def __init__(self, ):
"""Constructs a new PluginManager, use the get method in 99% of cases!
:raises: None
"""
pluginclasses = self.gather_plugins()
self.__plugins = OrderedDict()
for p in pluginclasses:
self.__plugins[p.__name__] = p()
def find_plugins(self, path):
"""Return a list with all plugins found in path
:param path: the directory with plugins
:type path: str
:returns: list of JB_Plugin subclasses
:rtype: list
:raises: None
"""
ext = os.extsep+'py'
files = []
for (dirpath, dirnames, filenames) in os.walk(path):
files.extend([os.path.join(dirpath, x) for x in filenames if x.endswith(ext)])
plugins = []
for f in files:
try:
mod = self.__import_file(f)
except Exception:
tb = traceback.format_exc()
log.debug("Importing plugin from %s failed!\n%s" % (f, tb))
continue
# get all classes in the imported file
members = inspect.getmembers(mod, lambda x: inspect.isclass(x))
# only get classes which are defined, not imported, in mod
classes = [m[1] for m in members if m[1].__module__ == mod.__name__]
for c in classes:
# if the class is derived from a supported type append it
# we test if it is a subclass of a supported type but not a supported type itself
# because that might be the abstract class
if any(issubclass(c, supported) for supported in self.supportedTypes)\
and c not in self.supportedTypes:
plugins.append(c)
return plugins
def gather_plugins(self):
"""Return all plugins that are found in the plugin paths
Looks in the envvar ``JUKEBOX_PLUGIN_PATH``.
:returns:
:rtype:
:raises:
"""
plugins = []
cfg = get_core_config()
pathenv = cfg['jukebox']['pluginpaths']
pathenv = os.pathsep.join((pathenv, os.environ.get("JUKEBOX_PLUGIN_PATH", "")))
paths = pathenv.split(os.pathsep)
# first find built-ins then the ones in the config, then the one from the environment
# so user plugins can override built-ins
for p in reversed(paths):
if p and os.path.exists(p): # in case of an empty string, we do not search!
plugins.extend(self.find_plugins(p))
return plugins
def load_plugins(self, ):
"""Loads all found plugins
:returns: None
:rtype: None
:raises: None
"""
for p in self.__plugins.values():
try:
self.load_plugin(p)
except errors.PluginInitError:
log.exception('Initializing the plugin: %s failed.' % p)
def load_plugin(self, p):
"""Load the specified plugin
:param p: The plugin to load
:type p: Subclass of JB_Plugin
:returns: None
:rtype: None
:raises: errors.PluginInitError
"""
if p.is_loaded():
return
# load required plugins first
reqnames = p.required
reqplugins = []
for name in reqnames:
try:
reqplugins.append(self.__plugins[name])
except KeyError as e:
log.error("Required Plugin %s not found. Cannot load %s." % (name, p))
raise errors.PluginInitError('Required Plugin %s not found. Cannot load %s. Reason: %s' % (name, p, e))
for plug in reqplugins:
try:
self.load_plugin(plug)
except errors.PluginInitError as e:
log.error("Required Plugin %s could not be loaded. Cannot load %s" % (plug, p))
raise errors.PluginInitError('Required Plugin %s could not be loaded. Cannot load %s. Reason: %s' % (plug,p, e))
# load the actual plugin
p._load()
log.info('Initialized the plugin: %s' % p)
def unload_plugins(self, ):
""" Unloads all loaded plugins
:returns: None
:rtype: None
:raises: None
"""
for p in self.__plugins.values():
if p.is_loaded():
try:
p._unload()
log.info('Uninitialized the plugin: %s' % p)
except errors.PluginUninitError:
log.error('Uninitialization of the plugin: %s failed.' % p)
def __import_file(self, f):
"""Import the specified file and return the imported module
:param f: the file to import
:type f: str
:returns: The imported module
:rtype: module
:raises: None
"""
directory, module_name = os.path.split(f)
module_name = os.path.splitext(module_name)[0]
path = list(sys.path)
sys.path.insert(0, directory)
module = __import__(module_name)
sys.path[:] = path  # restore the original search path
return module
def get_plugin(self, plugin):
"""Return the plugin instance for the given pluginname
:param plugin: Name of the plugin class
:type plugin: str
:returns: the plugin that matches the name
:rtype: JB_Plugin like
:raises: None
"""
return self.__plugins[plugin]
def get_all_plugins(self, ):
"""Return all plugins
:returns: a list of all plugins found by the manager
"""
return self.__plugins.values()
| {
"content_hash": "3b0c27c90d65f50e0a4945d1399ed62a",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 128,
"avg_line_length": 31.814317673378078,
"alnum_prop": 0.6069193446311792,
"repo_name": "JukeboxPipeline/jukebox-core",
"id": "fc878b60b2f6c362b28e60f18ccb648f3cc0a7e8",
"size": "14221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jukeboxcore/plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1221"
},
{
"name": "Python",
"bytes": "890248"
},
{
"name": "Shell",
"bytes": "962"
}
],
"symlink_target": ""
} |
import numpy as np
import tensorflow as tf
from mnist_loader import make_mnist_subset, load_mnist
from old_files.network import multilayer_perceptron, run_training_cycle, test_model
training_data, validation_data, test_data = load_mnist()
# Parameters
learning_rate = 0.001
n_training_epochs = 15
batch_size = 64
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Create model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
slice_pred = tf.slice(pred, [0, 0], [-1, 2])
slice_y = tf.slice(y, [0, 0], [-1, 2])
cost_zero_one = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=slice_pred, labels=slice_y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_zero_one)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
task = 0
train_x, train_y = make_mnist_subset(training_data, [task, task + 1])
test_x, test_y = make_mnist_subset(test_data, [task, task + 1])
scores = np.full(5, np.NaN)
train_x_01, train_y_01 = make_mnist_subset(training_data, [0, 1])
test_x_01, test_y_01 = make_mnist_subset(test_data, [0, 1])
train_x_23, train_y_23 = make_mnist_subset(training_data, [5, 6])
test_x_23, test_y_23 = make_mnist_subset(test_data, [5, 6])
################### TASK 1 #######################
sess1 = tf.Session()
with sess1 as sess:
sess.run(init)
run_training_cycle(sess, x, y, train_x_01, train_y_01, n_training_epochs, batch_size, optimizer, cost_zero_one)
test_model(x, y, slice_pred, slice_y, test_x_01, test_y_01)
################### TASK 2 #######################
task = 2
train_x, train_y = make_mnist_subset(training_data, [task, task + 1])
test_x, test_y = make_mnist_subset(test_data, [task, task + 1])
slice_pred = tf.slice(pred, [0, 2], [-1, 2])
slice_y = tf.slice(y, [0, 2], [-1, 2])
cost_zero_one = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=slice_pred, labels=slice_y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_zero_one)
sess2 = tf.Session()
with sess2 as sess:
sess.run(init)
run_training_cycle(sess, x, y, train_x_23, train_y_23, n_training_epochs, batch_size, optimizer, cost_zero_one)
test_model(x, y, slice_pred, slice_y, test_x_01, test_y_01)
slice_pred = tf.slice(pred, [0, 0], [-1, 2])
slice_y = tf.slice(y, [0, 0], [-1, 2])
test_model(x, y, slice_pred, slice_y, test_x_01, test_y_01)
train_x_01, train_y_01 = make_mnist_subset(training_data, [0, 1])
test_x_01, test_y_01 = make_mnist_subset(test_data, [0, 1])
train_x_23, train_y_23 = make_mnist_subset(training_data, [5, 6])
test_x_23, test_y_23 = make_mnist_subset(test_data, [5, 6])
################### TASK 1 #######################
sess1 = tf.Session()
init = tf.global_variables_initializer()
with sess1 as sess:
sess.run(init)
slice_pred = tf.slice(pred, [0, 2], [-1, 2])
slice_y = tf.slice(y, [0, 2], [-1, 2])
cost_zero_one = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=slice_pred, labels=slice_y))
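# (Added note) Snapshot the graph's variables before constructing the new
# optimizer so that only the Adam slot variables created below are
# initialized; re-running the global initializer here would also wipe the
# weights learned on the previous task.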
temp = set(tf.global_variables())
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_zero_one)
sess.run(tf.variables_initializer(set(tf.global_variables()) - temp))
run_training_cycle(sess, x, y, train_x_01, train_y_01, n_training_epochs, batch_size, optimizer, cost_zero_one)
test_model(x, y, slice_pred, slice_y, test_x_01, test_y_01)
################### TASK 2 #######################
task = 2
slice_pred = tf.slice(pred, [0, 2], [-1, 2])
slice_y = tf.slice(y, [0, 2], [-1, 2])
cost_zero_one = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=slice_pred, labels=slice_y))
temp = set(tf.global_variables())
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_zero_one)
print(set(tf.global_variables()) - temp)
sess.run(tf.variables_initializer(set(tf.global_variables()) - temp))
run_training_cycle(sess, x, y, train_x_23, train_y_23, n_training_epochs, batch_size, optimizer, cost_zero_one)
test_model(x, y, slice_pred, slice_y, test_x_01, test_y_01)
slice_pred = tf.slice(pred, [0, 0], [-1, 2])
slice_y = tf.slice(y, [0, 0], [-1, 2])
test_model(x, y, slice_pred, slice_y, test_x_01, test_y_01)
| {
"content_hash": "41673b0aa2486c2a51261ff4adfabafa",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 115,
"avg_line_length": 33.58441558441559,
"alnum_prop": 0.6490719257540604,
"repo_name": "jessegeerts/neural-nets",
"id": "6b3dc1eb77962d311214c9617ba09826ecef233b",
"size": "5172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_files/mnist_tf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1765413"
},
{
"name": "Python",
"bytes": "71010"
}
],
"symlink_target": ""
} |
"""
MoinMoin - PageList
print a list of pages whose title matches the search term
@copyright: @copyright: 2001-2003 Juergen Hermann <jh@web.de>,
2003-2008 MoinMoin:ThomasWaldmann
2008 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
Dependencies = ["namespace"]
from MoinMoin import search, wikiutil
from MoinMoin.macro.FullSearch import execute as fs_execute
def execute(macro, args):
_ = macro._
case = 0
# If called with empty or no argument, default to regex search for .+, the full page list.
needle = wikiutil.get_unicode(macro.request, args, 'needle', u'regex:.+')
return fs_execute(macro, needle, titlesearch=True, case=case)
| {
"content_hash": "df8db16f90f0167613e4399aeef12a04",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 94,
"avg_line_length": 30.375,
"alnum_prop": 0.6817558299039781,
"repo_name": "RealTimeWeb/wikisite",
"id": "00ce308feee6b1d989a1f8fc628599fae798b94d",
"size": "758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/macro/PageList.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import datetime
from logging import getLogger
LOG = getLogger(__name__)
class RecordDetail(object):
def __init__(self, name):
self.name = name
self.status = None
self.start_time = datetime.datetime.now()
self.stop_time = None
self.number_of_records = None
self.other_details = None
def save(self,):
# Do stuff to write to database
LOG.info('saving %s ', self)
def __str__(self):
return 'name: %s start: %s stop: %s' % (self.name, self.start_time, self.stop_time)
def stop(self, status):
LOG.info('stopping')
self.status = status
self.stop_time = datetime.datetime.now()
self.save()
| {
"content_hash": "00a68f2c9619b460d4f0681646389426",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 94,
"avg_line_length": 24.586206896551722,
"alnum_prop": 0.5834502103786816,
"repo_name": "atleypnorth/bp2dvcm",
"id": "9abb17ff58b2466e3146e840c7b1aefe809e4629",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recorddetail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15034"
}
],
"symlink_target": ""
} |
"""
WSGI config for todo_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todo_project.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "5aa62f5357d25394e7470e3a22770976",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 83,
"avg_line_length": 42.486486486486484,
"alnum_prop": 0.7907124681933843,
"repo_name": "linhyo/todo",
"id": "32f3eb29f8e7e2906a318bf8b006eec60c679767",
"size": "1572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo_project/todo_project/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30312"
},
{
"name": "JavaScript",
"bytes": "122242"
},
{
"name": "Makefile",
"bytes": "5612"
},
{
"name": "Python",
"bytes": "54494"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
import sys
from peak.util.proxies import ObjectWrapper, ObjectProxy
from loaders import Lazy
class TestLazyLoader(object):
def setup_method(self, method):
self.name = '%s.pkg.module' % __package__
self.loader = Lazy(self.name, ['attr1', 'attr2'])
def teardown_method(self, method):
if self.name in sys.modules:
del sys.modules[self.name]
sys.meta_path.remove(self.loader)
def test_find_module_finds_module(self):
assert self.loader.find_module(self.name, '') is self.loader
def test_find_module_does_not_find_others(self):
assert self.loader.find_module('pkg.modal', '') is None
assert self.loader.find_module('pkg', '') is None
assert self.loader.find_module('module', '') is None
assert self.loader.find_module('some.module', '') is None
assert self.loader.find_module('a.b', '') is None
def test_load_module_returns_from_sys_modules_if_present(self):
module = object()
sys.modules[self.name] = module
assert self.loader.load_module(self.name) is module
def test_load_module_returns_cached_module_when_already_loaded(self):
module = object()
self.loader.loaded = True
self.loader.module = module
assert self.loader.load_module(self.name) is module
assert sys.modules[self.name] is module
def test_load_module_creates_module_on_first_load_and_caches_it(self):
module = self.loader.load_module(self.name)
assert isinstance(module, ObjectWrapper)
assert self.loader.module is module
assert sys.modules[self.name] is module
def test_create_lazy_module_creates_object(self):
obj = self.loader.create_lazy_module()
assert isinstance(obj, ObjectWrapper)
assert hasattr(obj, 'attr1')
assert isinstance(obj.attr1, ObjectProxy)
assert hasattr(obj, 'attr2')
assert isinstance(obj.attr2, ObjectProxy)
def test_import_before_ready_gives_proxy_with_none_values(self):
from pkg import module
assert isinstance(module, ObjectWrapper)
assert module.__subject__ is None
assert module.attr1.__subject__ is None
assert module.attr2.__subject__ is None
assert not hasattr(module, 'attr3')
def test_import_after_ready_gives_proxy_with_real_values(self):
import pkg.module as module
self.loader.ready()
assert isinstance(module, ObjectWrapper)
assert module.__subject__ is not None
assert module.attr1 == 1
assert module.attr2 == 2
assert module.attr3 == 3
| {
"content_hash": "5267cac020bd407cd1bb795f317c7e5e",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 38.64705882352941,
"alnum_prop": 0.656392694063927,
"repo_name": "FelixLoether/python-loaders",
"id": "56b8c96db9281f1b673dd912d6b6476d49b54667",
"size": "2628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_lazy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13357"
},
{
"name": "Ruby",
"bytes": "69"
}
],
"symlink_target": ""
} |
from wtforms.validators import InputRequired
from flask_bombril.r import R
class Required(InputRequired):
def __init__(self):
self.message = R.string.validators.required_field
super(Required, self).__init__(message=self.message)
| {
"content_hash": "d8016c241ae38e8fdd9b74b521b82b76",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 31.375,
"alnum_prop": 0.7211155378486056,
"repo_name": "marcoprado17/flask-bone",
"id": "85b1abd08eee37ba553af7cb449a8e83fcc78620",
"size": "752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/flask_bombril/form_validators/required/required.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3196"
},
{
"name": "HTML",
"bytes": "10430"
},
{
"name": "JavaScript",
"bytes": "3983"
},
{
"name": "Python",
"bytes": "96101"
},
{
"name": "Shell",
"bytes": "2801"
}
],
"symlink_target": ""
} |
import numpy as np
def one_to_one_assignment(binary_cost, value_cost):
'''When multiple True values exist in a row or column, it will pick the one with the lowest value_cost.
'''
binary_cost = pick_closer(binary_cost, value_cost)
binary_cost = pick_closer(binary_cost.T, value_cost.T)
binary_cost = binary_cost.T
return binary_cost
def one_to_two_assignment(binary_cost, value_cost):
'''If there are more than two True values in a row, reduce them to two.
'''
# First make sure the daughter is not shared by two parents
binary_cost = pick_closer(binary_cost.T, value_cost.T)
binary_cost = binary_cost.T
# pick two based on value_cost
binary_cost = pick_closer_two(binary_cost, value_cost)
return binary_cost
def find_one_to_one_assign(binary_cost):
cost = binary_cost.copy()
(_, col1) = np.where([np.sum(cost, 0) == 1])
cost[np.sum(cost, 1) != 1] = False
(row, col2) = np.where(cost)
good_row = [ci for ri, ci in zip(row, col2) if ci in col1]
good_col = [ri for ri, ci in zip(row, col2) if ci in good_row]
return good_row, good_col
def pick_closer(binary_cost, value_cost):
'''If there are several True in a row of binary_cost,
it will pick one with the lowest value_cost.
If multiple elements have the same value_cost, it will pick the first one.
'''
for x in range(binary_cost.shape[0]):
binary_row = binary_cost[x, :]
value_row = value_cost[x, :]
if binary_row.any():
min_value = np.min(value_row[binary_row])
idx = np.where(value_row == min_value)[0][0]
binary_row[0:idx] = False
binary_row[idx+1:] = False
return binary_cost
def pick_closer_two(binary_cost, value_cost, PICK=2):
for x in range(binary_cost.shape[0]):
binary_row = binary_cost[x, :]
value_row = value_cost[x, :]
if binary_row.sum() > 1:
binary_row_copy = binary_row.copy()
sorted_idx = np.argsort(value_row[binary_row])
binary_row[:] = False
for i in sorted_idx[:PICK]:
idx = np.where(value_row == value_row[binary_row_copy][i])[0][0]
binary_row[idx] = True
return binary_cost
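# Added demonstration (hypothetical data, not part of the module): with both
# columns marked as candidates for row 0, pick_closer keeps the cheaper
# column per row, and the transposed pass enforces one row per column,
# yielding a one-to-one matching.
if __name__ == '__main__':
    binary = np.array([[True, True],
                       [True, False]])
    value = np.array([[0.5, 0.2],
                      [0.1, 0.9]])
    # expected result: [[False, True], [True, False]]
    print(one_to_one_assignment(binary, value))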
| {
"content_hash": "94bee6443e9e0321d2364e01bd186d57",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 95,
"avg_line_length": 37.11666666666667,
"alnum_prop": 0.6133812303547374,
"repo_name": "braysia/covertrack",
"id": "0f2a0d82d0e3732d6cb852d69599a8fa5af09433",
"size": "2227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covertrack/utils/pairwise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "682275"
},
{
"name": "Python",
"bytes": "250180"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
} |
__author__ = 'bkeroack'
import logging
import elita.util
import bson
class MongoService:
# logspam
#__metaclass__ = elita.util.LoggingMetaClass
def __init__(self, db):
'''
@type db = pymongo.database.Database
'''
assert db
self.db = db
def create_new(self, collection, keys, classname, doc, remove_existing=True):
'''
Creates new document in collection. Optionally, remove any existing according to keys (which specify how the
new document is unique)
Returns id of new document
'''
assert elita.util.type_check.is_string(collection)
assert elita.util.type_check.is_dictlike(keys)
assert elita.util.type_check.is_optional_str(classname)
assert elita.util.type_check.is_dictlike(doc)
assert collection
# keys/classname are only mandatory if remove_existing=True
assert (keys and classname and remove_existing) or not remove_existing
if classname:
doc['_class'] = classname
existing = None
if remove_existing:
existing = [d for d in self.db[collection].find(keys)]
for k in keys:
doc[k] = keys[k]
if '_id' in doc:
del doc['_id']
id = self.db[collection].save(doc, fsync=True)
logging.debug("new id: {}".format(id))
if existing and remove_existing:
logging.warning("create_new found existing docs! deleting...(collection: {}, keys: {})".format(collection, keys))
keys['_id'] = {'$ne': id}
self.db[collection].remove(keys)
return id
def modify(self, collection, keys, path, doc_or_obj):
'''
Modifies document with the keys in doc. Does so atomically but remember that any key will overwrite the existing
key.
doc_or_obj could be None, zero, etc.
Returns boolean indicating success
'''
assert hasattr(path, '__iter__')
assert path
assert elita.util.type_check.is_string(collection)
assert isinstance(keys, dict)
assert collection and keys
dlist = [d for d in self.db[collection].find(keys)]
assert dlist
canonical_id = dlist[0]['_id']
if len(dlist) > 1:
logging.warning("Found duplicate entries for query {} in collection {}; using the first and removing others"
.format(keys, collection))
keys['_id'] = {'$ne': canonical_id}
self.db[collection].remove(keys)
path_dot_notation = '.'.join(path)
result = self.db[collection].update({'_id': canonical_id}, {'$set': {path_dot_notation: doc_or_obj}}, fsync=True)
return result['n'] == 1 and result['updatedExisting'] and not result['err']
def save(self, collection, doc):
'''
Replace a document completely with a new one. Must have an '_id' field
'''
assert collection
assert elita.util.type_check.is_string(collection)
assert elita.util.type_check.is_dictlike(doc)
assert '_id' in doc
return self.db[collection].save(doc)
def delete(self, collection, keys):
'''
Drop a document from the collection
Return whatever pymongo returns for deletion
'''
assert elita.util.type_check.is_string(collection)
assert isinstance(keys, dict)
assert collection and keys
dlist = [d for d in self.db[collection].find(keys)]
assert dlist
if len(dlist) > 1:
logging.warning("Found duplicate entries for query {} in collection {}; removing all".format(keys,
collection))
return self.db[collection].remove(keys, fsync=True)
def update_roottree(self, path, collection, id, doc=None):
'''
Update the root tree at path [must be a tuple of indices: ('app', 'myapp', 'builds', '123-foo')] with DBRef
Optional doc can be passed in which will be inserted into the tree after adding DBRef field
Return boolean indicating success
'''
assert hasattr(path, '__iter__')
assert elita.util.type_check.is_string(collection)
assert id.__class__.__name__ == 'ObjectId'
assert elita.util.type_check.is_optional_dict(doc)
path_dot_notation = '.'.join(path)
root_tree_doc = doc if doc else {}
root_tree_doc['_doc'] = bson.DBRef(collection, id)
result = self.db['root_tree'].update({}, {'$set': {path_dot_notation: root_tree_doc}}, fsync=True)
return result['n'] == 1 and result['updatedExisting'] and not result['err']
def rm_roottree(self, path):
'''
Delete/remove the root_tree reference at path
'''
assert hasattr(path, '__iter__')
assert path
path_dot_notation = '.'.join(path)
result = self.db['root_tree'].update({}, {'$unset': {path_dot_notation: ''}}, fsync=True)
return result['n'] == 1 and result['updatedExisting'] and not result['err']
def get(self, collection, keys, multi=False, empty=False):
'''
Thin wrapper around find()
Retrieve documents from Mongo matching keys. If duplicates are found and multi is False, delete all but the first.
If empty, it's ok to return None if nothing matches
Returns document
@rtype: dict | list(dict) | None
'''
assert elita.util.type_check.is_string(collection)
assert isinstance(keys, dict)
assert collection
dlist = [d for d in self.db[collection].find(keys)]
assert dlist or empty
if len(dlist) > 1 and not multi:
logging.warning("Found duplicate entries ({}) for query {} in collection {}; dropping all but the first"
.format(len(dlist), keys, collection))
keys['_id'] = {'$ne': dlist[0]['_id']}
self.db[collection].remove(keys)
return dlist if multi else (dlist[0] if dlist else dlist)
def dereference(self, dbref):
'''
Simple wrapper around db.dereference()
Returns document pointed to by DBRef
@type dbref: bson.DBRef
'''
assert dbref
assert dbref.__class__.__name__ == 'DBRef'
return self.db.dereference(dbref)
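# Added usage sketch (hypothetical; the collection and key names below are
# invented for illustration):
#
#     svc = MongoService(db)  # db: a pymongo Database
#     app_id = svc.create_new('applications', {'app_name': 'myapp'},
#                             'Application', {'builds': {}})
#     svc.update_roottree(('app', 'myapp'), 'applications', app_id)
#     svc.modify('applications', {'app_name': 'myapp'},
#                ('builds', '123-foo'), {'state': 'ok'})
#     doc = svc.get('applications', {'app_name': 'myapp'})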
| {
"content_hash": "f6b477943c4d7afb62712407f080d486",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 125,
"avg_line_length": 40.1375,
"alnum_prop": 0.5879788227966366,
"repo_name": "bkeroack/elita",
"id": "b8faa9846445ccbf4299e7659e6413d0e49f6c67",
"size": "6422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elita/dataservice/mongo_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6464"
},
{
"name": "Makefile",
"bytes": "6754"
},
{
"name": "PowerShell",
"bytes": "4543"
},
{
"name": "Python",
"bytes": "373538"
},
{
"name": "Ruby",
"bytes": "7720"
},
{
"name": "Shell",
"bytes": "7153"
}
],
"symlink_target": ""
} |