text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@wp2ch8hp$ewf@1@2b9pmns+(l%xpt*v157h#dwt3#)5q08*ya'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'twittr',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__ )))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR + '/templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "cd65f69583cdfcdd3f1c2a12cfc3e8fb",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 91,
"avg_line_length": 25.84,
"alnum_prop": 0.6814241486068111,
"repo_name": "evrom/django-example-for-class",
"id": "a25455e1b5a6ba168a7c882be4cdf83408aeed4b",
"size": "3230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/project/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "368"
},
{
"name": "Python",
"bytes": "7858"
}
],
"symlink_target": ""
} |
from nyawc.helpers.URLHelper import URLHelper
class Request(object):
    """A single HTTP request definition used by the crawler.

    Bundles the URL, the HTTP verb and every per-request option (payload,
    authentication, cookies, headers, proxies, timeout, certificate
    verification) so the request can be executed later.

    Attributes:
        METHOD_OPTIONS (str): The HTTP OPTIONS verb.
        METHOD_GET (str): The HTTP GET verb.
        METHOD_HEAD (str): The HTTP HEAD verb.
        METHOD_POST (str): The HTTP POST verb.
        METHOD_PUT (str): The HTTP PUT verb.
        METHOD_DELETE (str): The HTTP DELETE verb.
        parent_raised_error (bool): True if the parent request errored (e.g. 404).
        depth (int): Crawling depth at which this request was discovered.
        url (str): Absolute URL to request.
        method (str): HTTP verb to use.
        data (obj): Body payload ({key: value} OrderedDict), None for GET.
        auth (obj): requests-style authentication object.
        cookies (obj): requests-style cookie jar.
        headers (obj): Request headers as {key: value}.
        proxies (obj): Proxies as {key: value}.
        timeout (int): Seconds to wait before a timeout exception is raised.
        verify (mixed): True/False to toggle certificate checks, or a path
            to a trusted CA bundle.
    """

    METHOD_OPTIONS = "options"
    METHOD_GET = "get"
    METHOD_HEAD = "head"
    METHOD_POST = "post"
    METHOD_PUT = "put"
    METHOD_DELETE = "delete"

    def __init__(self, url, method=METHOD_GET, data=None, auth=None, cookies=None, headers=None, proxies=None, timeout=30, verify=True):
        """Build a Request.

        Args:
            url (str): Absolute URL to request.
            method (str): HTTP verb to use (defaults to GET).
            data (obj): Payload ({key: value} OrderedDict) to send.
            auth (obj): requests-style authentication object.
            cookies (obj): requests-style cookie jar.
            headers (obj): Request headers as {key: value}.
            proxies (obj): Proxies as {key: value}.
            timeout (int): Seconds before a timeout exception is raised.
            verify (mixed): Certificate-check flag or CA bundle path.
        """
        # Per-request transport options are stored verbatim.
        self.auth = auth
        self.cookies = cookies
        self.headers = headers
        self.proxies = proxies
        self.timeout = timeout
        self.verify = verify

        # Crawl bookkeeping: no parent error seen yet, depth starts at 0.
        self.parent_raised_error = False
        self.depth = 0

        self.url = url
        self.method = method

        # Non-GET verbs keep the payload in the request body; GET merges
        # it into the query string and clears the body.
        if method != self.METHOD_GET:
            self.data = data
        else:
            self.url = URLHelper.append_with_data(self.url, data)
            self.data = None
| {
"content_hash": "06d3f54c33eb9cb46158f4f040f65094",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 136,
"avg_line_length": 44.45070422535211,
"alnum_prop": 0.641318124207858,
"repo_name": "tijme/not-your-average-web-crawler",
"id": "11e19fc4b89e3cc723e7002a230fc885b1edddf7",
"size": "4290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nyawc/http/Request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "3034"
},
{
"name": "Python",
"bytes": "158382"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
import socketio

# set this to 'threading', 'eventlet', or 'gevent'
async_mode = 'eventlet'

# Socket.IO server instance; Flask serves the HTML page while socketio
# handles the websocket/polling traffic.
sio = socketio.Server(async_mode=async_mode)
app = Flask(__name__)
# Wrap Flask's WSGI app so socketio can intercept its own traffic before
# the request reaches Flask.
app.wsgi_app = socketio.Middleware(sio, app.wsgi_app)

@app.route('/')
def index():
    # Serve the latency-measurement page.
    return render_template('latency.html')

@sio.on('ping')
def ping(sid):
    # Answer a client 'ping' with a 'pong' sent only to that client
    # (room=sid targets the emitting socket).
    sio.emit('pong', room=sid)

if __name__ == '__main__':
    # Pick the deployment server that matches the chosen async_mode.
    if async_mode == 'threading':
        # deploy with Werkzeug
        app.run(threaded=True)
    elif async_mode == 'eventlet':
        # deploy with eventlet
        import eventlet
        from eventlet import wsgi
        wsgi.server(eventlet.listen(('', 5000)), app)
    elif async_mode == 'gevent':
        # deploy with gevent; websocket support is optional and only used
        # when gevent-websocket is importable.
        from gevent import pywsgi
        try:
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            websocket = False
        if websocket:
            pywsgi.WSGIServer(('', 5000), app,
                              handler_class=WebSocketHandler).serve_forever()
        else:
            pywsgi.WSGIServer(('', 5000), app).serve_forever()
    else:
        print('Unknown async_mode: ' + async_mode)
| {
"content_hash": "aaabad439d23a27b8d113afca45d1abe",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 27.02173913043478,
"alnum_prop": 0.5969428801287209,
"repo_name": "bharling/python-socketio",
"id": "191a069b4510b6aff0112d171fc4d69cc4868acd",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/latency.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68864"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models
from ._utils import *
from ..models import RelatedContent
from ..models import RelatedType
def generate_model():
    """Create two fake articles joined by a fresh RelatedContent row.

    Returns a (source_article, destination_article, related_content) tuple.
    """
    source, destination = generate_fake_articles(2)
    relation_type = RelatedType.objects.create(title="Some Random Type")
    content = RelatedContent.objects.create(
        related_type=relation_type,
        source_object=source,
        destination_object=destination
    )
    return source, destination, content
class RelatedTypeTestCase(TestCase):
    """Field-level sanity checks for the RelatedType model."""

    def test_has_title(self):
        # A related type must expose a title field.
        instance = RelatedType()
        self.assertModelHasField(instance, "title")
class RelatedContentTestCase(TestCase):
    """Field-level checks for the RelatedContent model.

    Each test builds a fresh RelatedContent row via the module-level
    generate_model() helper and inspects one field or relation.
    """

    def generate_model(self):
        # Only the RelatedContent instance matters for these tests; the
        # two generated articles are discarded.
        source, destination, content = generate_model()
        return content

    def test_has_related_Content(self):
        model = self.generate_model()
        self.assertRelatedTo(model, "related_type", RelatedType)

    def test_has_order(self):
        model = self.generate_model()
        self.assertModelHasField(model, "order", models.IntegerField)

    def test_has_source_type(self):
        model = self.generate_model()
        self.assertRelatedTo(model, "source_type", ContentType)

    def test_has_source_id(self):
        model = self.generate_model()
        self.assertModelHasField(model, "source_id", models.PositiveIntegerField)

    def test_has_source_object(self):
        model = self.generate_model()
        self.assertTrue(hasattr(model, "source_object"))

    def test_has_destination_type(self):
        model = self.generate_model()
        self.assertRelatedTo(model, "destination_type", ContentType)

    def test_has_destination_id(self):
        model = self.generate_model()
        self.assertModelHasField(model, "destination_id",
                                 models.PositiveIntegerField)

    def test_has_destination_object(self):
        model = self.generate_model()
        self.assertTrue(hasattr(model, "destination_object"))
| {
"content_hash": "cc4ed737bfc8839521da0b4c896807c4",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 29.875,
"alnum_prop": 0.6673640167364017,
"repo_name": "texastribune/armstrong.apps.related_content",
"id": "f8268d71c54e8305f95c6b13a54b0e8b6d932f8c",
"size": "1912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "armstrong/apps/related_content/tests/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32907"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
import logging
import os
import pysam
import subprocess
class AnnotationError(Exception):
    # Raised when an annotation file (BED or GFF/GTF) is malformed,
    # e.g. it has fewer tab-delimited columns than the format requires.
    pass
def ensureIndexed(bedPath, preset="bed", trySorting=True):
if not bedPath.endswith(".gz"):
if not os.path.exists(bedPath+".gz"):
logging.info("bgzf compressing {}".format(bedPath))
pysam.tabix_compress(bedPath, bedPath+".gz")
if not os.path.exists(bedPath+".gz"):
raise Exception("Failed to create compress {preset} file for {file}; make sure the {preset} file is "
"sorted and the directory is writeable".format(preset=preset, file=bedPath))
bedPath += ".gz"
if not os.path.exists(bedPath+".tbi"):
logging.info("creating tabix index for {}".format(bedPath))
pysam.tabix_index(bedPath, preset=preset)
if not os.path.exists(bedPath+".tbi"):
raise Exception("Failed to create tabix index file for {file}; make sure the {preset} file is "
"sorted and the directory is writeable".format(preset=preset, file=bedPath))
line = pysam.Tabixfile(bedPath).fetch().next()
if len(line.strip().split("\t")) < 6 and preset == "bed":
raise AnnotationError("BED files need to have at least 6 (tab-delimited) fields (including "
"chrom, start, end, name, score, strand; score is unused)")
if len(line.strip().split("\t")) < 9 and preset == "bed":
raise AnnotationError("GFF/GTF files need to have at least 9 tab-delimited fields")
return bedPath
# def sortFile(uncompressedPath, preset):
# if preset == "bed":
# fields = {"chrom":0, "start":1, "end":2}
# elif preset == "gff":
# fields = {"chrom":0, "start":3, "end":4}
# sortCommand = "sort -k{chrom}V -k{start}n -k{end}n".format(**fields)
# tabixCommand = "{sort} {path} | bgzip > {path}.gz".format(sort=sortCommand, path=uncompressedPath)
# logging.info("Trying to sort input annotation file with command:")
# logging.info(" {}".format(tabixCommand))
# subprocess.check_call(tabixCommand, shell=True)
| {
"content_hash": "c5bbf300ec0568772bd870978bc64634",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 117,
"avg_line_length": 42.51020408163265,
"alnum_prop": 0.6341814690350456,
"repo_name": "gatoravi/svviz",
"id": "4b46f407929304802f7d622fa4d0e6e355d74c9f",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/svviz/tabix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "35516"
},
{
"name": "CSS",
"bytes": "1393"
},
{
"name": "HTML",
"bytes": "6173"
},
{
"name": "JavaScript",
"bytes": "24343"
},
{
"name": "Python",
"bytes": "210456"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ListSinceBlockTest(BitcoinTestFramework):
    """Regression test for listsinceblock across a chain reorganization."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4

    def run_test(self):
        '''
        `listsinceblock` did not behave correctly when handed a block that was
        no longer in the main chain:
        ab0
        / \
        aa1 [tx0] bb1
        | |
        aa2 bb2
        | |
        aa3 bb3
        |
        bb4
        Consider a client that has only seen block `aa3` above. It asks the node
        to `listsinceblock aa3`. But at some point prior the main chain switched
        to the bb chain.
        Previously: listsinceblock would find height=4 for block aa3 and compare
        this to height=5 for the tip of the chain (bb4). It would then return
        results restricted to bb3-bb4.
        Now: listsinceblock finds the fork at ab0 and returns results in the
        range bb1-bb4.
        This test only checks that [tx0] is present.
        '''
        assert_equal(self.is_network_split, False)
        self.nodes[2].generate(101)
        self.sync_all()

        # Node 2 mined the first spendable coinbase, everyone else is empty.
        for node_index, expected_balance in enumerate([0, 0, 40, 0]):
            assert_equal(self.nodes[node_index].getbalance(), expected_balance)

        # Split network into two
        self.split_network()
        assert_equal(self.is_network_split, True)

        # send to nodes[0] from nodes[2] while the halves are disconnected
        senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)

        # Mine on both sides; node 2's branch is one block longer and wins
        # after the halves are rejoined.
        lastblockhash = self.nodes[1].generate(6)[5]
        self.nodes[2].generate(7)
        print('lastblockhash=%s' % (lastblockhash))

        self.sync_all()
        self.join_network()

        # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
        lsbres = self.nodes[0].listsinceblock(lastblockhash)
        found = any(tx['txid'] == senttx for tx in lsbres['transactions'])
        assert_equal(found, True)
if __name__ == '__main__':
    # Entry point: run the regression test through the framework's main().
    ListSinceBlockTest().main()
| {
"content_hash": "bc34ab464801dc082a10a60bc33bad49",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 84,
"avg_line_length": 31.493333333333332,
"alnum_prop": 0.5779000846740051,
"repo_name": "zcoinofficial/zcoin",
"id": "2ac15cbbba9265354ed207ca01ac462583feffce",
"size": "2572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/listsinceblock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "977651"
},
{
"name": "C",
"bytes": "23449469"
},
{
"name": "C++",
"bytes": "11590916"
},
{
"name": "CMake",
"bytes": "96751"
},
{
"name": "CSS",
"bytes": "42324"
},
{
"name": "Dockerfile",
"bytes": "3182"
},
{
"name": "Gnuplot",
"bytes": "940"
},
{
"name": "HTML",
"bytes": "55527"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "Lua",
"bytes": "3321"
},
{
"name": "M4",
"bytes": "354106"
},
{
"name": "Makefile",
"bytes": "176315"
},
{
"name": "NASL",
"bytes": "177"
},
{
"name": "Objective-C++",
"bytes": "6795"
},
{
"name": "PHP",
"bytes": "4871"
},
{
"name": "POV-Ray SDL",
"bytes": "1480"
},
{
"name": "Perl",
"bytes": "18265"
},
{
"name": "Python",
"bytes": "1731667"
},
{
"name": "QMake",
"bytes": "1352"
},
{
"name": "Roff",
"bytes": "2388"
},
{
"name": "Ruby",
"bytes": "3216"
},
{
"name": "Rust",
"bytes": "119897"
},
{
"name": "Sage",
"bytes": "30192"
},
{
"name": "Shell",
"bytes": "314196"
},
{
"name": "SmPL",
"bytes": "5488"
},
{
"name": "SourcePawn",
"bytes": "12001"
},
{
"name": "q",
"bytes": "5584"
}
],
"symlink_target": ""
} |
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import gc
import logging
import sys
# Bokeh imports
from bokeh.document import Document
from bokeh.util.logconfig import basicConfig
# Module under test
import bokeh.document.modules as bdm # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Module-level sink used by test_extra_referrer_error to deliberately keep
# an extra reference to a fake module alive.
extra = []

class FakeMod:
    # Minimal stand-in for a module object: only __name__ is needed for
    # the sys.modules bookkeeping exercised by these tests.
    __name__ = 'FakeMod'
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class TestDocumentModuleManager:
    """Unit tests for bdm.DocumentModuleManager's sys.modules bookkeeping."""

    def test_basic(self) -> None:
        # A fresh manager tracks no modules.
        d = Document()
        dm = bdm.DocumentModuleManager(d)
        assert len(dm) == 0
        # module manager should only hold a weak ref
        assert len(gc.get_referrers(d)) == 0

    def test_add(self) -> None:
        # Adding a module registers it in sys.modules and in the manager.
        d = Document()
        dm = bdm.DocumentModuleManager(d)
        mod = FakeMod()
        assert 'FakeMod' not in sys.modules
        dm.add(mod)
        assert 'FakeMod' in sys.modules
        assert len(dm) == 1
        # clean up so later tests see a pristine sys.modules
        del sys.modules["FakeMod"]

    def test_add_twice_error(self) -> None:
        # Re-adding the same module is rejected.
        d = Document()
        dm = bdm.DocumentModuleManager(d)
        mod = FakeMod()
        assert 'FakeMod' not in sys.modules
        dm.add(mod)
        with pytest.raises(RuntimeError):
            dm.add(mod)
        del sys.modules["FakeMod"]

    def test_destroy(self) -> None:
        # destroy() empties the manager and unregisters from sys.modules.
        d = Document()
        dm = bdm.DocumentModuleManager(d)
        mod = FakeMod()
        assert 'FakeMod' not in sys.modules
        dm.add(mod)
        assert 'FakeMod' in sys.modules
        assert len(dm) == 1
        dm.destroy()
        assert len(dm) == 0
        assert 'FakeMod' not in sys.modules

    def test_extra_referrer_error(self, caplog: pytest.LogCaptureFixture) -> None:
        # destroy() should log an error when something else still holds a
        # reference to a managed module.
        d = Document()
        dm = bdm.DocumentModuleManager(d)
        mod = FakeMod()
        assert 'FakeMod' not in sys.modules
        dm.add(mod)
        assert 'FakeMod' in sys.modules
        assert len(dm) == 1
        # add an extra referrer for Document.destroy to complain about
        extra.append(mod)
        # NOTE: gc is imported at module level; the redundant local
        # ``import gc`` that used to sit here has been removed.
        # get_referrers behavior changed in Python 3.7, see https://github.com/bokeh/bokeh/issues/8221
        assert len(gc.get_referrers(mod)) in (3,4)
        with caplog.at_level(logging.ERROR):
            dm.destroy()
            assert "Module %r has extra unexpected referrers! This could indicate a serious memory leak. Extra referrers:" % mod in caplog.text
            assert len(caplog.records) == 1
        assert 'FakeMod' not in sys.modules
        assert len(dm) == 0
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# needed for caplog tests to function: configure logging once at import
# time so records emitted during the tests are actually handled.
basicConfig()
| {
"content_hash": "3b4a041c40c80e652c1aee27bc100101",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 143,
"avg_line_length": 26.924242424242426,
"alnum_prop": 0.44682048396173324,
"repo_name": "bokeh/bokeh",
"id": "fa5b0898d128e5884097a9c02631453d763440c9",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "tests/unit/bokeh/document/test_modules.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
} |
import grpc
import demo_pb2
import demo_pb2_grpc
# from opencensus.trace.tracer import Tracer
# from opencensus.trace.exporters import stackdriver_exporter
# from opencensus.trace.ext.grpc import client_interceptor
# try:
# exporter = stackdriver_exporter.StackdriverExporter()
# tracer = Tracer(exporter=exporter)
# tracer_interceptor = client_interceptor.OpenCensusClientInterceptor(tracer, host_port='0.0.0.0:8080')
# except:
# tracer_interceptor = client_interceptor.OpenCensusClientInterceptor()
def send_confirmation_email(email, order):
    """Ask the email microservice to send an order-confirmation message.

    Args:
        email: recipient email address (str).
        order: order payload (demo_pb2 message) to confirm.

    RPC failures are caught and printed rather than raised.
    """
    # Use the channel as a context manager so the underlying gRPC
    # connection is always closed; the previous code leaked the channel.
    with grpc.insecure_channel('0.0.0.0:8080') as channel:
        # channel = grpc.intercept_channel(channel, tracer_interceptor)
        stub = demo_pb2_grpc.EmailServiceStub(channel)
        try:
            stub.SendOrderConfirmation(demo_pb2.SendOrderConfirmationRequest(
                email = email,
                order = order
            ))
            print('Request sent.')
        except grpc.RpcError as err:
            print(err.details())
            print('{}, {}'.format(err.code().name, err.code().value))
if __name__ == '__main__':
    # Running the module directly only announces itself; call
    # send_confirmation_email() from other code to exercise the RPC.
    print('Client for email service.')
"content_hash": "c10b54f65ce4eaf4bd285182ef446897",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 107,
"avg_line_length": 33.5625,
"alnum_prop": 0.7243947858472998,
"repo_name": "census-ecosystem/opencensus-microservices-demo",
"id": "e4e591fa647ced1d228bd42e83fd564299c49925",
"size": "1670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/emailservice/email_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5001"
},
{
"name": "C#",
"bytes": "267730"
},
{
"name": "Dockerfile",
"bytes": "7595"
},
{
"name": "Go",
"bytes": "88127"
},
{
"name": "HTML",
"bytes": "21257"
},
{
"name": "Java",
"bytes": "16136"
},
{
"name": "JavaScript",
"bytes": "12899"
},
{
"name": "Python",
"bytes": "110827"
},
{
"name": "Shell",
"bytes": "8087"
}
],
"symlink_target": ""
} |
"""
Physics engines for top-down or platformers.
"""
# pylint: disable=too-many-arguments, too-many-locals, too-few-public-methods
from arcade.geometry import check_for_collision_with_list
from arcade.geometry import check_for_collision
from arcade.sprite import Sprite
from arcade.sprite import SpriteList
class PhysicsEngineSimple:
    """Top-down physics engine: applies the player's velocity each frame
    and stops the player flush against any wall it runs into (no gravity)."""

    def __init__(self, player_sprite: Sprite, walls: SpriteList):
        """Remember the controlled sprite and the list of impassable walls."""
        self.player_sprite = player_sprite
        self.walls = walls

    def update(self):
        """Advance the player one frame and undo any wall penetration,
        first along x, then along y."""
        player = self.player_sprite

        # --- Horizontal movement
        player.center_x += player.change_x
        collisions = check_for_collision_with_list(player, self.walls)
        if len(collisions) > 0:
            if player.change_x > 0:
                # Moving right: clamp the player's right edge to the
                # leftmost edge among the walls hit.
                for wall in collisions:
                    player.right = min(wall.left, player.right)
            elif player.change_x < 0:
                # Moving left: clamp the player's left edge likewise.
                for wall in collisions:
                    player.left = max(wall.right, player.left)
            else:
                print("Error, collision while player wasn't moving.")

        # --- Vertical movement
        player.center_y += player.change_y
        collisions = check_for_collision_with_list(player, self.walls)
        if len(collisions) > 0:
            if player.change_y > 0:
                # Moving up: stop at the lowest ceiling that was hit.
                for wall in collisions:
                    player.top = min(wall.bottom, player.top)
            elif player.change_y < 0:
                # Moving down: stop on the highest floor that was hit.
                for wall in collisions:
                    player.bottom = max(wall.top, player.bottom)
            else:
                print("Error, collision while player wasn't moving.")
class PhysicsEnginePlatformer:
    """
    This class will move everything, and take care of collisions.

    Platformer physics: each update() applies gravity to the player,
    resolves vertical then horizontal collisions against ``platforms``
    (including walking up shallow ramps), and finally advances any moving
    platforms, bouncing them between their boundary_* limits and pushing
    the player along with them.
    """
    def __init__(self, player_sprite: Sprite, platforms: SpriteList,
                 gravity_constant: float = 0.5):
        """
        Constructor.

        :param player_sprite: the sprite controlled by the player
        :param platforms: sprites the player collides with (may be moving)
        :param gravity_constant: downward change_y applied every update
        """
        self.player_sprite = player_sprite
        self.platforms = platforms
        self.gravity_constant = gravity_constant

    def can_jump(self) -> bool:
        """
        Method that looks to see if there is a floor under
        the player_sprite. If there is a floor, the player can jump
        and we return a True.
        """
        # --- Move in the y direction
        # Probe: nudge the player down 2 pixels, test for contact with a
        # platform, then undo the nudge before returning.
        self.player_sprite.center_y -= 2
        # Check for wall hit
        hit_list = \
            check_for_collision_with_list(self.player_sprite,
                                          self.platforms)
        result = False
        if len(hit_list) > 0:
            result = True
        self.player_sprite.center_y += 2
        return result

    def update(self):
        """
        Move everything and resolve collisions.
        """
        # --- Add gravity
        self.player_sprite.change_y -= self.gravity_constant
        # --- Move in the y direction
        self.player_sprite.center_y += self.player_sprite.change_y
        # Check for wall hit
        hit_list = \
            check_for_collision_with_list(self.player_sprite,
                                          self.platforms)
        # If we hit a wall, move so the edges are at the same point
        if len(hit_list) > 0:
            if self.player_sprite.change_y > 0:
                # Moving up: snap the player's top to the lowest ceiling hit.
                for item in hit_list:
                    self.player_sprite.top = min(item.bottom,
                                                 self.player_sprite.top)
            elif self.player_sprite.change_y < 0:
                # Falling: step the player up out of the floor half a pixel
                # at a time; ride sideways with a moving platform's change_x.
                for item in hit_list:
                    while check_for_collision(self.player_sprite, item):
                        self.player_sprite.bottom += 0.5
                    if item.change_x != 0:
                        self.player_sprite.center_x += item.change_x
            else:
                pass
                # TODO: The code below can't execute, as "item" doesn't
                # exist. In theory, this condition should never be arrived at.
                # Collision while player wasn't moving, most likely
                # moving platform.
                # if self.player_sprite.center_y >= item.center_y:
                #     self.player_sprite.bottom = item.top
                # else:
                #     self.player_sprite.top = item.bottom
            # Landing on a descending platform keeps its downward speed;
            # otherwise vertical velocity is zeroed.
            self.player_sprite.change_y = min(0, hit_list[0].change_y)

        # --- Move in the x direction
        self.player_sprite.center_x += self.player_sprite.change_x
        # Check for wall hit
        hit_list = \
            check_for_collision_with_list(self.player_sprite,
                                          self.platforms)
        # If we hit a wall, move so the edges are at the same point
        if len(hit_list) > 0:
            change_x = self.player_sprite.change_x
            if change_x > 0:
                for item in hit_list:
                    # See if we can "run up" a ramp
                    self.player_sprite.center_y += change_x
                    if check_for_collision(self.player_sprite, item):
                        # Still blocked at the raised height: not a ramp.
                        # Undo the lift and clamp against the wall.
                        self.player_sprite.center_y -= change_x
                        self.player_sprite.right = \
                            min(item.left, self.player_sprite.right)
            elif change_x < 0:
                for item in hit_list:
                    # See if we can "run up" a ramp
                    # (change_x is negative here, so -= lifts the player)
                    self.player_sprite.center_y -= change_x
                    if check_for_collision(self.player_sprite, item):
                        # NOTE(review): this subtracts change_x again, which
                        # lifts the player a second time instead of undoing
                        # the lift (compare the += branch above) -- confirm
                        # against upstream whether this should be +=.
                        self.player_sprite.center_y -= change_x
                        self.player_sprite.left = max(item.right,
                                                      self.player_sprite.left)
            else:
                print("Error, collision while player wasn't moving.")

        # --- Advance moving platforms and bounce them off their boundaries.
        for platform in self.platforms:
            if platform.change_x != 0 or platform.change_y != 0:
                platform.center_x += platform.change_x
                if platform.boundary_left is not None \
                        and platform.left <= platform.boundary_left:
                    platform.left = platform.boundary_left
                    if platform.change_x < 0:
                        platform.change_x *= -1
                if platform.boundary_right is not None \
                        and platform.right >= platform.boundary_right:
                    platform.right = platform.boundary_right
                    if platform.change_x > 0:
                        platform.change_x *= -1
                # If the platform moved into the player, push them sideways.
                if check_for_collision(self.player_sprite, platform):
                    if platform.change_x < 0:
                        self.player_sprite.right = platform.left
                    if platform.change_x > 0:
                        self.player_sprite.left = platform.right
                platform.center_y += platform.change_y
                if platform.boundary_top is not None \
                        and platform.top >= platform.boundary_top:
                    platform.top = platform.boundary_top
                    if platform.change_y > 0:
                        platform.change_y *= -1
                if platform.boundary_bottom is not None \
                        and platform.bottom <= platform.boundary_bottom:
                    platform.bottom = platform.boundary_bottom
                    if platform.change_y < 0:
                        platform.change_y *= -1
| {
"content_hash": "dc93108477e7f9d15feed5cd8aa467c4",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 78,
"avg_line_length": 38.966981132075475,
"alnum_prop": 0.5068393656942258,
"repo_name": "mikemhenry/arcade",
"id": "3be8e4db43369f8ce9dc4d45e0eedc74b35a1058",
"size": "8261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arcade/physics_engines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "166157"
},
{
"name": "Shell",
"bytes": "593"
}
],
"symlink_target": ""
} |
from __future__ import division
import time
import numpy as np
import AuxiliaryClass as AUX
# Print full arrays without truncation.  NOTE(review): ``threshold=np.nan``
# was accepted by the numpy versions this (Python 2) code targeted; newer
# numpy requires an integer threshold (e.g. sys.maxsize) -- confirm the
# pinned numpy version before upgrading.
np.set_printoptions(threshold = np.nan, linewidth=2000, suppress=True)
TimeModel=True #Activates timing the model
def Multi_Country(S,I,sigma):
    """Configure and solve the multi-country OLG model.

    Args:
        S (int): number of age cohorts.
        I (int): number of countries/regions.
        sigma: slope parameter; need not be an integer.

    All other parameters and program levers are set locally below; the
    heavy lifting is delegated to AUX.OLG.
    """
    #NOTE:To run the model, simply run the Multi_Country function with your chosen levels
    #of the number of cohorts (S), the number of countries (I) and slope parameter (sigma)
    #THIS SETS ALL OF THE USER PARAMETERS
    #Country Rosters
    I_dict = {"usa":0,"eu":1,"japan":2,"china":3,"india":4,"russia":5,"korea":6} #DONT CHANGE
    I_touse = ["eu","russia","usa","japan","korea","china","india"] #CAN CHANGE
    #Parameters Zone
    g_A = 0.015 #Technical growth rate
    beta_ann=.95 #Annual discount rate
    delta_ann=.08 #Annual depreciation rate
    alpha = .25 #Capital Share of production
    chil = .52
    chik = 1.0
    mu = 2.29
    Dem_Degree = int(4) #Must be an integer, 4 is the max, otherwise there will be collinearity
    #Convergence Tolerances
    demog_ss_tol = 1e-8 #Used in getting ss for population share
    #PROGRAM LEVERS:
    #For terminal output
    PrintAges = False #Displays the current locations of the program inside key TPI functions
    PrintSSEulErrors = True #Prints the euler errors in each attempt of
    #calculating the steady state
    PrintSS = True #Prints the result of the Steady State functions
    Print_caTimepaths = False #Prints the consumption, assets, and bequests
    #timepath as it gets filled in for each iteration of TPI
    Print_HH_Eulers = False #Prints whether the equations for the household decisions
    #are satisfied (Equations 3.22, 3.19, and sum(assets) = 0)
    Print_Fill_Matricies_Time = False #Activiates Printing the total time it takes to
    #fill the upper and lower diagonal matricies
    Print_Reg_Coefficients = False #Prints the coefficients from the regression done to
    #fit a polynomial to our regression data.
    CheckerMode = False #Activates not printing much of anything, used
    #in conjunction with RobustChecker.py
    Iterate = True #Shows the current iteration number and the associated Eulers
    ShaveTime = False #Shaves off a little more time for TPI.
    SaveToSTATA = True #Saves the Fertility and Mortality rate matricies to a
    #form STATA can read. With this, we can verify Python's results.
    #For plots to display or save
    DemogGraphs = False #Activates graphing graphs with demographic data and population shares
    ShowSSGraphs = True #Activates graphs for steady-state solutions for
    #consumption, assets, and bequests
    ShowCompGraphs = True #Shows the graph which compares calculated
    #Mortality/Fertility rates against Their non-calculated counterparts
    iterations_to_plot = set([]) #Which iterations of the timepath fsolve you want to plot
    SaveFinalTPIPlot = True #Saves the final (and hopefully converged) time
    #path plot as a .png file
    #For using differing ways to solve the model
    UseDiffDemog = True #Turns on different demographics for each country
    UseSSDemog = False #Activates using only steady state demographics for TPI calculation
    UseCalcDemog = False #Uses the calculated mortality, migration and fertilty rates based
    #on regression
    UseDiffProductivities = False #Activates having e vary across cohorts
    AddTime = True #Adds time into the polynomials.
    #Adjusts the country list if we are using less than 7 Countries
    #(I and I_touse must agree; whichever is shorter wins)
    if CheckerMode==False:
        if len(I_touse) < I:
            print "WARNING: We are changing I from", I, "to", len(I_touse),\
                "to fit the length of I_touse. So the countries we are using now are",\
                I_touse
            I = len(I_touse)
            time.sleep(2)
        elif len(I_touse) > I:
            print "WARNING: We are changing I_touse from", I_touse, "to", I_touse[:I],\
                "so there are", I, "regions"
            I_touse = I_touse[:I]
            time.sleep(2)
    ##INPUTS INTO THE CLASS###
    Country_Roster = (I_dict, I_touse)
    HH_params = (S,I,beta_ann,sigma,Dem_Degree)
    Firm_Params = (alpha, delta_ann, chil, chik, mu, g_A)
    Levers = (PrintAges,CheckerMode,Iterate,UseDiffDemog,UseDiffProductivities,\
        Print_Fill_Matricies_Time,ShaveTime,UseCalcDemog,ShowCompGraphs,Print_Reg_Coefficients,SaveToSTATA,AddTime)
    #Initialize the class instance
    Model = AUX.OLG(Country_Roster,HH_params,Firm_Params,Levers)
    #Demographics
    Model.Demographics(demog_ss_tol, UseSSDemog)
    if DemogGraphs: Model.plotDemographics(T_touse="default", compare_across="T", data_year=0)
    #Model.immigrationplot()
    #STEADY STATE OUTER FSOLVE INITIAL GUESSES
    k_ss_guess = np.ones(I)*.55
    kf_ss_guess = np.ones(I-1)*.05
    n_ss_guess = np.ones(I)*1.25
    bq_ss_guess = np.ones(I)*.85
    #STEADY STATE INNER FSVOLE INITIAL GUESSES
    c_innerfsolve_guess = np.ones(I)*.05
    #Steady State
    Model.SteadyState(k_ss_guess,kf_ss_guess,n_ss_guess, bq_ss_guess,\
        c_innerfsolve_guess, PrintSSEulErrors)
    if PrintSS: Model.PrintSSResults()
    if ShowSSGraphs: Model.plotSSResults()
    #TPI is commented out until the steady state part is completed.
    '''
    #Timepath Iteration
    r_init = Model.r_ss*1.05
    bq_init = Model.bqindiv_ss*.95
    a_init = Model.avec_ss*.7
    Model.set_initial_values(r_init, bq_init, a_init)
    Model.Timepath_optimize(Print_HH_Eulers, Print_caTimepaths, iterations_to_plot)
    if SaveFinalTPIPlot: Model.plot_timepaths(SAVE=False)
    '''
# Entry point: set S (cohorts), I (countries) and sigma below, then execute
# this file to run the model. NOTE(review): this is Python 2 code (print
# statement syntax).
start = time.time()
# Multi_Country(S, I, sigma): S and I are integers; sigma may be fractional.
Multi_Country(100,7,4)
tottime=time.time()-start
# When TimeModel is enabled (defined earlier in this file), report total
# wall-clock runtime broken into hours/minutes/seconds.
if TimeModel==True:
    minutes=int(tottime/60)
    hours=int(minutes/60)
    seconds=tottime-minutes*60  # leftover seconds after whole minutes removed
    minutes=minutes-hours*60    # leftover minutes after whole hours removed
    print "The code took:", hours, "hours,", minutes, "minutes and", seconds,\
        "seconds to complete"
| {
"content_hash": "81330d034769871d4a37d9f2b7724cd4",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 119,
"avg_line_length": 42.079470198675494,
"alnum_prop": 0.6639911866540762,
"repo_name": "OpenSourcePolicyCenter/multi-country",
"id": "554976ca0560dc360f3b81d5e8c11481adbfb49e",
"size": "6354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/7CountryElliptical/Main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "172"
},
{
"name": "Python",
"bytes": "726111"
},
{
"name": "TeX",
"bytes": "57764"
}
],
"symlink_target": ""
} |
"""
test_import_schema_config tests the nagios config formatter
copyright: 2015, (c) sproutsocial.com
author: Nicholas Flink <nicholas@sproutsocial.com>
"""
# This script requires the following packages to be installed:
# mock==1.0.1
# PyYAML==3.11
import import_schema_config
import logging
import os
import unittest
logging.basicConfig(level=logging.CRITICAL)
logger = logging.getLogger(__name__)
class TestImportSchemaConfig(unittest.TestCase):
    """Unit tests for import_schema_config.ImportSchemaConfig.

    The config object is constructed from the real YAML file on disk, then
    its parsed dict is replaced with an in-memory fixture via
    overrideYamlDictForTests so assertions are independent of the
    checked-in YAML contents.
    """
    # Replaced per-test in setUp with the fixture dict below.
    yamlDict = None
    # NOTE(review): ldapUrl and productManagersList are not used by the tests
    # in this class -- presumably shared fixtures; confirm before removing.
    ldapUrl = "ldaps://ldap.example.com"
    productManagersList = ["tom", "dick", "harry"]

    def setUp(self):
        """use a custom yaml file

        NOTE: the overrideYamlDictForTests should only be called from tests
        """
        self.yamlDict = {
            'mysql_users': {
                'amysql.ip.example.com':
                ["'adeveloper'@'%'"]},
            'mysql_schemas': {
                'amysql.ip.example.com':
                ["bdb", "adb"],
                'bmysql.ip.example.com':
                ["cdb"]},
        }
        yamlFile = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "import_schema.yaml")
        self.importSchemaConfig = import_schema_config.ImportSchemaConfig(yamlFile)
        self.importSchemaConfig.overrideYamlDictForTests(self.yamlDict)
        self.importEmptySchemaConfig = import_schema_config.ImportSchemaConfig(yamlFile)
        # NOTE(review): the override here is the *string* "{}" rather than an
        # empty dict -- verify overrideYamlDictForTests accepts raw YAML text.
        self.importEmptySchemaConfig.overrideYamlDictForTests("{}")

    def test_getMysqlUsers(self):
        """tests the getMysqlUsers function

        returns a valid dict of servers => users
        """
        users = self.importSchemaConfig.getMysqlUsers()
        # assertItemsEqual is Python 2 unittest (assertCountEqual in py3).
        self.assertItemsEqual(users, ["amysql.ip.example.com"])
        self.assertItemsEqual(self.yamlDict['mysql_users'].keys(),
                              ["amysql.ip.example.com"])
        self.assertItemsEqual(self.yamlDict['mysql_users']['amysql.ip.example.com'],
                              ["'adeveloper'@'%'"])

    def test_getMysqlSchemas(self):
        """tests the getMysqlSchemas function

        returns a valid dict of servers => dbs
        """
        schemas = self.importSchemaConfig.getMysqlSchemas()
        self.assertItemsEqual(schemas, ["amysql.ip.example.com", "bmysql.ip.example.com"])
        self.assertItemsEqual(self.yamlDict['mysql_schemas'].keys(),
                              ["amysql.ip.example.com", "bmysql.ip.example.com"])
        self.assertItemsEqual(self.yamlDict['mysql_schemas']['amysql.ip.example.com'],
                              ["bdb", "adb"])
        self.assertItemsEqual(self.yamlDict['mysql_schemas']['bmysql.ip.example.com'],
                              ["cdb"])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d1b31cfb4dab2556a45e064d2e0a8266",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 114,
"avg_line_length": 40,
"alnum_prop": 0.6068840579710145,
"repo_name": "sproutsocial/mysql_permissions",
"id": "bb59e46e55561debb79da36077aacdf162286473",
"size": "2806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldap_mysql_granter/test_import_schema_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "151749"
},
{
"name": "Shell",
"bytes": "2245"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Group
from actstream.models import Action
from actstream.tests.base import LTE
class GFKManagerTestCase(TestCase):
    """Tests for fetch_generic_relations on the actstream Action queryset.

    fetch_generic_relations should prefetch generic-foreign-key objects in
    roughly one query per content type instead of one query per row, while
    returning exactly the same objects as naive attribute access.
    """
    def setUp(self):
        # Fixture: user1 'followed' three users and 'joined' one group, so
        # the actions span two target content types (User and Group).
        User = get_user_model()
        self.user_ct = ContentType.objects.get_for_model(User)
        self.group_ct = ContentType.objects.get_for_model(Group)
        self.group, _ = Group.objects.get_or_create(name='CoolGroup')
        self.user1, _ = User.objects.get_or_create(username='admin')
        self.user2, _ = User.objects.get_or_create(username='Two')
        self.user3, _ = User.objects.get_or_create(username='Three')
        self.user4, _ = User.objects.get_or_create(username='Four')
        Action.objects.get_or_create(
            actor_content_type=self.user_ct,
            actor_object_id=self.user1.id,
            verb='followed',
            target_content_type=self.user_ct,
            target_object_id=self.user2.id
        )
        Action.objects.get_or_create(
            actor_content_type=self.user_ct,
            actor_object_id=self.user1.id,
            verb='followed',
            target_content_type=self.user_ct,
            target_object_id=self.user3.id
        )
        Action.objects.get_or_create(
            actor_content_type=self.user_ct,
            actor_object_id=self.user1.id,
            verb='followed',
            target_content_type=self.user_ct,
            target_object_id=self.user4.id
        )
        Action.objects.get_or_create(
            actor_content_type=self.user_ct,
            actor_object_id=self.user1.id,
            verb='joined',
            target_content_type=self.group_ct,
            target_object_id=self.group.id
        )

    def test_fetch_generic_relations(self):
        # baseline without fetch_generic_relations
        _actions = Action.objects.filter(actor_content_type=self.user_ct,
                                         actor_object_id=self.user1.id)

        # Fresh queryset per call: cloning prevents cached results from
        # hiding queries in the assertNumQueries measurements below.
        def actions(): return _actions._clone()
        num_content_types = len(set(actions().values_list(
            'target_content_type_id', flat=True)))
        n = actions().count()

        # compare to fetching only 1 generic relation:
        # naive access costs up to one query per action; prefetched access
        # costs up to one query per content type (LTE allows bookkeeping
        # queries on top).
        full, generic = actions(), actions().fetch_generic_relations('target')
        self.assertNumQueries(LTE(n + 1),
                              lambda: [a.target for a in full])
        self.assertNumQueries(LTE(num_content_types + 2),
                              lambda: [a.target for a in generic])

        # Prefetching must not change the returned target objects.
        action_targets = [(a.id, a.target) for a in actions()]
        action_targets_fetch_generic = [
            (a.id, a.target)
            for a in actions().fetch_generic_relations('target')]
        self.assertEqual(action_targets, action_targets_fetch_generic)

        # compare to fetching all generic relations (actor and target)
        num_content_types = len(set(sum(actions().values_list(
            'actor_content_type_id', 'target_content_type_id'), ())))
        full, generic = actions(), actions().fetch_generic_relations()
        self.assertNumQueries(LTE(2 * n + 1),
                              lambda: [(a.actor, a.target) for a in full])
        self.assertNumQueries(LTE(num_content_types + 2),
                              lambda: [(a.actor, a.target) for a in generic])
        action_actor_targets = [(a.id, a.actor, a.target) for a in actions()]
        action_actor_targets_fetch_generic_all = [
            (a.id, a.actor, a.target)
            for a in actions().fetch_generic_relations()]
        self.assertEqual(action_actor_targets,
                         action_actor_targets_fetch_generic_all)

        # fetch only 1 generic relation, but access both gfks: actor access
        # falls back to per-row queries while target stays prefetched.
        def generic():
            return actions().fetch_generic_relations('target')
        self.assertNumQueries(LTE(n + num_content_types + 2), lambda: [
            (a.actor, a.target) for a in generic()])
        action_actor_targets_fetch_generic_target = [
            (a.id, a.actor, a.target) for a in generic()]
        self.assertEqual(action_actor_targets,
                         action_actor_targets_fetch_generic_target)
| {
"content_hash": "dc2fd0fc698ad454078d240516a23654",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 43.876288659793815,
"alnum_prop": 0.5946898496240601,
"repo_name": "justquick/django-activity-stream",
"id": "da8194fed946e1e3018bac91b6ca23ac3d6dbe55",
"size": "4256",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "actstream/tests/test_gfk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "3098"
},
{
"name": "Makefile",
"bytes": "464"
},
{
"name": "Python",
"bytes": "109877"
}
],
"symlink_target": ""
} |
'''
Code for downloading and processing KITTI data (Geiger et al. 2013, http://www.cvlibs.net/datasets/kitti/)
Code borrowed from PredNet (Lotter et al. 2017, https://arxiv.org/abs/1605.08104)
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import requests
import urllib
import cv2
import hickle as hkl
import numpy as np
from bs4 import BeautifulSoup
from scripts.kitti_config import *
# Target (width, height) passed to cv2.resize for every processed frame.
desired_im_sz = (64, 64)
# KITTI raw-data categories to download and process.
categories = ['city', 'residential', 'road', 'campus']
# Recordings used for validation and testing.
# Were initially chosen randomly such that one of the city recordings was used for validation and one of each category was used for testing.
val_recordings = [('city', '2011_09_26_drive_0005_sync')]
test_recordings = [('city', '2011_09_26_drive_0104_sync'),
                   ('residential', '2011_09_26_drive_0079_sync'),
                   ('road', '2011_09_26_drive_0070_sync'),
                   ('campus', '2011_09_28_drive_0021_sync')]
# DATA_DIR comes from scripts.kitti_config (star-imported above).
if not os.path.exists(DATA_DIR):
    os.mkdir(DATA_DIR)
# Download raw zip files by scraping KITTI website
def download_data():
    """Scrape the KITTI raw-data pages and download each drive's sync zip.

    For every category in `categories`, the category page is fetched, drive
    ids are parsed out of the <h3> headers, and each drive's zip is saved to
    DATA_DIR/raw/<category>/<drive>_sync.zip. Requires network access.
    """
    # Bug fix: `urllib.urlretrieve` only exists on Python 2; this file
    # otherwise targets Python 3 (__future__ print_function, print() calls).
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2 fallback
    base_dir = os.path.join(DATA_DIR, 'raw/')
    if not os.path.exists(base_dir):
        os.mkdir(base_dir)
    for c in categories:
        url = "http://www.cvlibs.net/datasets/kitti/raw_data.php?type=" + c
        r = requests.get(url)
        soup = BeautifulSoup(r.content)
        drive_list = soup.find_all("h3")
        # The drive id is the first whitespace-delimited token of each header.
        drive_list = [d.text[:d.text.find(' ')] for d in drive_list]
        print("Downloading set: " + c)
        c_dir = base_dir + c + '/'
        if not os.path.exists(c_dir):
            os.mkdir(c_dir)
        for i, d in enumerate(drive_list):
            print(str(i+1) + '/' + str(len(drive_list)) + ": " + d)
            url = "http://kitti.is.tue.mpg.de/kitti/raw_data/" + d + "/" + d + "_sync.zip"
            urlretrieve(url, filename=c_dir + d + "_sync.zip")
# unzip images
def extract_data():
    """Unzip downloaded drive archives, extracting only image_03 frames.

    Uses the system `unzip` tool; each archive under DATA_DIR/raw/<category>/
    is unpacked into a sibling directory named after the archive.
    """
    for c in categories:
        c_dir = os.path.join(DATA_DIR, 'raw/', c + '/')
        # Bug fix: `.next()` on the os.walk generator is Python 2 only;
        # the builtin next() works on both Python 2.6+ and Python 3.
        _, _, zip_files = next(os.walk(c_dir))
        for f in zip_files:
            print('unpacking: ' + f)
            # f[:10] is the recording date prefix (e.g. 2011_09_26); only the
            # left color camera (image_03) files are pulled from the archive.
            spec_folder = f[:10] + '/' + f[:-4] + '/image_03/data*'
            command = 'unzip -qq ' + c_dir + f + ' ' + spec_folder + ' -d ' + c_dir + f[:-4]
            os.system(command)
# Create image datasets.
# Processes images and saves them in train, val, test splits.
def process_data():
    """Build the train/val/test image arrays and dump them as hickle files.

    Recordings in val_recordings/test_recordings are reserved for those
    splits; every other recording goes to training. Images are resized to
    desired_im_sz and written to DATA_DIR as X_<split>.hkl alongside a
    parallel sources_<split>.hkl list of recording ids (one per image).
    """
    splits = {s: [] for s in ['train', 'test', 'val']}
    splits['val'] = val_recordings
    splits['test'] = test_recordings
    not_train = splits['val'] + splits['test']
    for c in categories:  # Assign every non-reserved recording to training.
        c_dir = os.path.join(DATA_DIR, 'raw', c + '/')
        print ("Video Directory: ", c_dir)
        # Bug fix: `.next()` is Python 2 only; builtin next() works on 2 and 3.
        _, folders, _ = next(os.walk(c_dir))
        splits['train'] += [(c, f) for f in folders if (c, f) not in not_train]
    for split in splits:
        im_list = []
        source_list = []  # corresponds to recording that image came from
        for category, folder in splits[split]:
            im_dir = os.path.join(DATA_DIR, 'raw/', category, folder, folder[:10], folder, 'image_03/data/')
            _, _, files = next(os.walk(im_dir))
            im_list += [im_dir + f for f in sorted(files)]
            source_list += [category + '-' + folder] * len(files)
        print('Creating ' + split + ' data: ' + str(len(im_list)) + ' images')
        # One uint8 RGB/BGR frame per image, already resized to desired_im_sz.
        X = np.zeros((len(im_list),) + desired_im_sz + (3,), np.uint8)
        for i, im_file in enumerate(im_list):
            try:
                im = cv2.imread(im_file, cv2.IMREAD_COLOR)
                X[i] = process_im(im, desired_im_sz)
            except cv2.error as e:
                # Leave X[i] zeroed when a frame cannot be decoded/resized.
                print("Image file being processed: ", im_file)
                print (e)
            except IOError as e:
                print (e)
        hkl.dump(X, os.path.join(DATA_DIR, 'X_' + split + '.hkl'))
        hkl.dump(source_list, os.path.join(DATA_DIR, 'sources_' + split + '.hkl'))
# resize image to the requested size
def process_im(im, desired_sz):
    """Resize image `im` to `desired_sz` and return the resized array.

    Bug fix: the original ignored the `desired_sz` parameter and always
    resized to the module-level `desired_im_sz`; the parameter is now
    honoured (callers pass desired_im_sz, so existing behavior is unchanged).
    NOTE(review): cv2.resize interprets the size as (width, height); callers
    pass a square size here so the distinction does not matter.
    """
    return cv2.resize(im, desired_sz, interpolation=cv2.INTER_AREA)
if __name__ == '__main__':
# download_data()
# extract_data()
process_data() | {
"content_hash": "36d099cde3e83e78e801399b7456f6f9",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 140,
"avg_line_length": 38.540983606557376,
"alnum_prop": 0.5765631646108039,
"repo_name": "AutonomyLab/deep_intent",
"id": "43ce3fc22396f2527e841a14187797abf24a24ad",
"size": "4702",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code/gan_model/scripts/process_kitti.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1344228"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""Some common multiprocessing tools for doing interpolation experiments.
These tools are specifically used to replace long running for-loops.
In particular, it executes a for-loop for a few iterations and stashes the
result into a file. In the case of `managed_multiprocessing_loop_to_numpy` data
is saved into a long numpy file. In the case of
`managed_multiprocessing_loop_to_ndjson` it is appended into a ndjson file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from multiprocessing.pool import ThreadPool
import os
import numpy as np
import landscape_explorers
import tensorflow as tf
logging = tf.logging
gfile = tf.gfile
DEFAULT_ALLOW_PICKLE = False
def managed_multiprocessing_loop_to_numpy(
    prepared_arguments,
    save_dir,
    file_name_skeleton='interpolation_result{}.npy',
    function_to_execute=landscape_explorers.landscape_explorer_parallel,
    save_every=50,
):
  """Executes a function in parallel with intermediate caching of computations.

  Given a list of tuples in `prepared_arguments`, this function executes
  `function_to_execute` on each of them. A cumulative checkpoint is written to
  `save_dir/file_name_skeleton` after every `save_every` arguments; each new
  checkpoint replaces the previous one, and the final file (with the `{}`
  placeholder stripped) replaces the last checkpoint.

  Fixes over the previous version:
    * Empty `prepared_arguments` no longer raises (the post-loop block read
      the unbound loop variable `i` and indexed an empty temporary_files).
    * Removed the dead post-loop "remaining arguments" chunk: the stepped
      range loop already covers every element.

  Args:
    prepared_arguments: Arguments to pass to function_to_execute. This should
      be a list of tuples.
    save_dir: The directory to save data to. Set this to be None to skip
      saving.
    file_name_skeleton: A file_name with a `{}` to be used for saving
      intermediate files. The final file will strip out `{}`.
    function_to_execute: The function to execute in parallel. It must take one
      argument, a tuple, that contains all the arguments it needs to run.
    save_every: The number of prepared_arguments after which to save things.

  Returns:
    Processed list of results. If save_dir is given, it also returns the name
    of the final file saved.
  """
  pool = ThreadPool()
  logging.info('Starting multiprocessing loop.')
  processed = []
  temporary_files = []
  for i in range(0, len(prepared_arguments), save_every):
    processed += pool.map(function_to_execute,
                          prepared_arguments[i:i + save_every])
    if save_dir is not None:
      temporary_files.append(
          os.path.join(save_dir, file_name_skeleton.format(i)))
      with gfile.Open(temporary_files[-1], 'w') as f:
        np.save(f, np.array(processed))
      logging.info('Saved temporary file %s', temporary_files[-1])
      # Drop the previous (now superseded) checkpoint.
      if len(temporary_files) > 1 and gfile.Exists(temporary_files[-2]):
        gfile.Remove(temporary_files[-2])
  logging.info('Multiprocessing loop completed. Cleaning up now.')
  pool.close()
  pool.join()
  # Write the final file and delete the last remaining checkpoint.
  if save_dir is not None:
    final_file = os.path.join(save_dir, file_name_skeleton.format(''))
    with gfile.Open(final_file, 'w') as f:
      np.save(f, np.array(processed), allow_pickle=DEFAULT_ALLOW_PICKLE)
    logging.info('Saved all data.')
    if temporary_files and gfile.Exists(temporary_files[-1]):
      gfile.Remove(temporary_files[-1])
    logging.info('Done loop.')
    return processed, final_file
  logging.info('Done loop.')
  return processed
def stash_to_ndjson(results, file_name):
  """Append each dict in `results` to `file_name` as one JSON line (ndjson).

  Args:
    results: A list of dicts containing the data to save.
    file_name: The file name to append the data to.
  """
  logging.log_first_n(logging.INFO, 'Stashing to file: %s', 1, file_name)
  # Serialize every record to a newline-terminated JSON document and append.
  with gfile.Open(file_name, 'a') as writer:
    writer.write(''.join(json.dumps(entry) + '\n' for entry in results))
def managed_multiprocessing_loop_to_ndjson(
    prepared_arguments,
    save_dir,
    file_name_skeleton='interpolation_result.ndjson',
    function_to_execute=landscape_explorers.paired_landscape_explorer_parallel,
    stash_results=stash_to_ndjson,
    save_every=10,
):
  """Executes a function in parallel and stashes the results into ndjson.

  The main advantage of this over managed_multiprocessing_loop_to_numpy is
  that many processes can run this and append into a common file without
  worrying much about preemptiveness.

  Fixes over the previous version:
    * `processed` now actually accumulates every computed chunk (it was
      previously returned empty).
    * The last chunk is no longer stashed twice when len(prepared_arguments)
      is not a multiple of save_every.
    * Empty `prepared_arguments` no longer raises NameError.

  Args:
    prepared_arguments: Arguments to pass to function_to_execute. This should
      be a list of tuples.
    save_dir: The directory to save data to. Set this to be None to skip
      saving.
    file_name_skeleton: The file name used for the ndjson output file.
    function_to_execute: The function to execute in parallel. It must take one
      argument, a tuple, that contains all the arguments it needs to run.
    stash_results: A function that takes the intermediate results and appends
      them to the ndjson file.
    save_every: The number of prepared_arguments after which to save things.

  Returns:
    Processed list of results. If save_dir is given, it also returns the name
    of the file saved.
  """
  pool = ThreadPool()
  logging.info('Starting multiprocessing loop.')
  processed = []
  if save_dir is not None:
    file_name = os.path.join(save_dir, file_name_skeleton)
  for i in range(0, len(prepared_arguments), save_every):
    # The stepped range covers all arguments; no tail handling is needed.
    processed_chunk = pool.map(function_to_execute,
                               prepared_arguments[i:i + save_every])
    processed += processed_chunk
    if save_dir is not None:
      stash_results(processed_chunk, file_name)
  logging.info('Multiprocessing loop completed. Cleaning up now.')
  pool.close()
  pool.join()
  logging.info('Done loop.')
  if save_dir is not None:
    return processed, file_name
  return processed
if __name__ == '__main__':
pass
| {
"content_hash": "7c5aa6d2ae2c719da0336416c041ebb4",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 80,
"avg_line_length": 34.903225806451616,
"alnum_prop": 0.7019408502772643,
"repo_name": "google-research/policy-learning-landscape",
"id": "1daad5a991ca4f20a450bf14b17456cb7baa2a6a",
"size": "7100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interpolation_experiments/multiprocessing_tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "548712"
},
{
"name": "Python",
"bytes": "205307"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Root URLconf: no routes are enabled yet; uncomment entries below as views
# are added. NOTE(review): patterns() belongs to the old Django 1.x API and
# was removed in Django 1.10 -- keep in mind if the project upgrades.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'portal.views.home', name='home'),
    # url(r'^portal/', include('portal.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "e341fd0d5d8d15faeba3d9750a813239",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 32.705882352941174,
"alnum_prop": 0.6762589928057554,
"repo_name": "rajatguptarg/portal",
"id": "d91f67cee328d30e2da7480bd4f07e8e9f300edb",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portal/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7649"
}
],
"symlink_target": ""
} |
from wtforms import Form, StringField, TextAreaField, FloatField, SelectField
class ProjectForm(Form):
    """WTForms form describing a single project entry.

    NOTE(review): WTForms renders fields in declaration order -- do not
    reorder the attributes below without checking the templates.
    """
    # The region where the project applies
    region = SelectField('Region',
                         choices=[('', ''), ('Kosovo', 'Kosovo'), ('Montenegro', 'Montenegro'), ('Serbia', 'Serbia')])

    # Name of the project.
    name = StringField('Name')

    # A description of the project.
    description = TextAreaField('Description')

    # Image URL field
    image_url = StringField('Image URL')

    # The cost of the project.
    cost = FloatField('Cost')
    # Currency the cost is expressed in.
    currency = SelectField('Currency',
                           choices=[('', ''), ('EUR', 'Euro'), ('RSD', 'Serbian Dinar')])

    # The data source.
    source_type = SelectField('Source type',
                              choices=[('', ''), ('Link', 'Link (URL)'), ('Open Data', 'Open Data')])

    # Reference to the source
    source_ref = StringField('Source reference')
| {
"content_hash": "0aab0734e69450ebb3109dc80666733d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 101,
"avg_line_length": 30.82758620689655,
"alnum_prop": 0.616331096196868,
"repo_name": "opendatakosovo/relate-with-it",
"id": "84b70cb236fd81a86851147621b2e98e87fe84d0",
"size": "894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/mod_admin/projectform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135107"
},
{
"name": "HTML",
"bytes": "63766"
},
{
"name": "JavaScript",
"bytes": "17418"
},
{
"name": "Python",
"bytes": "21085"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
import os
import sys
from . import compat
from .config.data import DJANGO_VERSION_MATRIX, CMS_VERSION_MATRIX, VERSION_MATRIX
def query_yes_no(question, default=None):  # pragma: no cover
    """
    Ask a yes/no question via `raw_input()` and return the answer as a bool.

    :param question: A string that is presented to the user.
    :param default: The presumed answer if the user just hits <Enter>.
                    It must be "yes", "no" or None (meaning an answer is
                    required of the user).

    Re-prompts until the user types one of y/ye/yes/n/no (case-insensitive).

    Code borrowed from cookiecutter
    https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py
    """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    # Map each allowed default to the prompt suffix that advertises it.
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    try:
        prompt = prompts[default]
    except KeyError:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = compat.input().lower()
        if choice == '' and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please answer with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def supported_versions(django, cms):
    """
    Convert numeric and literal version information to numeric format and
    check that the requested Django version is supported by the requested
    django CMS version (raises RuntimeError otherwise).
    """
    def _normalize(raw, lookup):
        # Accept either a numeric version ("1.6") or a literal alias
        # ("stable"); unknown values normalize to None.
        try:
            return float(raw)
        except ValueError:
            return lookup.get(raw)

    django_version = _normalize(django, DJANGO_VERSION_MATRIX)
    cms_version = _normalize(cms, CMS_VERSION_MATRIX)

    if cms_version and django_version:
        bounds = VERSION_MATRIX.get(cms_version)
        if bounds is None or not bounds[0] <= django_version <= bounds[1]:
            raise RuntimeError('Django and django CMS versions doesn\'t match: '
                               'Django %s is not supported by django CMS %s' % (django_version, cms_version))
    return django_version, cms_version
def less_than_version(value):
    """
    Converts the current version to the next one for inserting into
    requirements in the ' < version' format (bumps the minor component,
    e.g. "1.9" -> "1.10", "3" -> "3.1").
    """
    parts = [int(chunk) for chunk in str(value).split(".")]
    if len(parts) == 1:
        # Bare major version: treat it as X.0 before bumping.
        parts.append(0)
    parts[1] += 1
    return ".".join(str(part) for part in parts)
class chdir(object):
    """Context manager for changing the current working directory.

    On entry, remembers the current directory and switches to `newPath`;
    on exit (normal or via exception), restores the remembered directory.
    """
    def __init__(self, newPath):
        # Target directory to switch into on __enter__.
        self.newPath = newPath

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore the original directory, even if the block raised.
        os.chdir(self.savedPath)
| {
"content_hash": "5fa81063c3664d0b21725dbe4f7c0e26",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 109,
"avg_line_length": 31.67676767676768,
"alnum_prop": 0.5956632653061225,
"repo_name": "divio/djangocms-installer",
"id": "1f23b7be51cdbf9df75ca960b19517cbb32c4061",
"size": "3160",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "djangocms_installer/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6612"
},
{
"name": "Makefile",
"bytes": "1228"
},
{
"name": "Python",
"bytes": "124600"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import inspect
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
class EntityConfig(object):
    """
    Defines the configuration for a mirrored entity.

    Subclasses override the get_* hooks below to control how a model is
    represented as an entity (display name, kind, metadata, active flag
    and super-entity relationships).
    """
    # The "watching" class variable is a list of tuples that specify what models this entity
    # config watches and the function to extract entity models from the watching model. The
    # function's return must be an iterable object.
    #
    # For example, assume we have an Account model that has a foreign key to a User
    # model. Also, the User model has a M2M to Groups. If Groups are a super entity
    # of an Account, the user must set up a watching variable so that the account
    # is synced when the M2M on the user object is changed. This is because the
    # M2M is not directly on the Account model and does not trigger Account syncing
    # by default when changed. The watching variable would look like the following:
    #
    # watching = [
    #     (User, lambda user_model_obj: Account.objects.filter(user=user_model_obj))
    # ]
    #
    watching = []

    def get_display_name(self, model_obj):
        """
        Returns a human-readable string for the entity (defaults to the
        model object's string representation).
        """
        return u'{0}'.format(model_obj)

    def get_entity_kind(self, model_obj):
        """
        Returns a tuple for a kind name and kind display name of an entity.

        By default, uses the app_label and model of the model object's content
        type as the kind.
        """
        model_obj_ctype = ContentType.objects.get_for_model(model_obj)
        return (u'{0}.{1}'.format(model_obj_ctype.app_label, model_obj_ctype.model), u'{0}'.format(model_obj_ctype))

    def get_entity_meta(self, model_obj):
        """
        Retrieves metadata about an entity.

        Returns:
            A dictionary of metadata about an entity or None if there is no
            metadata. Defaults to returning None.
        """
        return None

    def get_is_active(self, model_obj):
        """
        Describes if the entity is currently active.

        Returns:
            A Boolean specifying if the entity is active. Defaults to
            returning True.
        """
        return True

    def get_super_entities(self, model_obj):
        """
        Retrieves a list of all entities that have a "super" relationship with
        the entity.

        Returns:
            A list of models. If there are no super entities, return an empty
            list. Defaults to returning an empty list.
        """
        return []
class EntityRegistry(object):
    """
    Maintains all registered entities and provides a lookup table for models to related entities.
    """
    def __init__(self):
        # Maps each registered model to a (queryset-or-None, EntityConfig
        # instance) pair.
        self._entity_registry = {}
        # Maps a watched model to the [(entity model, getter), ...] pairs
        # declared by registered configs.
        self._entity_watching = defaultdict(list)

    @property
    def entity_registry(self):
        return self._entity_registry

    @property
    def entity_watching(self):
        return self._entity_watching

    def register_entity(self, model_or_qset, entity_config=None):
        """
        Registers a model or queryset with an entity config. If the entity config is None, it defaults
        to registering the model/qset to EntityConfig.
        """
        if inspect.isclass(model_or_qset) and issubclass(model_or_qset, Model):
            # A plain model class is registered without a queryset.
            model, qset = model_or_qset, None
        elif issubclass(model_or_qset.__class__, (Manager, QuerySet)):
            model, qset = model_or_qset.model, model_or_qset.all()
        else:
            raise ValueError('Must register a model class or queryset instance with an entity config')

        config_class = entity_config if entity_config is not None else EntityConfig
        if not issubclass(config_class, EntityConfig):
            raise ValueError('Must register entity config class of subclass EntityConfig')

        if model in self._entity_registry:
            # First registration wins; repeated registrations are ignored.
            return
        self._entity_registry[model] = (qset, config_class())
        # Expose this config's watchers through the global lookup table.
        for watching_model, entity_model_getter in config_class.watching:
            self._entity_watching[watching_model].append((model, entity_model_getter))
# Define the global registry variable
entity_registry = EntityRegistry()
def register_entity(model_or_qset):
    """
    Class decorator that registers the decorated EntityConfig subclass for
    the given model (or queryset) with django entity:

        @register_entity(Author)
        class AuthorConfig(EntityConfig):
            pass

    Equivalent to calling the registry directly:

        from django.registry import registry
        entity_registry.register_entity(model_or_qset, entity_config)
    """
    def decorator(config_class):
        # Register against the module-level registry, then hand the class
        # back unchanged so the decorator is transparent.
        entity_registry.register_entity(model_or_qset, entity_config=config_class)
        return config_class
    return decorator
| {
"content_hash": "f6f0608c2d1ba668bce0d711b3fa16dc",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 116,
"avg_line_length": 35.821917808219176,
"alnum_prop": 0.6552581261950287,
"repo_name": "wesleykendall/django-entity",
"id": "03ff3e7b80edb868ee3c3049355c4a613657a74f",
"size": "5230",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "entity/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125748"
}
],
"symlink_target": ""
} |
"""
Simple dynamodb health check.
"""
def check_health(graph):
    """No-op DynamoDB health check.

    Args:
        graph: the application object graph (currently unused).

    Returns:
        None.
    """
    # TODO: Is there a basic service health check to perform here akin to 'SELECT 1;' ?
| {
"content_hash": "601759e07c1364c7f12a2dbd9d8ed38e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 87,
"avg_line_length": 18.11111111111111,
"alnum_prop": 0.6687116564417178,
"repo_name": "globality-corp/microcosm-dynamodb",
"id": "29529b880d8b9c9ce60d34278094fcbc1dfb495d",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "microcosm_dynamodb/health.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34649"
}
],
"symlink_target": ""
} |
from pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids
import cProfile
# Profiling script (Python 2): benchmarks pyresttest against a local API at
# http://localhost:8000 and profiles the run with cProfile.
# Benchmark setup: no warm-up runs, 1000 timed runs, aggregate only the
# curl total_time metric.
test = Benchmark()
test.warmup_runs = 0
test.benchmark_runs = 1000
test.raw_metrics = set()
test.metrics = {'total_time'}
test.aggregated_metrics = {'total_time': ['total', 'mean']}
# Basic get test (its profiling call below is commented out; only the
# templated PUT benchmark is actually profiled).
test.url = 'http://localhost:8000/api/person/'
test.name = 'Basic GET'
print 'Basic GET test'
#cProfile.run('resttest.run_benchmark(test)', sort='cumtime')
# Test a generator PUT method: the $id placeholder in the URL and body is
# filled from the 'gen' generator bound below.
test.method = 'PUT'
test.set_url('http://localhost:8000/api/person/$id/', isTemplate=True)
test.headers = {'Content-Type': 'application/json'}
handler = ContentHandler()
handler.setup('{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "$id"}',
              is_template_content=True)
test.body = handler
context = Context()
# Sequential ids starting at 10 feed the $id template variable.
context.add_generator('gen', factory_generate_ids(starting_id=10)())
test.generator_binds = {'id': 'gen'}
print 'Running templated PUT test'
cProfile.run('resttest.run_benchmark(test, context=context)', sort='cumtime')
| {
"content_hash": "80f3cd2c31ff5e94df236348b5461bad",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 89,
"avg_line_length": 33.97142857142857,
"alnum_prop": 0.7325483599663583,
"repo_name": "netjunki/pyresttest",
"id": "dc2f3eb27e31bf772fcf0b77c522f0f571e9abe2",
"size": "1225",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "profile_benchmark.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "15982"
},
{
"name": "Python",
"bytes": "247981"
},
{
"name": "Shell",
"bytes": "5962"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
import tempfile
import unittest
PLOVR_JAR = "plovr-81ed862.jar"


def find_docs_dir():
    """Walk upward from this file toward the filesystem root and return
    the first ``docs`` directory containing the plovr jar.

    Raises ``Exception`` when no such directory exists on the path.
    """
    current = os.path.dirname(os.path.realpath(__file__))
    while True:
        docs = os.path.join(current, "docs")
        if os.path.exists(os.path.join(docs, PLOVR_JAR)):
            return docs
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the root without finding the jar.
            raise Exception("Could not find plovr jar")
        current = parent
def main():
    """Run every docs ``.soy`` template through plovr as a syntax check."""
    docs_dir = find_docs_dir()
    # Collect all soy templates, skipping generated "__" files.
    soy_files = [
        os.path.join(root, file_name)
        for root, dirs, files in os.walk(docs_dir)
        for file_name in files
        if file_name.endswith(".soy") and not file_name.startswith("__")
    ]
    print("Building %s soy files." % len(soy_files))
    plovr_config = {
        "id": "buck",
        # This removes the warning about param being a reserved JS keyword.
        "experimental-compiler-options": {"languageIn": "ECMASCRIPT5"},
        "paths": docs_dir,
        "inputs": soy_files,
    }
    plovr_abspath = os.path.join(docs_dir, PLOVR_JAR)
    fd, config_path = tempfile.mkstemp(suffix=".json", prefix="plovr_config")
    try:
        with open(config_path, "w") as config_file:
            json.dump(plovr_config, config_file)
        # plovr exits non-zero on any syntax error, failing the test.
        subprocess.check_output(["java", "-jar", plovr_abspath, "build", config_path])
    finally:
        os.close(fd)
        os.unlink(config_path)
class TestBuckPackage(unittest.TestCase):
    # Running the full plovr build doubles as a syntax check for the docs.
    def test_no_soy_doc_syntax_error(self):
        """Building all soy templates must complete without errors."""
        main()
# Allow running this syntax check directly as a script.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "588c237c60c534fb3be10e230b64393d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 86,
"avg_line_length": 28.927272727272726,
"alnum_prop": 0.6071653048397234,
"repo_name": "JoelMarcey/buck",
"id": "e421d786ad63ee61d154e33396db10dc3e5ee53e",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "docs/soy_syntax_check.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
from python_speech_features import mfcc
from python_speech_features import delta
from python_speech_features import logfbank
import scipy.io.wavfile as wav
import csv
import numpy as np
class SnoreClassifier:
    """1-NN style snore classifier over MFCC summary features.

    Training data are MFCC features (coefficients 1 and 10, each reduced to
    [max, min, avg]) extracted from the "FakeSnores" wav files.  A test file
    is labelled a snore when more than two training samples lie within
    ``KNN_THRESHOLD`` squared distance of its features.
    """

    def __init__(self):
        # Squared-distance threshold under which a training sample counts
        # as a match.
        self.KNN_THRESHOLD = 90
        self.NUM_SNORES = 132  # 135 actual trainings, rest are recorded
        self.NUM_COUGHS = 10
        self.NUM_TRAFFIC = 10
        self.NUM_NOT_SNORES = self.NUM_COUGHS + self.NUM_TRAFFIC + 10
        self.NUM_FILES = self.NUM_SNORES + self.NUM_NOT_SNORES
        # MFCC coefficient indices used as features.
        self.MFCC_FEATS = [1, 10]
        # Multiplied by 3 because each coefficient yields [max, min, avg].
        self.NUM_FEATS = len(self.MFCC_FEATS) * 3
        # Training matrix: one row of summary features per snore file.
        self.knnData = np.empty((self.NUM_SNORES, self.NUM_FEATS),
                                dtype=float, order='C')

    def initialiseData(self):
        """Load the snore training wavs and fill ``knnData``.

        NOTE(review): rows 0-9 of ``knnData`` are never written (the loop
        starts at 10) yet ``isSnore`` compares against all rows — confirm
        whether the first ten files are deliberately excluded.
        """
        for i in range(10, self.NUM_SNORES):
            (fs, sig) = wav.read("FakeSnores/{0}.wav".format(i))
            for j in range(len(self.MFCC_FEATS)):
                self.knnData[i, j * 3:j * 3 + 3] = self.getMfccMMA(
                    fs, sig, self.MFCC_FEATS[j])

    def getMfccMMA(self, fs, sig, mfccFeat):
        """Return [max, min, avg] of one MFCC coefficient over a signal.

        :param fs: sample rate returned by ``wav.read``
        :param sig: mono or stereo signal array
        :param mfccFeat: index of the MFCC coefficient to summarise
        """
        if len(sig.shape) > 1:
            channel1 = sig[:, 0]  # stereo: keep only the first channel
        else:
            channel1 = sig
        mfcc_feat = mfcc(channel1, samplerate=fs, nfft=2048, preemph=0.95)
        column = mfcc_feat[:, mfccFeat]
        return [self.maximum(column), self.minimum(column),
                self.average(column)]

    def calcDiff(self, val1, val2):
        """Return the absolute difference between two values."""
        return abs(val1 - val2)

    def isSnore(self, fileName):
        """Return 1 if ``<fileName>.wav`` is classified as a snore, else 0.

        Counts how many training rows fall within ``KNN_THRESHOLD`` squared
        distance of the test features; more than 2 matches means snore.
        """
        # Feature vector of the file under test.
        knnDataNS = np.empty((self.NUM_FEATS), dtype=float, order='C')
        diff = [0] * self.NUM_FEATS
        count = 0
        minIndDist = 4000000  # sentinel: "no distance seen yet"
        (fs, sig) = wav.read("{0}.wav".format(fileName))
        for j in range(len(self.MFCC_FEATS)):
            knnDataNS[j * 3:j * 3 + 3] = self.getMfccMMA(
                fs, sig, self.MFCC_FEATS[j])
        for j in range(self.NUM_SNORES):  # range of training
            indDist = 0  # squared distance to this training row
            for i in range(self.NUM_FEATS):
                diff[i] = self.calcDiff(self.knnData[j, i], knnDataNS[i])
                indDist += diff[i] * diff[i]
            if(minIndDist > indDist):
                minIndDist = indDist
            if(minIndDist < self.KNN_THRESHOLD):
                print('Coeff: ', minIndDist)  # If there are some incorrect classifications then you can check this to set a new threshold
                count += 1
                # Reset so the next training row is judged independently.
                minIndDist = 4000000
            if(count > 2):
                return 1  # Snore
        if(minIndDist < self.KNN_THRESHOLD):
            pass
            #print('Coeff: ',minIndDist) #If there are some incorrect classifications then you can check this to set a new threshold
            #return 1 #Snore
        else:
            print('Coeff: ', minIndDist)
        print('Count: ', count)
        return 0  # Not snore

    def average(self, sig):
        """Mean of ``sig``; a size-1/scalar input is returned unchanged."""
        if sig.size > 1:
            return float(np.mean(sig))
        return sig

    def maximum(self, sig):
        """True maximum of ``sig``.

        Bug fix: the original initialised the running max to 0, so any
        all-negative MFCC column was silently reported as 0.
        """
        if sig.size > 1:
            return float(np.max(sig))
        return sig

    def minimum(self, sig):
        """True minimum of ``sig``.

        Bug fix: the original initialised the running min to 4000000, so
        values above that sentinel could never be returned.
        """
        if sig.size > 1:
            return float(np.min(sig))
        return sig
| {
"content_hash": "4caea52f530c2dbacacdc155d01a467f",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 136,
"avg_line_length": 39.792,
"alnum_prop": 0.43546441495778043,
"repo_name": "atorresdi/IoTSeminar2017_SleepMonitoring",
"id": "858f6430aa3654c0e8434c194c2de454a0715e94",
"size": "4984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpi/snoreclassifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "65075"
},
{
"name": "Python",
"bytes": "12688"
}
],
"symlink_target": ""
} |
"""The tests the cover command line platform."""
import os
import tempfile
from unittest import mock
import pytest
from homeassistant.components.cover import DOMAIN
import homeassistant.components.cover.command_line as cmd_rs
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_CLOSE_COVER, SERVICE_OPEN_COVER,
SERVICE_STOP_COVER)
from homeassistant.setup import async_setup_component
@pytest.fixture
def rs(hass):
    """Build and return a CommandCover wired to dummy shell commands."""
    cover = cmd_rs.CommandCover(hass, 'foo', 'command_open', 'command_close',
                                'command_stop', 'command_state', None)
    return cover
def test_should_poll_new(rs):
    """Polling must track the presence of a state command."""
    # With a state command configured, the entity requires polling.
    assert rs.should_poll is True
    # Dropping the state command disables polling.
    rs._command_state = None
    assert rs.should_poll is False
def test_query_state_value(rs):
    """The state command output is run via the shell and whitespace-stripped."""
    with mock.patch('subprocess.check_output') as check_output:
        check_output.return_value = b' foo bar '
        assert rs._query_state_value('runme') == 'foo bar'
        check_output.assert_called_once_with('runme', shell=True)
async def test_state_value(hass):
    """Test with state value."""
    with tempfile.TemporaryDirectory() as tempdirname:
        path = os.path.join(tempdirname, 'cover_status')
        # Commands read/write a scratch file so the cover state can be
        # driven and observed without hardware.
        # NOTE(review): command_close also writes 1, so the close call
        # below leaves the state 'open' — presumably intentional so only
        # 'stop' transitions to 'closed'; confirm.
        test_cover = {
            'command_state': 'cat {}'.format(path),
            'command_open': 'echo 1 > {}'.format(path),
            'command_close': 'echo 1 > {}'.format(path),
            'command_stop': 'echo 0 > {}'.format(path),
            'value_template': '{{ value }}'
        }
        # Set up a command_line cover named 'test' using those commands.
        assert await async_setup_component(hass, DOMAIN, {
            'cover': {
                'platform': 'command_line',
                'covers': {
                    'test': test_cover
                }
            }
        }) is True
        # The status file does not exist yet, so the state is unknown.
        assert 'unknown' == hass.states.get('cover.test').state
        await hass.services.async_call(
            DOMAIN, SERVICE_OPEN_COVER,
            {ATTR_ENTITY_ID: 'cover.test'}, blocking=True)
        assert 'open' == hass.states.get('cover.test').state
        await hass.services.async_call(
            DOMAIN, SERVICE_CLOSE_COVER,
            {ATTR_ENTITY_ID: 'cover.test'}, blocking=True)
        assert 'open' == hass.states.get('cover.test').state
        await hass.services.async_call(
            DOMAIN, SERVICE_STOP_COVER,
            {ATTR_ENTITY_ID: 'cover.test'}, blocking=True)
        # Stop writes 0, which the cover reports as closed.
        assert 'closed' == hass.states.get('cover.test').state
| {
"content_hash": "a6c5d55293ff7b0e9faba969998c5ad9",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 33.946666666666665,
"alnum_prop": 0.5989787902592302,
"repo_name": "tinloaf/home-assistant",
"id": "0e03539d58c19621a0c9e0e08a4ba9aae2410bf1",
"size": "2546",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/cover/test_command_line.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
"""
Volume driver test for Tintri storage.
"""
import mock
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume.drivers.tintri import TClient
from cinder.volume.drivers.tintri import TintriDriver
class FakeImage(object):
    """Minimal stand-in for an image record supporting item access."""

    def __init__(self):
        self.id = 'image-id'
        self.name = 'image-name'

    def __getitem__(self, key):
        # Expose the instance attributes dict-style, as the driver expects.
        attrs = self.__dict__
        return attrs[key]
class TintriDriverTestCase(test.TestCase):
    """Unit tests for TintriDriver with the REST client fully stubbed out."""
    def setUp(self):
        """Build a driver with fake credentials and install the stubs."""
        super(TintriDriverTestCase, self).setUp()
        self.context = context.get_admin_context()
        kwargs = {'configuration': self.create_configuration()}
        self._driver = TintriDriver(**kwargs)
        self._driver._hostname = 'host'
        self._driver._username = 'user'
        self._driver._password = 'password'
        self._driver._api_version = 'v310'
        self._provider_location = 'localhost:/share'
        self._driver._mounted_shares = [self._provider_location]
        self.fake_stubs()
    def create_configuration(self):
        """Return a mock driver configuration with mount options unset."""
        configuration = mock.Mock()
        configuration.nfs_mount_point_base = '/mnt/test'
        configuration.nfs_mount_options = None
        configuration.nas_mount_options = None
        return configuration
    def fake_stubs(self):
        """Replace client and driver internals with the fake_* methods."""
        self.stubs.Set(TClient, 'login', self.fake_login)
        self.stubs.Set(TClient, 'logout', self.fake_logout)
        self.stubs.Set(TClient, 'get_snapshot', self.fake_get_snapshot)
        self.stubs.Set(TintriDriver, '_move_cloned_volume',
                       self.fake_move_cloned_volume)
        self.stubs.Set(TintriDriver, '_get_provider_location',
                       self.fake_get_provider_location)
        self.stubs.Set(TintriDriver, '_set_rw_permissions',
                       self.fake_set_rw_permissions)
        self.stubs.Set(TintriDriver, '_is_volume_present',
                       self.fake_is_volume_present)
        self.stubs.Set(TintriDriver, '_is_share_vol_compatible',
                       self.fake_is_share_vol_compatible)
        self.stubs.Set(TintriDriver, '_is_file_size_equal',
                       self.fake_is_file_size_equal)
    # Trivial stand-ins installed by fake_stubs(); they bypass real REST
    # calls and filesystem checks.
    def fake_login(self, user_name, password):
        return 'session-id'
    def fake_logout(self):
        pass
    def fake_get_snapshot(self, volume_name):
        return 'snapshot-id'
    def fake_move_cloned_volume(self, clone_name, volume_id, share=None):
        pass
    def fake_get_provider_location(self, volume_path):
        return self._provider_location
    def fake_set_rw_permissions(self, path):
        pass
    def fake_is_volume_present(self, volume_path):
        return True
    def fake_is_share_vol_compatible(self, volume, share):
        return True
    def fake_is_file_size_equal(self, path, size):
        return True
    @mock.patch.object(TClient, 'create_snapshot', mock.Mock())
    def test_create_snapshot(self):
        """create_snapshot succeeds when the client call succeeds."""
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        volume = fake_volume.fake_volume_obj(self.context)
        snapshot.volume = volume
        self._driver.create_snapshot(snapshot)
    @mock.patch.object(TClient, 'create_snapshot', mock.Mock(
        side_effect=exception.VolumeDriverException))
    def test_create_snapshot_failure(self):
        """Client failures propagate as VolumeDriverException."""
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        volume = fake_volume.fake_volume_obj(self.context)
        snapshot.volume = volume
        self.assertRaises(exception.VolumeDriverException,
                          self._driver.create_snapshot, snapshot)
    @mock.patch.object(TClient, 'delete_snapshot', mock.Mock())
    def test_delete_snapshot(self):
        """delete_snapshot succeeds when the client call succeeds."""
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        self._driver.delete_snapshot(snapshot)
    @mock.patch.object(TClient, 'delete_snapshot', mock.Mock(
        side_effect=exception.VolumeDriverException))
    def test_delete_snapshot_failure(self):
        """Client failures propagate as VolumeDriverException."""
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        self.assertRaises(exception.VolumeDriverException,
                          self._driver.delete_snapshot, snapshot)
    @mock.patch.object(TClient, 'clone_volume', mock.Mock())
    def test_create_volume_from_snapshot(self):
        """Cloning from a snapshot returns the share's provider location."""
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertEqual({'provider_location': self._provider_location},
                         self._driver.create_volume_from_snapshot(
                             volume, snapshot))
    @mock.patch.object(TClient, 'clone_volume', mock.Mock(
        side_effect=exception.VolumeDriverException))
    def test_create_volume_from_snapshot_failure(self):
        """Clone failures propagate as VolumeDriverException."""
        snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertRaises(exception.VolumeDriverException,
                          self._driver.create_volume_from_snapshot,
                          volume, snapshot)
    @mock.patch.object(TClient, 'clone_volume', mock.Mock())
    @mock.patch.object(TClient, 'create_snapshot', mock.Mock())
    def test_create_cloned_volume(self):
        """Cloning a volume returns the share's provider location."""
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertEqual({'provider_location': self._provider_location},
                         self._driver.create_cloned_volume(volume, volume))
    @mock.patch.object(TClient, 'clone_volume', mock.Mock(
        side_effect=exception.VolumeDriverException))
    @mock.patch.object(TClient, 'create_snapshot', mock.Mock())
    def test_create_cloned_volume_failure(self):
        """Clone failures propagate as VolumeDriverException."""
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertRaises(exception.VolumeDriverException,
                          self._driver.create_cloned_volume, volume, volume)
    @mock.patch.object(TClient, 'clone_volume', mock.Mock())
    def test_clone_image(self):
        """A successful image clone reports location, bootable and cloned."""
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertEqual(({'provider_location': self._provider_location,
                           'bootable': True}, True),
                         self._driver.clone_image(
                             None, volume, 'image-name', FakeImage(), None))
    @mock.patch.object(TClient, 'clone_volume', mock.Mock(
        side_effect=exception.VolumeDriverException))
    def test_clone_image_failure(self):
        """A failed image clone reports not-bootable and not-cloned."""
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertEqual(({'provider_location': None,
                           'bootable': False}, False),
                         self._driver.clone_image(
                             None, volume, 'image-name', FakeImage(), None))
    def test_manage_existing(self):
        """Managing an existing file on a mounted share succeeds."""
        volume = fake_volume.fake_volume_obj(self.context)
        existing = {'source-name': self._provider_location + '/' +
                    volume.name}
        with mock.patch('os.path.isfile', return_value=True):
            self.assertEqual({'provider_location': self._provider_location},
                             self._driver.manage_existing(volume, existing))
    def test_manage_existing_invalid_ref(self):
        """A reference without 'source-name' is rejected."""
        existing = fake_volume.fake_volume_obj(self.context)
        volume = fake_volume.fake_volume_obj(self.context)
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self._driver.manage_existing, volume, existing)
    def test_manage_existing_not_found(self):
        """A reference to a non-existent file is rejected."""
        volume = fake_volume.fake_volume_obj(self.context)
        existing = {'source-name': self._provider_location + '/' +
                    volume.name}
        with mock.patch('os.path.isfile', return_value=False):
            self.assertRaises(exception.ManageExistingInvalidReference,
                              self._driver.manage_existing, volume, existing)
    @mock.patch.object(TintriDriver, '_move_file', mock.Mock(
        return_value=False))
    def test_manage_existing_move_failure(self):
        """A failed rename during manage raises VolumeDriverException."""
        volume = fake_volume.fake_volume_obj(self.context)
        existing = {'source-name': self._provider_location + '/source-volume'}
        with mock.patch('os.path.isfile', return_value=True):
            self.assertRaises(exception.VolumeDriverException,
                              self._driver.manage_existing,
                              volume, existing)
    def test_manage_existing_get_size(self):
        """Size is derived from the file's st_size, converted to GiB."""
        volume = fake_volume.fake_volume_obj(self.context)
        existing = {'source-name': self._provider_location + '/' +
                    volume.name}
        file = mock.Mock(st_size=123 * units.Gi)
        with mock.patch('os.path.isfile', return_value=True):
            with mock.patch('os.stat', return_value=file):
                self.assertEqual(float(file.st_size / units.Gi),
                                 self._driver.manage_existing_get_size(
                                     volume, existing))
    def test_manage_existing_get_size_failure(self):
        """stat errors surface as VolumeDriverException."""
        volume = fake_volume.fake_volume_obj(self.context)
        existing = {'source-name': self._provider_location + '/' +
                    volume.name}
        with mock.patch('os.path.isfile', return_value=True):
            with mock.patch('os.stat', side_effect=OSError):
                self.assertRaises(exception.VolumeDriverException,
                                  self._driver.manage_existing_get_size,
                                  volume, existing)
    def test_unmanage(self):
        """Unmanaging a volume with a provider location succeeds."""
        volume = fake_volume.fake_volume_obj(self.context)
        volume.provider_location = self._provider_location
        self._driver.unmanage(volume)
| {
"content_hash": "e4debdaac7f1a8f216a376282a47b044",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 78,
"avg_line_length": 43.56,
"alnum_prop": 0.6205489235792266,
"repo_name": "JioCloud/cinder",
"id": "2921d3beff55bc9554b50ea313d76f25dafa6a4a",
"size": "10426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_tintri.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11977630"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils
import requests
from cinder.tests.functional import functional_helpers
class TestHealthCheckMiddleware(functional_helpers._FunctionalTestBase):
    """Functional test for the oslo healthcheck middleware endpoint."""

    def test_healthcheck(self):
        # We verify that we return a HTTP200 when calling api_get
        endpoint = 'http://%s:%s/healthcheck' % (self.osapi.host,
                                                 self.osapi.port)
        response = requests.request(
            'GET',
            endpoint,
            headers={'Accept': 'application/json'})
        payload = jsonutils.loads(response.content)
        self.assertEqual(['OK'], payload['reasons'])
| {
"content_hash": "f32d12eb60943d8ca468eee6faf05f47",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 34.88235294117647,
"alnum_prop": 0.6711635750421585,
"repo_name": "openstack/cinder",
"id": "7e7d2cb7c9ac22e5b2d91d657566aff8a8c08ee5",
"size": "1279",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/functional/test_middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078349"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
} |
import logging
import xpybutil
from .base import Widget
from barython.hooks.xorg import WindowHook
logger = logging.getLogger("barython")
class ActiveWindowWidget(Widget):
    """
    Widget showing the title of the currently focused X11 window.

    Uses xpybutil's EWMH helpers to query the active window and refreshes
    when a watched window property event arrives.

    (Bug fixes: the previous docstring — "Requires python-mpd2" — was
    copied from an MPD widget and was wrong for this class; a leftover
    debug ``print`` in ``handler`` has been removed.)
    """
    #: list of atom names to catch
    _atom_names = ("_NET_ACTIVE_WINDOW", "WM_NAME")

    @property
    def active_window_name(self):
        """Return the EWMH name of the active window, or "" on error."""
        active_name = ""
        try:
            active_window = xpybutil.ewmh.get_active_window().reply()
            active_name = xpybutil.ewmh.get_wm_name(active_window).reply()
        except Exception as e:
            logger.error("Cannot get the active window name: {}".format(e))
        return active_name

    def handler(self, events, *args, **kwargs):
        """Forward to the base handler when a watched atom changed."""
        for e, aname in events:
            if aname in self._atom_names:
                # Delegate as soon as one relevant event is seen.
                return super().handler(*args, **kwargs)

    def update(self, *args, **kwargs):
        """Push the current active-window name to the global update."""
        return self.trigger_global_update(
            self.organize_result(active_window=self.active_window_name)
        )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Event driven: no periodic polling loop.
        self.infinite = False
        self.hooks.subscribe(self.handler, WindowHook, refresh=self.refresh)
| {
"content_hash": "cc28382319d4e03782efd043c2fa59d8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 29.30952380952381,
"alnum_prop": 0.6108854589764419,
"repo_name": "Anthony25/barython",
"id": "e01438edfa54f7d9a607d0f9cd4443a7027e329a",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barython/widgets/xorg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "95222"
}
],
"symlink_target": ""
} |
import sys
import cStringIO
import py
from rpython.tool.udir import udir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.platform import CompilationError
from rpython.tool.gcc_cache import (
cache_file_path, build_executable_cache, try_compile_cache)
# Per-module scratch directory under the global test udir.
localudir = udir.join('test_gcc_cache').ensure(dir=1)
def test_gcc_exec():
    # Verifies build_executable_cache: the same source with the same ECI
    # is served from the cache, a different include dir (different ANSWER
    # macro) is not, and a compile failure raises CompilationError.
    f = localudir.join("x.c")
    f.write("""
    #include <stdio.h>
    #include <test_gcc_exec.h>
    int main()
    {
        printf("%d\\n", ANSWER);
        return 0;
    }
    """)
    dir1 = localudir.join('test_gcc_exec_dir1').ensure(dir=1)
    dir2 = localudir.join('test_gcc_exec_dir2').ensure(dir=1)
    dir1.join('test_gcc_exec.h').write('#define ANSWER 3\n')
    dir2.join('test_gcc_exec.h').write('#define ANSWER 42\n')
    eci = ExternalCompilationInfo(include_dirs=[str(dir1)])
    # remove cache
    path = cache_file_path([f], eci, 'build_executable_cache')
    if path.check():
        path.remove()
    res = build_executable_cache([f], eci)
    assert res == "3\n"
    # Second call with an identical ECI must return the cached output.
    assert build_executable_cache([f], eci) == "3\n"
    # A different ECI must not reuse the cached result.
    eci2 = ExternalCompilationInfo(include_dirs=[str(dir2)])
    assert build_executable_cache([f], eci2) == "42\n"
    f.write("#error BOOM\n")
    err = py.test.raises(CompilationError, build_executable_cache, [f], eci2)
    print '<<<'
    print err
    print '>>>'
def test_gcc_ask():
    # Verifies try_compile_cache: repeated calls with the same ECI hit the
    # cache, and an ECI whose header #errors raises CompilationError.
    f = localudir.join("y.c")
    f.write("""
    #include <stdio.h>
    #include <test_gcc_ask.h>
    int main()
    {
        printf("hello\\n");
        return 0;
    }
    """)
    dir1 = localudir.join('test_gcc_ask_dir1').ensure(dir=1)
    dir2 = localudir.join('test_gcc_ask_dir2').ensure(dir=1)
    dir1.join('test_gcc_ask.h').write('/* hello world */\n')
    dir2.join('test_gcc_ask.h').write('#error boom\n')
    eci = ExternalCompilationInfo(include_dirs=[str(dir1)])
    # remove cache
    path = cache_file_path([f], eci, 'try_compile_cache')
    if path.check():
        path.remove()
    assert try_compile_cache([f], eci)
    # Second call must be answered from the cache.
    assert try_compile_cache([f], eci)
    assert build_executable_cache([f], eci) == "hello\n"
    # The #error header must fail compilation.
    eci2 = ExternalCompilationInfo(include_dirs=[str(dir2)])
    err = py.test.raises(CompilationError, try_compile_cache, [f], eci2)
    print '<<<'
    print err
    print '>>>'
def test_gcc_ask_doesnt_log_errors():
    """A failed try_compile_cache must not write ERROR lines to stderr."""
    src = localudir.join('z.c')
    src.write("""this file is not valid C code\n""")
    eci = ExternalCompilationInfo()
    saved_stderr = sys.stderr
    sys.stderr = capture = cStringIO.StringIO()
    try:
        py.test.raises(CompilationError, try_compile_cache, [src], eci)
    finally:
        sys.stderr = saved_stderr
    assert 'ERROR' not in capture.getvalue().upper()
def test_execute_code_ignore_errors():
    """build_executable_cache (third positional flag set) must not write
    ERROR lines to stderr even when compilation fails."""
    src = localudir.join('z.c')
    src.write("""this file is not valid C code\n""")
    eci = ExternalCompilationInfo()
    saved_stderr = sys.stderr
    sys.stderr = capture = cStringIO.StringIO()
    try:
        py.test.raises(CompilationError, build_executable_cache,
                       [src], eci, True)
    finally:
        sys.stderr = saved_stderr
    assert 'ERROR' not in capture.getvalue().upper()
| {
"content_hash": "1de6f0dd3bf12d027d573cf7bb2ea185",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 33.37894736842105,
"alnum_prop": 0.6285083569851782,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "818352ac4f34c11f862027e2c89ad2dfec0d3968",
"size": "3171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/tool/test/test_gcc_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
} |
"""
Модуль с преднастроенными панелями-гридами
"""
from __future__ import absolute_import
from itertools import chain
import json
from itertools import chain
from m3.actions.urls import get_url
from m3_ext.ui import containers
from m3_ext.ui import controls
from m3_ext.ui import menus
from m3_ext.ui import misc
from m3_ext.ui import render_component
from m3_ext.ui.base import BaseExtComponent
from m3_ext.ui.base import ExtUIComponent
from m3_ext.ui.containers.grids import ExtGridCheckBoxSelModel
from m3_ext.ui.fields import ExtSearchField
class ExtObjectGrid(containers.ExtGrid):
"""
Панель с гридом для управления списком объектов.
"""
#==========================================================================
# Внутренние классы для ExtObjectGrid
#=========================================================================
class GridContextMenu(menus.ExtContextMenu):
"""
Внутренний класс для удобной работы с контекстным меню грида
"""
def __init__(self, *args, **kwargs):
super(ExtObjectGrid.GridContextMenu, self).__init__(
*args, **kwargs)
self.menuitem_new = menus.ExtContextMenuItem(
text=u'Добавить',
icon_cls='add_item',
handler='contextMenuNew'
)
self.menuitem_edit = menus.ExtContextMenuItem(
text=u'Изменить',
icon_cls='edit_item',
handler='contextMenuEdit'
)
self.menuitem_delete = menus.ExtContextMenuItem(
text=u'Удалить',
icon_cls='delete_item',
handler='contextMenuDelete'
)
self.menuitem_separator = menus.ExtContextMenuSeparator()
self.init_component()
class GridTopBar(containers.ExtToolBar):
"""
Внутренний класс для удобной работы топбаром грида
"""
def __init__(self, *args, **kwargs):
super(ExtObjectGrid.GridTopBar, self).__init__(*args, **kwargs)
self.button_new = controls.ExtButton(
text=u'Добавить',
icon_cls='add_item',
handler='topBarNew'
)
self.button_edit = controls.ExtButton(
text=u'Изменить',
icon_cls='edit_item',
handler='topBarEdit'
)
self.button_delete = controls.ExtButton(
text=u'Удалить',
icon_cls='delete_item',
handler='topBarDelete'
)
self.button_refresh = controls.ExtButton(
text=u'Обновить',
icon_cls='x-tbar-loading',
handler='topBarRefresh'
)
self.items.extend([
self.button_new,
self.button_edit,
self.button_delete,
self.button_refresh,
])
self.init_component()
#==========================================================================
# Собственно определение класса ExtObjectGrid
#=========================================================================
    def __init__(self, *args, **kwargs):
        super(ExtObjectGrid, self).__init__(*args, **kwargs)
        self.template = 'ext-grids/ext-object-grid.js'
        #======================================================================
        # Actions performed from inside the grid
        #======================================================================
        # Action for creating a new record
        self.action_new = None
        # Action for editing a record
        self.action_edit = None
        # Action for deleting a record
        self.action_delete = None
        # Action for fetching data
        self.action_data = None
        # URL for creating a new record. URLs take priority over actions!
        self.url_new = None
        # URL for editing
        self.url_edit = None
        # URL for deletion
        self.url_delete = None
        # URL for data
        self.url_data = None
        # Grid state flag.
        # True means the grid is read-only.
        self.read_only = False
        #======================================================================
        # Data source for the grid
        #======================================================================
        # Store used to load the data
        self.store = misc.ExtJsonStore(
            auto_load=True, root='rows', id_property='id')
        # Whether to mask the grid while loading
        self.load_mask = True
        # Field name for the record id
        self.row_id_name = 'row_id'
        # name of the request parameter carrying the selected column name
        self.column_param_name = 'column'
        # Use paginated navigation
        self.allow_paging = True
        #======================================================================
        # Context menus and grid bars
        #======================================================================
        # Context menu for a grid row
        self.context_menu_row = ExtObjectGrid.GridContextMenu()
        # Context menu for the grid when the click is not on a row
        self.context_menu_grid = ExtObjectGrid.GridContextMenu()
        # Top toolbar for the grid
        self.top_bar = ExtObjectGrid.GridTopBar()
        # Paging bar for paginated navigation
        self.paging_bar = containers.ExtPagingBar()
        # Double-click handler
        self.dblclick_handler = 'onEditRecord'
        # Client-side editing flag
        # - data is handled in a special way while editing
        self.local_edit = False
        # The store's limit is mirrored here so it can be changed together
        # with the paging bar's page_size attribute
        self._limit = self.store.limit if hasattr(self.store, 'limit') else -1
        # Exclusion list for make_read_only
        self._mro_exclude_list = []
        self.init_component()
def add_search_field(self):
u"""Добавляет строку поиска в гриде."""
self.top_bar.search_field = ExtSearchField(
empty_text=u'Поиск', width=200, component_for_search=self)
self.top_bar.add_fill()
self.top_bar.items.append(self.top_bar.search_field)
self._mro_exclude_list.append(self.top_bar.search_field)
    def _make_read_only(
            self, access_off=True, exclude_list=(), *args, **kwargs):
        # Toggle the read-only state of the grid and propagate it to the
        # context-menu items and top-bar buttons.
        self.read_only = access_off
        # Preserve the container type of the instance exclusion list
        # (list/tuple) while merging in caller-supplied exclusions;
        # `exclude_list or []` also tolerates a None argument.
        _type = type(self._mro_exclude_list)
        exclude_list = _type(chain(self._mro_exclude_list, exclude_list or []))
        # Disable/enable the components.
        for item in (
            self.context_menu_grid.menuitem_new,
            self.context_menu_grid.menuitem_edit,
            self.context_menu_grid.menuitem_delete,
            self.context_menu_row.menuitem_new,
            self.context_menu_row.menuitem_edit,
            self.context_menu_row.menuitem_delete,
            self.context_menu_row,
        ):
            item.make_read_only(
                access_off, exclude_list, *args, **kwargs
            )
        # Top-bar items are toggled too, when they support it.
        if hasattr(self.top_bar, 'items') and self.top_bar.items:
            for item in self.top_bar.items:
                if hasattr(item, 'make_read_only') and callable(
                        item.make_read_only):
                    item.make_read_only(
                        access_off, exclude_list, *args, **kwargs)
@property
def handler_beforenew(self):
    """Listener fired before a "new record" request is sent."""
    return self._listeners.get('beforenewrequest')

@handler_beforenew.setter
def handler_beforenew(self, function):
    self._listeners['beforenewrequest'] = function
@property
def handler_beforeedit(self):
    """Listener fired before an "edit record" request is sent."""
    return self._listeners.get('beforeeditrequest')

@handler_beforeedit.setter
def handler_beforeedit(self, function):
    self._listeners['beforeeditrequest'] = function
def render(self):
    """
    Overridden grid rendering: populates the toolbars and context
    menus according to the configured actions/urls before delegating
    to the component renderer.
    """
    if self.action_new or self.url_new:
        self.context_menu_row.items.append(
            self.context_menu_row.menuitem_new)
        self.context_menu_grid.items.append(
            self.context_menu_grid.menuitem_new)
    if self.action_edit or self.url_edit:
        self.context_menu_row.items.append(
            self.context_menu_row.menuitem_edit)
        self.handler_dblclick = self.dblclick_handler
    if self.action_delete or self.url_delete:
        self.context_menu_row.items.append(
            self.context_menu_row.menuitem_delete)
    # The context menu is attached to the grid only if it contains
    # at least one item.
    if self.context_menu_grid.items:
        self.handler_contextmenu = self.context_menu_grid
    if self.context_menu_row.items:
        self.handler_rowcontextmenu = self.context_menu_row
    #======================================================================
    # Top bar configuration
    #======================================================================
    def remove(this):
        # Drop a button from the top bar if it is present.
        if this in self.top_bar.items:
            self.top_bar.items.remove(this)
    # @TODO: refactor this so it does not live inside rendering
    if (not self.action_data and not self.url_data and
            self.top_bar.button_refresh in self.top_bar.items):
        remove(self.top_bar.button_refresh)
    if (not self.action_delete and not self.url_delete and
            self.top_bar.button_delete in self.top_bar.items):
        remove(self.top_bar.button_delete)
    if (not self.action_edit and not self.url_edit and
            self.top_bar.button_edit in self.top_bar.items):
        remove(self.top_bar.button_edit)
    if (not self.action_new and not self.url_new and
            self.top_bar.button_new in self.top_bar.items):
        remove(self.top_bar.button_new)
    # Fine-tuning of self.store
    if not self.store.url and self.action_data:
        self.store.url = get_url(self.action_data)
    if self.url_data:
        self.store.url = self.url_data
    # The store may legitimately be empty
    # assert self.store.url, 'Url for store or action_data is not define'
    if self.allow_paging:
        # self.store.start already defaults to 0.
        # If the store is not an ExtJsonStore instance it has no
        # 'limit' attribute.
        if hasattr(self.store, 'limit'):
            self.store.limit = (
                self.store.limit if self.store.limit > 0 else 25)
        self.bottom_bar = self.paging_bar
    self.render_base_config()
    self.render_params()
    return render_component(self)
def render_params(self):
    """Serialize grid params: urls, row id, paging and edit flags."""
    super(ExtObjectGrid, self).render_params()
    # Resolve urls for the grid.
    # Explicit text urls take precedence over actions!
    if not self.url_new and self.action_new:
        self.url_new = get_url(self.action_new)
    if not self.url_edit and self.action_edit:
        self.url_edit = get_url(self.action_edit)
    if not self.url_delete and self.action_delete:
        self.url_delete = get_url(self.action_delete)
    if not self.url_data and self.action_data:
        self.url_data = get_url(self.action_data)
    context_json = (
        self.action_context.json if self.action_context else None)
    self._put_params_value(
        'actions',
        {
            'newUrl': self.url_new,
            'editUrl': self.url_edit,
            'deleteUrl': self.url_delete,
            'dataUrl': self.url_data,
            'contextJson': context_json})
    self._put_params_value('rowIdName', self.row_id_name)
    self._put_params_value('columnParamName', self.column_param_name)
    self._put_params_value('allowPaging', self.allow_paging)
    self._put_params_value('readOnly', self.read_only)
    self._put_params_value('localEdit', self.local_edit)
def t_render_base_config(self):
    """Template helper: the serialized base config string."""
    return self._get_config_str()

def t_render_params(self):
    """Template helper: the serialized params string."""
    return self._get_params_str()
@property
def limit(self):
    """Page size of the grid (rows per page)."""
    return self._limit

@limit.setter
def limit(self, limit):
    self._limit = limit
    # If the store is not an ExtJSONStore instance it has no
    # 'limit' attribute.
    if hasattr(self.store, 'limit'):
        self.store.limit = limit
    # The page size is propagated to the paging bar only when
    # paging is enabled.
    if self.allow_paging:
        self.paging_bar.page_size = limit
class ExtMultiGroupinGrid(containers.ExtGrid):
    """
    Grid supporting grouping by multiple columns.
    The grouping itself is processed on the server.
    .. seealso::
        m3.helpers.datagrouping
    """
    class GridExportMenu(menus.ExtContextMenu):
        """
        Inner class providing the grid's export context menu.
        """
        def __init__(self, *args, **kwargs):
            super(ExtMultiGroupinGrid.GridExportMenu, self).__init__(
                *args, **kwargs)
            self.xls = menus.ExtContextMenuItem(
                text=u'XLS (Excel2003 до 65000 строк)',
                handler='function(){exportData("xls");}')
            self.csv = menus.ExtContextMenuItem(
                text=u'CSV (разделитель ";")',
                handler='function(){exportData("csv");}')
            self.items.extend([
                self.xls,
                self.csv,
            ])
            self.init_component()

    class LiveGridTopBar(containers.ExtToolBar):
        """
        Inner class providing the grid's top toolbar.
        """
        def __init__(self, *args, **kwargs):
            super(ExtMultiGroupinGrid.LiveGridTopBar, self).__init__(
                *args, **kwargs)
            self._ext_name = "Ext.ux.grid.livegrid.Toolbar"
            self.button_new = controls.ExtButton(
                text=u'Добавить',
                icon_cls='add_item',
                handler='topBarNew'
            )
            self.button_edit = controls.ExtButton(
                text=u'Изменить',
                icon_cls='edit_item',
                handler='topBarEdit'
            )
            self.button_delete = controls.ExtButton(
                text=u'Удалить',
                icon_cls='delete_item',
                handler='topBarDelete'
            )
            self.button_export = controls.ExtButton(
                text=u'Экспорт',
                icon_cls='icon-table-go',
                menu=ExtMultiGroupinGrid.GridExportMenu(
                )
            )
            self.items.extend([
                self.button_new,
                self.button_edit,
                self.button_delete,
                self.button_export,
            ])
            self.init_component()

    # Field that holds the grouping key value; it must differ from the
    # store's key field because it carries entirely different data.
    data_id_field = 'id'
    # Field displayed instead of the grouping identifier
    # (by default the identifier itself is displayed).
    data_display_field = 'id'
    # Default grouping fields (list of field names).
    grouped = None

    def __init__(self, *args, **kwargs):
        super(ExtMultiGroupinGrid, self).__init__(*args, **kwargs)
        self.template = 'ext-grids/ext-multigrouping-grid.js'
        # Data action
        self.action_data = None
        self.action_new = None  # Action for creating a record
        self.action_edit = None  # Action for editing a record
        self.action_delete = None  # Action for deleting a record
        self.action_export = None  # Action for export
        # Field holding the record id
        self.row_id_name = 'row_id'
        # Double-click handler
        self.dblclick_handler = 'onEditRecord'
        # Top bar of the grid
        self._top_bar = ExtMultiGroupinGrid.LiveGridTopBar()
        # Whether to mask the grid while loading
        self.load_mask = True
        # Store used to load the data
        self.store = misc.store.ExtMultiGroupingStore(
            auto_load=True, root='rows', id_property='index')
        # Initial list of grouped columns
        self.grouped = []
        # Client-side editing flag
        # - data is handled in a special way while editing
        self.local_edit = False
        # Whether tooltips are shown
        self.show_tooltips = True
        # Whether grouping is allowed (shows the grouping panel)
        self.groupable = True
        # Whether to display record info
        self.display_info = True
        # Format of the displayed record info
        self.display_message = u'Показано {0}-{1} из {2}'
        # Record buffer size - must be larger than the number of
        # neighbouring rows plus the number of visible rows
        self.buffer_size = 200
        # Number of neighbouring rows above and below the visible area,
        # kept to avoid extra requests.
        # Usually 1/4 or 1/2 of the buffer size
        self.near_limit = 100
        # Column header style.
        # Applied to every column, e.g.: 'text-align: center;'
        self.header_style = ''
        # Urls for the pack
        self.url_data = self.url_new = self.url_edit = self.url_delete = None
        self.init_component()

    def render(self):
        """Render the grid after resolving the store url."""
        self.store.url = self.store.url or self.url_data
        assert self.store.url or self.action_data, 'Action data is not defined'
        # Fine-tuning of self.store
        if not self.store.url and self.action_data:
            self.store.url = get_url(self.action_data)
        self.render_base_config()
        self.render_params()
        return render_component(self)

    def render_base_config(self):
        # The header style is injected before calling the parent,
        # because the parent already serializes _view_config.
        if self.header_style:
            self._view_config['headerStyle'] = self.header_style
        super(ExtMultiGroupinGrid, self).render_base_config()

    def render_params(self):
        """Serialize grid params: urls, toolbar, grouping, buffering."""
        super(ExtMultiGroupinGrid, self).render_params()
        data_url = self.url_data or (get_url(
            self.action_data) if self.action_data else None)

        def remove(this):
            # Drop a toolbar button when its action/url is absent.
            if this in self._top_bar.items:
                self._top_bar.items.remove(this)
        new_url = self.url_new or (get_url(
            self.action_new) if self.action_new else None)
        if not new_url:
            remove(self._top_bar.button_new)
        edit_url = self.url_edit or (get_url(
            self.action_edit) if self.action_edit else None)
        if not edit_url:
            remove(self._top_bar.button_edit)
        else:
            self.handler_dblclick = self.dblclick_handler
        delete_url = self.url_delete or (get_url(
            self.action_delete) if self.action_delete else None)
        if not delete_url:
            remove(self._top_bar.button_delete)
        export_url = get_url(
            self.action_export) if self.action_export else None
        if not self.action_export:
            remove(self._top_bar.button_export)
        context_json = (
            self.action_context.json if self.action_context else None)
        for args in (
            (
                'actions',
                {
                    'dataUrl': data_url,
                    'newUrl': new_url,
                    'editUrl': edit_url,
                    'deleteUrl': delete_url,
                    'exportUrl': export_url,
                    'contextJson': context_json
                }
            ),
            (
                'groupedColumns',
                lambda: '[%s]' % ','.join(
                    ["'%s'" % col for col in self.grouped]
                )
            ),
            ('dataIdField', self.data_id_field),
            ('dataDisplayField', self.data_display_field),
            ('toolbar', self._top_bar.t_render_items),
            ('rowIdName', self.row_id_name),
            ('localEdit', self.local_edit),
            ('groupable', self.groupable),
            ('showTooltips', self.show_tooltips),
            ('displayInfo', self.display_info),
            ('displayMsg', self.display_message),
            ('bufferSize', self.buffer_size),
            ('nearLimit', self.near_limit),
        ):
            self._put_params_value(*args)

    def t_render_base_config(self):
        """Template helper: the serialized base config string."""
        return self._get_config_str()

    def t_render_params(self):
        """Template helper: the serialized params string."""
        return self._get_params_str()

    def t_render_plugins(self):
        """Template helper: plugin list, optionally with cell tooltips."""
        if self.show_tooltips:
            tooltips = []
            for column in self.columns:
                if column.tooltip:
                    tooltips.append(column.tooltip)
                elif column.data_index:
                    tooltips.append({
                        'field': column.data_index,
                        'tpl': '{%s}' % column.data_index
                    })
            self.plugins.append(
                'new Ext.ux.plugins.grid.CellToolTips(%s)' % json.dumps(tooltips))
        return super(ExtMultiGroupinGrid, self).t_render_plugins()

    @property
    def handler_beforenew(self):
        """Listener fired before a "new record" request is sent."""
        return self._listeners.get('beforenewrequest')

    @handler_beforenew.setter
    def handler_beforenew(self, function):
        self._listeners['beforenewrequest'] = function

    @property
    def handler_beforeedit(self):
        """Listener fired before an "edit record" request is sent."""
        return self._listeners.get('beforeeditrequest')

    @handler_beforeedit.setter
    def handler_beforeedit(self, function):
        self._listeners['beforeeditrequest'] = function

    def _make_read_only(
            self, access_off=True, exclude_list=(), *args, **kwargs):
        """Propagate read-only mode to the toolbar components."""
        super(ExtMultiGroupinGrid, self)._make_read_only(
            access_off, exclude_list, *args, **kwargs)
        if (self._top_bar and hasattr(self._top_bar, 'items')
                and self._top_bar.items and hasattr(
                    self._top_bar.items, '__iter__')):
            for item in self._top_bar.items:
                if isinstance(item, ExtUIComponent):
                    item.make_read_only(
                        self.read_only,
                        exclude_list,
                        *args, **kwargs
                    )
class ExtObjectSelectionPanel(containers.ExtContainer):
    """
    Panel combining an ObjectGrid with the ability to select and
    remember records across pages when paging is used.
    """
    def __init__(self,
                 grid=None,
                 selection_columns=None,
                 selection_grid_conf=None,
                 *args,
                 **kwargs):
        super(ExtObjectSelectionPanel, self).__init__(*args, **kwargs)
        #self.xtype = 'object-selection-panel'
        self._ext_name = 'Ext.m3.ObjectSelectionPanel'
        self.layout = 'border'
        self.grid = grid
        self.selection_columns = selection_columns or []
        self.selection_grid_conf = selection_grid_conf or {}

    def render_base_config(self):
        """Serialize the base config, forcing a checkbox selection model."""
        super(ExtObjectSelectionPanel, self).render_base_config()
        assert self.grid, 'Grid should be define'
        if not isinstance(self.grid.sm, ExtGridCheckBoxSelModel):
            self.grid.sm = ExtGridCheckBoxSelModel()
        for name, value in (
            ('grid', self.grid.render),
            ('selectionColumns', lambda: json.dumps(self.selection_columns)),
            ('selectionGridConf', self._render_selection_conf),
        ):
            self._put_config_value(name, value)

    def _render_selection_conf(self):
        """
        Render the selection grid configuration as a JS object literal.
        :return: str
        """
        def as_js(value):
            # Components render themselves; plain values go through JSON.
            if isinstance(value, BaseExtComponent):
                return value.render()
            return json.dumps(value)
        pairs = [
            '%s:%s' % (key, as_js(value))
            for key, value in self.selection_grid_conf.items()
        ]
        return '{%s}' % ','.join(pairs)
| {
"content_hash": "685932b9c1a561043da5c25833dea4c2",
"timestamp": "",
"source": "github",
"line_count": 698,
"max_line_length": 94,
"avg_line_length": 34.86819484240688,
"alnum_prop": 0.555592078231572,
"repo_name": "barsgroup/m3-ext",
"id": "c5a623dac66c58af45602990b16b531bd122dfe3",
"size": "27302",
"binary": false,
"copies": "1",
"ref": "refs/heads/deprecated/2.0.7.x",
"path": "src/m3_ext/ui/panels/grids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132105"
},
{
"name": "HTML",
"bytes": "18203"
},
{
"name": "JavaScript",
"bytes": "1532142"
},
{
"name": "Python",
"bytes": "468313"
}
],
"symlink_target": ""
} |
from .iterable import iterable
from itertools import islice
from collections import deque
from strict_functions import strict_globals
@strict_globals(iterable=iterable, islice=islice, deque=deque)
def all_subslices(itr):
    """ generates every possible slice that can be generated from an iterable """
    # NOTE: strict_globals pins this function's global namespace to
    # exactly the names injected above (plus builtins), so only those
    # helpers are visible inside the body.
    assert iterable(itr), 'generators.all_subslices only accepts iterable arguments, not {}'.format(itr)
    if not hasattr(itr, '__len__'):  # if itr isnt materialized, make it a deque
        itr = deque(itr)
    len_itr = len(itr)
    # For each start index, grow a window one element at a time and
    # yield every intermediate window as a tuple.
    for start, _ in enumerate(itr):
        d = deque()
        for i in islice(itr, start, len_itr):  # how many slices for this round
            d.append(i)
            yield tuple(d)
# Clean up the module namespace: the helpers were captured by the
# @strict_globals decorator above, so the module-level names are no
# longer needed.
del iterable
del islice
del deque
del strict_globals
| {
"content_hash": "044019a3a7db52409c7600a377a551d2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 104,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.6915167095115681,
"repo_name": "CodyKochmann/generators",
"id": "e8f1bc3e4eab201d0b93bee7d5bd3b5d2619209c",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generators/all_subslices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85464"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from threading import Lock
from redis import Redis
from .base import BaseBackend
# Module-level lock serializing access to the Redis client below.
lock = Lock()
class RedisBackend(BaseBackend):
    """Key-value backend storing serialized values in Redis."""

    def __init__(self, host='localhost', port=6379, db=0, *args, **kw):
        # One shared client per backend instance, guarded by the
        # module-level lock.
        self.client = Redis(host=host, port=port, db=db)
        BaseBackend.__init__(self, *args, **kw)

    def __getitem__(self, key):
        # NOTE(review): original indentation was lost; the lock is
        # assumed to cover only the network read (mirroring the minimal
        # lock scope in __setitem__) — confirm against upstream.
        with lock:
            raw = self.client.get(self.prefixed_key(key))
        if raw:
            return self.deserialize(raw)
        else:
            # Missing key maps to the mapping protocol's KeyError.
            raise KeyError('key %r not found' % key)

    def __setitem__(self, key, value):
        # Serialize outside the lock; only the network write is guarded.
        raw = self.serialize(value)
        with lock:
            self.client.set(self.prefixed_key(key), raw)
| {
"content_hash": "ef7d55ad2dcf0140f5b82db6c0f5d7a1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 27.79310344827586,
"alnum_prop": 0.5905707196029777,
"repo_name": "storborg/gimlet",
"id": "5f982ba8d3ae17a3169584dd1bf3373b35524791",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gimlet/backends/pyredis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47222"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from weekly.models import Event, StdEvent
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import datetime
from django.contrib.auth.models import User
def index(request):
    """Render the weekly planner: this week's events, standing events,
    and active non-superuser users."""
    current_day = datetime.date.today()
    # Start of the current ISO week (Monday).
    week_start = current_day - datetime.timedelta(days=current_day.weekday())
    context = {
        'events': Event.objects.filter(workdate__gte=week_start),
        'std_events': StdEvent.objects.all(),
        'users': User.objects.filter(is_superuser__exact=0, is_active__exact=1),
    }
    return render(request, 'weekly/index.html', context)
| {
"content_hash": "3f70b2475cd44b93b47b38ca6887fdaf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 83,
"avg_line_length": 46.875,
"alnum_prop": 0.7586666666666667,
"repo_name": "lhirdman/planner",
"id": "712b812da0b5cb10dbd246aa79dbad106ca799a9",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weekly/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4995"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "6148"
}
],
"symlink_target": ""
} |
from AnkiHubLibs import webbrowser
import sys
sys.path.append("./AnkiHubLibs")
from AnkiHubLibs import AnkiHub
# Module-level aliases into the bundled AnkiHub library (note: the
# 'AnkiHub' name is re-bound later by the class definition below).
AnkiHubServer = AnkiHub.AnkiHubServer
configFileName = AnkiHub.configFileName
cookieFileName = AnkiHub.cookieFileName
from urllib2 import Request, urlopen, URLError, HTTPError
from pprint import pprint
import json
import urllib
import threading
import time
#import pickle
import os
# import the main window object (mw) from ankiqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo
# import all of the Qt GUI library
from aqt.qt import *
# import the text importer to import text files as decks
from anki.importing import TextImporter
# import cards
import anki.cards
#import anki.utils
from anki.utils import intTime, timestampID, guid64
#import copy for deepcopy
import copy
###############################################################
# Wrapper for QWidget to overwrite closeEvent function. #
###############################################################
class AnkiWidget(QWidget):
    """QWidget wrapper whose close event also tears down AnkiHub state."""

    def __init__(self, AnkiHubInstance, parent=None):
        super(AnkiWidget, self).__init__(parent)
        self.ankiHubInstance = AnkiHubInstance

    def closeEvent(self, event):
        # Release AnkiHub resources before letting Qt close the widget.
        self.ankiHubInstance.terminate()
        super(AnkiWidget, self).closeEvent(event)
###############################################################
# Main program for AnkiHub #
###############################################################
class AnkiHub:
'''
Instance/global variables.
'''
# Decks retrieved from the server.
deckCol = []
# AnkiHubServer connection handle.
server = None
# Credentials for the current session.
username = ''
sessionToken = ''
'''
Initial entry point of function. Should be the only function called by global.
'''
def initialize(self):
    """Entry point: open the login window."""
    #TO-DO: Create a destructor to clear data when the QWidget is closed. Currently hacking by manually clearing instance variables.
    self.createLoginWindow()
'''
Destructor function to clean data when closing AnkiHub window.
'''
def terminate(self):
    """Shut down the server connection and drop cached deck data."""
    self.server.terminate()
    self.deckCol = []
    #self.terminate()
####################################################################################
# GUI setup methods. Creates the QT widget that holds all AnkiHub functionality. #
####################################################################################
'''
Creates the login window.
'''
def createLoginWindow(self):
    """Build and show the username/password login window."""
    mw.login = QWidget()
    mw.login.resize(500, 250);
    mw.login.setWindowTitle('AnkiHub Login')
    mw.login.instructions = QLabel('Please input your username and password.', mw.login)
    mw.login.instructions.move(30, 30)
    mw.login.usernameLabel = QLabel('Username:', mw.login)
    mw.login.usernameLabel.move(30, 100)
    mw.login.username = QLineEdit(mw.login)
    mw.login.username.resize(300,30)
    mw.login.username.move(150, 100)
    mw.login.passwordLabel = QLabel('Password:', mw.login)
    mw.login.passwordLabel.move(30, 150)
    mw.login.password = QLineEdit(mw.login)
    # Mask the password characters.
    mw.login.password.setEchoMode(QLineEdit.Password)
    mw.login.password.resize(300,30)
    mw.login.password.move(150, 150)
    mw.login.signup = QPushButton('Register', mw.login)
    mw.login.signup.move(100,200)
    mw.login.signup.clicked.connect(self.connect('Signup'))
    mw.login.submit = QPushButton('Login', mw.login)
    mw.login.submit.move(300,200)
    mw.login.submit.clicked.connect(self.connect('Login'))
    mw.login.show()
'''
Creates the deck settings window.
'''
def createSettings(self):
    """Build and show the deck-settings window with the deck tree."""
    mw.settings = AnkiWidget(self)
    mw.settings.resize(1024, 520)
    mw.settings.setWindowTitle('AnkiHub Settings')
    mw.settings.userLabel = QLabel(self.username + ' - Decks', mw.settings)
    mw.settings.userLabel.move(64, 32)
    self.createTree()
    mw.settings.redirect = QPushButton('Go to AnkiHub', mw.settings)
    mw.settings.redirect.clicked.connect(self.redirect())
    mw.settings.redirect.move(440, 460)
    # Deck download
    mw.settings.download = QPushButton('Download a Deck', mw.settings)
    mw.settings.download.clicked.connect(self.importDeck())
    mw.settings.download.move(200, 460)
    # Refresh info to include new local changes
    mw.settings.refresh = QPushButton('Refresh', mw.settings)
    mw.settings.refresh.clicked.connect(self.refresh())
    mw.settings.refresh.move(650, 460)
    mw.settings.show()
'''
Creates tree view to display deck hierarchy.
'''
def createTree(self):
    """Populate a QTreeWidget with the user's deck hierarchy."""
    mw.settings.deckTree = QTreeWidget(mw.settings)
    deckTree = mw.settings.deckTree
    deckTree.resize(896, 384)
    deckTree.move(64, 64)
    header = QTreeWidgetItem(['Decks', ''])
    deckTree.setHeaderItem(header)
    deckTree.setColumnWidth(0,750)
    for rootDeck in self.deckCol:
        treeNode = QTreeWidgetItem(deckTree)
        treeNode.setText(0, rootDeck['name'])
        # Per-deck Sync button in the second column.
        treeButton = QPushButton('Sync')
        treeButton.clicked.connect(self.syncDeck(rootDeck))
        deckTree.setItemWidget(treeNode, 1, treeButton)
        self.createTreeChildren(deckTree, rootDeck, treeNode)
'''
Helper method to add children to parent decks.
'''
def createTreeChildren(self, deckTree, parentDeck, parentNode):
    """Recursively add the children of parentDeck under parentNode."""
    for child in parentDeck['children']:
        treeNode = QTreeWidgetItem(parentNode)
        treeNode.setText(0, child['name'])
        treeButton = QPushButton('Sync')
        treeButton.clicked.connect(self.syncDeck(child))
        deckTree.setItemWidget(treeNode, 1, treeButton)
        self.createTreeChildren(deckTree, child, treeNode)
'''
Creates loading window for visual feedback on data processing.
'''
def createLoadingScreen(self):
    """Show a small "loading" window and force an immediate repaint."""
    mw.loading = QWidget()
    mw.loading.resize(275, 100)
    mw.loading.loadingLabel = QLabel('Loading, please wait...', mw.loading)
    mw.loading.loadingLabel.move(30, 30)
    mw.loading.show()
    mw.loading.repaint()
'''
Creates dialog for syncing/uploading decks.
'''
def createSyncScreen(self, deckName, syncThread):
    """Show a sync-progress label and block until syncThread finishes."""
    syncLabel = QLabel('Syncing deck "%s", please wait...' % deckName)
    syncLabel.show()
    syncLabel.repaint()
    syncThread.join()
###################################################
# Callback functions and API calls. #
###################################################
def getTransactions(self, gid):
    """Fetch the transaction list for deck *gid*; None on error."""
    try:
        jsonResponse = self.server.getTransactions(gid)
        return jsonResponse
    except HTTPError, e:
        showInfo(str('Transaction Download Error: %d - %s' % (e.code, str(json.loads(e.read())))))
    except URLError, e:
        showInfo(str(e.args))
def uploadTranasactions(self, gid, transactions):
# GET request to ankihub.herokuapp.com/api/decks?name=deckName
try:
jsonResponse = self.server.postTransactions(gid, transations)
except HTTPError, e:
showInfo(str('Transaction Upload Error: %d - %s' % (e.code, str(json.loads(e.read())))))
except URLError, e:
showInfo(str(e.args))
# Get JSON copy of local deck (processDeck)
# Pass JSON from request and local copy of deck to transactionCalculator
# POST request to transations endpoint
def getCID(self, id):
    """Return the card id: the third colon-separated field of a global id."""
    parts = id.split(":")
    return parts[2]
def getCardNote(self, data):
    """Return the (card, note) pair addressed by data['on']."""
    card = mw.col.getCard(self.getCID(data["on"]))
    note = card.note(reload=True)
    return (card,note)
def processTransactions_UPDATE(self, data):
    """Overwrite the front/back fields of the addressed note."""
    card, note = self.getCardNote(data)
    note.fields[0] = data["front"]
    note.fields[1] = data["back"]
    card.flush()
    note.flush()
# Placeholder handlers for card-level transaction queries that are not
# implemented yet; they are registered in CARD_QUERIES so unsupported
# operations degrade to a no-op instead of a KeyError.
def processTransactions_aKEYWORDS(self, data):
    pass

def processTransactions_rKEYWORDS(self, data):
    pass

def processTransactions_cKEYWORDS(self, data):
    pass

def processTransactions_aNOTES(self, data):
    pass

def processTransactions_rNOTES(self, data):
    pass

def processTransactions_cNOTES(self, data):
    pass
def processTransactions_aTAGS(self, data):
    """Add every tag in data['tags'] to the addressed note."""
    card, note = self.getCardNote(data)
    for i in data["tags"]:
        note.addTag(i)
    note.flush()
def processTransactions_rTAGS(self, data):
    """Remove every tag in data['tags'] from the addressed note."""
    card, note = self.getCardNote(data)
    for i in data["tags"]:
        note.delTag(i)
    note.flush()
def processTransactions_cTAGS(self, data):
    """Clear all tags on the addressed note."""
    card, note = self.getCardNote(data)
    del note.tags[:]
    note.flush()
def processTransactions_GETACTIONS(self, data):
    # Not implemented yet.
    pass

def processTransactions_DELETE(self, data):
    """Remove the addressed card together with its note."""
    mw.col.remCards([self.getCID(data["on"])], notes=True)
    mw.col.decks.flush()
#untested with server
# Dispatch table mapping card-level transaction query names to their
# handlers. Values are the plain (unbound) functions above, so callers
# must invoke them as self.CARD_QUERIES[q](self, data).
CARD_QUERIES = {"UPDATE":processTransactions_UPDATE, "aKEYWORDS":processTransactions_aKEYWORDS, "rKEYWORDS":processTransactions_rKEYWORDS, "cKEYWORDS":processTransactions_cKEYWORDS, "aNOTES":processTransactions_aNOTES, "rNOTES":processTransactions_rNOTES, "cNOTES":processTransactions_cNOTES, "aTAGS":processTransactions_aTAGS, "rTAGS":processTransactions_rTAGS, "cTAGS":processTransactions_cTAGS, "GETACTIONS":processTransactions_GETACTIONS, "DELETE":processTransactions_DELETE}
def processTransactions(self, transactions):
    """Apply a list of card-level transactions to the collection."""
    # transactions is an array
    # NOTE(review): 'compare' is not defined in this module's visible
    # scope and cmp= is Python-2-only — confirm where compare comes from.
    transactions.sort(cmp=compare)
    for t in transactions:
        if t["query"] in self.CARD_QUERIES:
            self.CARD_QUERIES[t["query"]](self, t["data"])
        else:
            pass # uh oh, unsupported query
    # Refresh the Anki UI after mutating the collection.
    mw.reset()
def getDID(self, gid):
    """Return the deck id: the second colon-separated field of a global id."""
    deck_part = gid.split(":")[1]
    return deck_part
#??
def processDeckTransactions_FORK(self, data):
    """Copy every card of the source deck into a newly created deck."""
    orig_did = self.getDID(data["on"])
    # orig_deck = mw.col.decks.get(orig_did)
    # decks.id() creates the deck if it does not exist yet.
    new_did = mw.col.decks.id(data["data"]["name"])
    new_deck = mw.col.decks.get(new_did)
    for cid in mw.col.decks.cids(orig_did):
        card = mw.col.getCard(cid)
        note = card.note(reload=True)
        model = note.model()
        createNewModel = False # create new model?
        if createNewModel:
            new_model = mw.col.models.copy(model) # models.copy saves
        # Deep-copy the note, give it fresh ids, and attach it to the
        # (possibly copied) model before flushing.
        new_note = copy.deepcopy(note)
        new_note.col = note.col
        new_note.id = timestampID(mw.col.db, "notes")
        new_note.guid = guid64()
        if createNewModel:
            new_note._model = new_model
            new_note.mid = new_model['id']
        new_note.flush()
        # Same for the card: fresh id/creation time, re-pointed at the
        # new deck and note.
        new_card = copy.deepcopy(card)
        new_card.col = card.col
        new_card.id = timestampID(mw.col.db, "cards")
        new_card.crt = intTime()
        new_card.did = new_did
        new_card.nid = new_note.id
        new_card.flush()
    mw.col.decks.save(new_deck)
    mw.col.decks.flush()
#TO-DO: Update this to match Tyler's new card schema
def processDeckTransactions_ADD(self, data):
    """Create a card+note in the target deck for each entry in newCards."""
    for c in data["data"]["newCards"]:
        card = anki.cards.Card(mw.col)
        note = mw.col.newNote()
        # use front/back or notes?
        note.fields[0] = c["front"]
        note.fields[1] = c["back"]
        for i in c["tags"]:
            note.addTag(i)
        note.flush()
        # set CID?
        card.nid = note.id
        card.ord = 0 # what the hell is ord?
        card.did = self.getDID(data["on"])
        card.due = 1
        card.flush()
#??
def processDeckTransactions_REMOVE(self, data):
    """Remove the card addressed by data['data']['gid'] from the collection."""
    # NOTE(review): notes are kept (the 'notes' flag is not passed) —
    # confirm whether orphaned notes are intended here.
    mw.col.remCards([self.getCID(data["data"]["gid"])])
#works
def processDeckTransactions_RENAME(self, data):
    """Rename the deck addressed by data['on'] to data['data']['name']."""
    mw.col.decks.rename(mw.col.decks.get(self.getDID(data["on"])), data["data"]["name"])
def processDeckTransactions_REDESC(self, data):
    # Not implemented yet: change the deck description.
    pass

def processDeckTransactions_GETACTIONS(self, data):
    # Not implemented yet.
    pass
#??
def processDeckTransactions_DELETE(self, data):
    """Delete the deck addressed by data['on'], including its cards."""
    mw.col.decks.rem(self.getDID(data["on"]), cardsToo = True)
# Placeholder handlers for deck-level transaction queries that are not
# implemented yet; registered in DECK_QUERIES so unsupported operations
# degrade to a no-op instead of a KeyError.
def processDeckTransactions_aKEYWORDS(self, data):
    pass

def processDeckTransactions_rKEYWORDS(self, data):
    pass

def processDeckTransactions_cKEYWORDS(self, data):
    pass

def processDeckTransactions_REPUB(self, data):
    pass
# Dispatch table mapping deck-level transaction query names to their
# handlers; invoked as self.DECK_QUERIES[q](self, t).
DECK_QUERIES = {"FORK":processDeckTransactions_FORK, "ADD":processDeckTransactions_ADD, "REMOVE":processDeckTransactions_REMOVE, "RENAME":processDeckTransactions_RENAME,"REDESC":processDeckTransactions_REDESC, "GETACTIONS":processDeckTransactions_GETACTIONS, "DELETE":processDeckTransactions_DELETE, "aKEYWORDS":processDeckTransactions_aKEYWORDS, "rKEYWORDS":processDeckTransactions_rKEYWORDS, "cKEYWORDS":processDeckTransactions_cKEYWORDS, "REPUB":processDeckTransactions_REPUB}
def processDeckTransactions(self, transactions):
    """Apply a list of deck-level transactions to the collection."""
    # transactions is an array
    # need to sort transactions by timestamp/grouping here
    # NOTE(review): 'compare' is not defined in the visible module scope
    # and cmp= is Python-2-only; also note the handlers here receive the
    # whole transaction t, while processTransactions passes t["data"] —
    # confirm which contract is intended.
    transactions.sort(cmp=compare)
    for t in transactions:
        if t["query"] in self.DECK_QUERIES:
            self.DECK_QUERIES[t["query"]](self, t)
        else:
            pass # uh oh, unsupported query
    # Refresh the Anki UI after mutating the collection.
    mw.reset()
def getAllDeckNames(self):
    """Return the names of all decks in the collection, newline-joined."""
    decks = mw.col.decks.all()
    return "\n".join([i["name"] for i in decks])
def testTransactions(self):
    """Scratchpad of manual transaction tests; intentionally disabled."""
    # basic transaction = {"query":"", "data":{}}
    # # test deck rename
    # showInfo(self.getAllDeckNames())
    # showInfo(str(mw.col.decks.id("forked", create=False)))
    # transactions = [{"query":"RENAME", "data":{"gid":"joseph:1459643823643", "name":"renamed"}}]
    # # test deck remove
    # cid = "joseph:1459643823643:" + str(mw.col.db.first("select * from cards where did = ?", 1459643823643)[0])
    # transactions = [{"query":"REMOVE", "data":{"id":cid}}]
    # # test deck add
    # transactions = [{"query":"ADD", "data":{"gid":"joseph:1459643823643", "front":"this is the front", "back":"this_is_the_back", "tags":["one_tag","two_tag","three_tags"]}}]
    # # test deck fork
    # transactions = [{"query":"FORK", "data":{"gid":"joseph:1459643823643", "name":"forked"}}]
    # showInfo(str(mw.col.decks.id("forked", create=False)))
    # # test deck remove
    # transactions = [{"query":"DELETE", "data":{"gid":"joseph:1460136150946"}}]
    # self.processDeckTransactions(transactions)
    # # test card update
    # showInfo(str(mw.col.decks.cids(1459643823643)[0]))
    # transactions = [{"query":"UPDATE", "data":{"id":"joseph:1460136150946:1460136040703", "front":"updated front", "back":"updated_back"}}]
    # # test card aTags
    # transactions = [{"query":"aTAGS", "data":{"id":"joseph:1460136150946:1460136040703", "tags":["new_tag_1", "new_tag_2"]}}]
    # # test card rTags
    # transactions = [{"query":"rTAGS", "data":{"id":"joseph:1460136150946:1460136040703", "tags":["new_tag_1", "new_tag_2"]}}]
    # # test card cTags
    # transactions = [{"query":"cTAGS", "data":{"id":"joseph:1460136150946:1460136040703"}}]
    # # test card delete
    # transactions = [{"query":"DELETE", "data":{"id":"joseph:1460136150946:1460138220929"}}]
    # self.processTransactions(transactions)
    pass
def saveTime(self, gid):
    """
    Persist the current UTC timestamp for deck *gid* in the local
    sync-time file, replacing any previous entry for that gid.
    """
    # BUG FIX: 'datetime' was never imported at module level (only
    # 'time' is), so the write below raised NameError. Import locally
    # to keep the fix self-contained.
    import datetime
    current_data = ''
    # Keep every line that does not mention this gid.
    with open(self.local_file_name, "r") as f:
        for line in f:
            if gid not in line:
                current_data += line
    # Rewrite the file with the fresh entry appended.
    with open(self.local_file_name, "w") as f:
        f.write(current_data)
        f.write("{}{}{}".format(gid, self.default_seperator, datetime.datetime.utcnow()))
def loadTime(self, gid):
    """Return the stored timestamp string for *gid*, or None if absent."""
    # Touch the file first so it exists before reading ('a+' creates it).
    with open(self.local_file_name, "a+") as f:
        pass
    with open(self.local_file_name, "r") as f:
        for line in f:
            if gid in line:
                return line.split(self.default_seperator)[1]
'''
Callback function for Sync button. Uses multithreading to process POST requests to /api/decks/
'''
def syncDeck(self, deck):
    """Return a click handler that synchronizes *deck* (currently single-threaded)."""
    # Temp Call to getTrans
    def syncDeckAction():
        #In order for transactions to work, threading must be turned off
        #syncThread = threading.Thread(target=self.recursiveSync, args=('Sync', deck))
        #loadThread = threading.Thread(target=self.createSyncScreen, args=(deck['name'], syncThread))
        #try:
        #syncThread.start()
        #loadThread.start()
        self.recursiveSync('Sync', deck)
        #except:
        #showInfo('Could not start sync thread')
    return syncDeckAction
'''
Callback function to redirect user to AnkiHub.
'''
def redirect(self):
    """Return a click handler that opens the AnkiHub site in a browser."""
    def redirectAction():
        showInfo('Redirecting to AnkiHub')
        webbrowser.open('http://ankihub.herokuapp.com')
    return redirectAction
'''
Callback function to refresh settings window
'''
def refresh(self):
    """Return a click handler that reloads deck data and rebuilds the settings window."""
    def refreshAction():
        self.deckCol = []
        self.processDecks()
        mw.loading.close()
        self.createSettings()
    return refreshAction
'''
Callback function that makes POST requests to /api/users/login/ or /api/users/signup/
'''
def connect(self, endpoint):
    """Return a click handler performing login or signup against the server."""
    def connectAction():
        self.createLoadingScreen()
        self.username = mw.login.username.text()
        password = mw.login.password.text()
        try:
            jsonResponse = None
            if 'Login' in endpoint:
                jsonResponse = self.server.login(self.username, password)
            else:
                jsonResponse = self.server.signup(self.username, password)
            mw.login.close()
            # Cache the authenticated identity for later requests.
            self.username = jsonResponse['user']['username']
            self.sessionToken = jsonResponse['user']['sessionToken']
            showInfo('Success! Logged in as ' + jsonResponse['user']['username'])
            self.getSubscribeDecks(jsonResponse['user']['subscriptions'])
            self.processDecks()
            mw.loading.close()
            self.createSettings()
        except HTTPError, e:
            showInfo(str('%s Error: %d - %s' % (endpoint, e.code, json.loads(e.read()))))
        except URLError, e:
            showInfo(str(e.args))
    return connectAction
def getSubscribeDecks(self, subs):
    """Download each subscribed deck and import it into Anki.

    For every subscription id in *subs*, fetches the deck from the
    server, serializes its cards into a semicolon-separated text file,
    and hands that file to importDeckFromCSV().  Errors are reported to
    the user via showInfo() and do not abort the remaining downloads.
    """
    for sub in subs:
        try:
            jsonResponse = self.server.getDeck(sub)
            if len(jsonResponse) > 0:
                deck = jsonResponse[0]
                cards = deck['cards']
                toFile = ''
                for card in cards:
                    toFile += '%s; %s;\n' % (card['notes']['Front'], card['notes']['Back'])
                directory = os.path.dirname(__file__)
                # FIX: build the path portably; the old code appended the
                # literal string '\import.txt', which only worked on Windows.
                filename = os.path.join(directory, 'import.txt')
                # FIX: context manager closes the handle even if write raises.
                with open(filename, 'w') as handle:
                    handle.write(toFile)
                self.importDeckFromCSV(filename, deck['name'])
        except HTTPError as e:
            showInfo(str('Subscription Download Error: %d - %s' % (e.code, str(json.loads(e.read())))))
        except URLError as e:
            showInfo(str(e.args))
def processRequest(self, requestFrom, request):
    """Fire the prepared urllib *request* and surface the outcome.

    *requestFrom* is a human-readable label ("Sync", ...) used in the
    success and error messages shown to the user.
    """
    try:
        response = urlopen(request)
        jsonResponse = json.loads(response.read())
        showInfo('%s Request Successful!' % requestFrom)
    except HTTPError as e:
        # FIX: an HTTPError body is file-like and can only be read once;
        # the old code called e.read() twice, so the printed copy of the
        # message always had an empty body.  Read once, reuse.
        message = str('%s Error: %d - %s' % (requestFrom, e.code, e.read()))
        showInfo(message)
        print(message)
    except URLError as e:
        showInfo(str(e.args))
        print(str(e.args))
'''
Allows for general requests (both GET and POST) to be made asynchronously when used as target for threads. Currently only used for Sync.
'''
def recursiveSync(self, requestFrom, deck):
    # NOTE(review): the copy presumably guards against the server call
    # mutating *deck*; the gid is still needed on the conflict path below.
    deckCopy = deck.copy()
    try:
        jsonResponse = self.server.recursiveSync(requestFrom, deck)
        showInfo('%s Request Successful!' % requestFrom)
        return jsonResponse
    except HTTPError, e:
        # A 400 from the server signals a sync conflict: fetch the
        # server-side transactions for this deck and replay them locally.
        # Any other HTTP status falls through to the error return below.
        if e.code == 400:
            #lastSync = self.loadTime(deckCopy['gid'])
            transactions = self.getTransactions(deckCopy['gid'])
            self.processDeckTransactions(transactions)
            showInfo('Finished processing transactions')
            return {'gid' : deckCopy['gid']}
    except URLError, e:
        showInfo(str(e.args))
    # Reached on URLError or on an HTTPError other than 400.
    return {'gid' : 'error'}
def importDeck(self):
    """Return a callback that opens the deck-import window."""
    def importDeckAction():
        self.createDeckImportWindow()
    return importDeckAction
'''
Window for inputting deck download link
'''
def createDeckImportWindow(self):
    # Build a small Qt window with a single line edit for the deck URL
    # and a "download" button.  Widgets are parented to mw.download so
    # they stay alive as long as the window does.
    mw.download = QWidget()
    mw.download.resize(500, 250);
    mw.download.setWindowTitle('Download Deck from AnkiHub')
    mw.download.instructions = QLabel('Please input download link.', mw.download)
    mw.download.instructions.move(30, 30)
    mw.download.linkLabel = QLabel('Link:', mw.download)
    mw.download.linkLabel.move(30, 100)
    mw.download.link = QLineEdit(mw.download)
    mw.download.link.resize(300,30)
    mw.download.link.move(150, 100)
    mw.download.submit = QPushButton('download', mw.download)
    mw.download.submit.move(300,200)
    # downloadDeck() returns the actual click handler (a closure), so
    # calling it here is intentional, not a premature invocation.
    mw.download.submit.clicked.connect(self.downloadDeck())
    mw.download.show()
def downloadDeck(self):
    """Return the click handler for the download window's submit button.

    The handler GETs the URL typed into the window and saves the body
    to "<url>.txt" next to the current working directory.
    """
    def downloadAction():
        self.createLoadingScreen()
        requestURL = mw.download.link.text()
        req = Request(requestURL, None, {'Content-Type' : 'application/json'})
        try:
            response = urlopen(req)
            # FIX: the file must be opened for *writing* (the old code used
            # the default read mode, so the write always raised), and the
            # payload is response.read(), not the response object itself.
            # NOTE(review): URLs normally contain '/' characters, which make
            # "%s.txt" % requestURL an invalid path -- consider deriving the
            # filename from os.path.basename(requestURL) or a hash instead.
            with open("%s.txt" % requestURL, "w") as f:
                f.write(response.read())
            mw.download.close()
            showInfo('Success! Deck downloaded')
            mw.loading.close()
        except HTTPError as e:
            showInfo(str('Deck Download Error: %d - %s' % (e.code, json.loads(e.read()))))
        except URLError as e:
            showInfo(str(e.args))
    return downloadAction
'''
CSV to Anki deck importer. If the note type has multiple card types,
multiple cards will automatically be generated for each note.
'''
def importDeckFromCSV(self, filename, name):
    #file = r"C:\Users\aarun\OneDrive\Documents\Anki\addons\import.txt"
    # select deck
    # NOTE(review): decks.id() is expected to return (and create, if
    # missing) the deck id for *name* -- confirm against the Anki API.
    did = mw.col.decks.id(name)
    #model = self.addNewModel()
    mw.col.decks.select(did)
    # set note type for deck: force the built-in "Basic" model
    model = mw.col.models.byName("Basic")
    deck = mw.col.decks.get(did)
    deck['mid'] = model['id']
    mw.col.decks.save(deck)
    # Assign new deck to model
    mw.col.models.setCurrent(model)
    model['did'] = did
    mw.col.models.save(model)
    # import into the collection
    ti = TextImporter(mw.col, filename)
    ti.initMapping()
    ti.run()
    # Refresh the collection and main window so the new cards show up.
    mw.col.reset()
    mw.reset()
'''
Add new custom note type, card type, and templates (collectively a "model").
'''
def addNewModel(self):
    # Currently unused (see the commented-out call in importDeckFromCSV).
    # Builds a "Test" model with three fields and two card templates.
    models = mw.col.models # models = note types
    m = models.new("Test")
    fm = models.newField("Foo")
    models.addField(m, fm)
    fm = models.newField("Bar")
    models.addField(m, fm)
    fm = models.newField("Baz")
    models.addField(m, fm)
    t = models.newTemplate("Card 1") # template = card type
    t['qfmt'] = "{{Foo}}" # qfmt = front template
    t['afmt'] = "{{FrontSide}}\n\n<hr id=answer>\n\n{{Bar}}" # afmt = back template
    models.addTemplate(m, t)
    t = models.newTemplate("Card 2")
    t['qfmt'] = "{{Bar}}"
    t['afmt'] = "{{FrontSide}}\n\n<hr id=answer>\n\n{{Baz}}"
    models.addTemplate(m, t)
    models.add(m)
    return m
#################################################
# Algorithms to serialize JSONs. #
#################################################
'''
Main function to process decks. Gets decks from Anki and creates the overall JSON.
'''
def processDecks(self):
    decks = mw.col.decks.all() #decks from local anki
    deckDict = {} #AnkiHub dictionary of processed decks: name -> json object
    for deckObj in decks:
        if deckObj['name'] not in deckDict: #if we haven't processed this deck yet
            deckDict[deckObj['name']] = {} #create a json-object for that deck's name
            self.initializeDeckValues(deckDict[deckObj['name']], deckObj) #fill empty deck-json with values
        deck = deckDict[deckObj['name']] #deck is processed, get our json object value
        parents = mw.col.decks.parents(deckObj['id']) #get list of parents for deck
        if not parents:
            self.deckCol.append(deck) #no parents, deck is top level, just add to our master deck list
        else:
            # NOTE(review): this treats parents[-1] as the immediate parent;
            # a child processed before its parent gets attached to a parent
            # entry that is initialized here on the spot.
            if parents[-1]['name'] not in deckDict: #check if immediate parent is not processed
                deckDict[parents[-1]['name']] = {} #process immediate parent as above
                self.initializeDeckValues(deckDict[parents[-1]['name']], parents[-1])
            deckDict[parents[-1]['name']]['children'].append(deck) #add deck-json to parent-json's children list, don't add to master list yet
def initializeDeckValues(self, deckDict, deck):
    """Fill *deckDict* with the serialized fields AnkiHub expects for *deck*."""
    deckDict.update({
        'sessionToken': self.sessionToken,
        # Global deck id: "<username>:<anki deck id>".
        'gid': '%s:%d' % (self.username, deck['id']),
        'did': str(deck['id']),
        'description': deck['desc'],
        'name': deck['name'],
        'keywords': [],
        'isPublic': True,
        'owner': self.username,
        'children': [],
        'newCards': [],
    })
    # Serialize the deck's cards into the fresh 'newCards' list.
    self.populateCards(deck, deckDict['newCards'])
'''
Initializer function to create a card with the proper fields.
'''
def populateCards(self, deck, cardList):
    # Serialize every card of *deck* into a plain dict and append it to
    # *cardList* (the deck JSON's 'newCards' list).
    cardIds = mw.col.decks.cids(deck['id'])
    for cardId in cardIds:
        card = mw.col.getCard(cardId)
        cardDict = {}
        # Global deck id: "<username>:<anki deck id>".
        cardDict['gid'] = str('%s:%d' % (self.username, deck['id']))
        cardDict['did'] = str('%s' % ( deck['id']))
        cardDict['cid'] = str(cardId)
        # Front/back are the card *templates*, not the rendered values;
        # the actual field values go into 'notes' below.
        cardDict['front'] = (card.template())['qfmt']
        cardDict['back'] = (card.template())['afmt']
        cardDict['style'] = card.css()
        cardDict['notes'] = {}
        self.parseNotes(deck['id'], card, cardDict['notes'])
        cardDict['tags'] = []
        self.parseTags(cardId, cardDict['tags'])
        cardDict['keywords'] = []
        cardList.append(cardDict)
def parseNotes(self, deckId, card, noteList):
    """Copy every (field name, value) pair of *card*'s note into *noteList*.

    Despite the name, *noteList* is a dict mapping field names to values.
    *deckId* is currently unused by the active code.
    """
    for field_name, field_value in card.note().items():
        noteList[field_name] = field_value
'''
Helper function to parse the tags of a card.
'''
def parseTags(self, cardId, tagList):
    # Look up the note's tag string for this card straight from Anki's DB.
    query = 'select n.tags from cards c, notes n WHERE c.nid = n.id AND c.id = ?'
    response = mw.col.db.list(query, cardId)
    # Split the space-separated tag strings and de-duplicate them.
    # NOTE: set() makes the resulting tag order non-deterministic.
    tags = list(set(mw.col.tags.split(' '.join(response))))
    for tag in tags:
        tagList.append(tag)
#############################################################
# Anki runs from here and calls our functions. #
#############################################################
def compare(trans1, trans2):
    """Order transactions by 'updatedAt', breaking ties with 'index'.

    Returns -1, 0 or 1 in the style of a classic cmp() comparator.
    """
    for key in ('updatedAt', 'index'):
        if trans1[key] < trans2[key]:
            return -1
        if trans1[key] > trans2[key]:
            return 1
    return 0
# Qt attribute enabling X11 thread initialization; must be set before
# any threads touch Qt.
QCoreApplication.setAttribute(Qt.AA_X11InitThreads)
ankiHub = AnkiHub()
#if os.path.isfile(configFileName):
#cD = pickle.load(open(configFileName, "rb"))
#else:
# Config/cookie persistence is disabled; start with an empty config dict.
cD = {}
#if os.path.isfile(cookieFileName):
#    cook = pickle.load(open(cookieFileName, "rb"))
#    ankiHub.server = AnkiHubServer(cD, cook)
#else:
ankiHub.server = AnkiHubServer(cD)
# Register the AnkiHub entry in Anki's Tools menu.
action = QAction('AnkiHub', mw)
mw.connect(action, SIGNAL('triggered()'), ankiHub.initialize)
mw.form.menuTools.addAction(action)
#action = QAction("AnkiHub Deck Import", mw)
#mw.connect(action, SIGNAL("triggered()"), ankiHub.importDeckFromCSV)
#mw.form.menuTools.addAction(action)
#action = QAction('Test transactions', mw)
#mw.connect(action, SIGNAL('triggered()'), ankiHub.testTransactions)
#mw.form.menuTools.addAction(action)
| {
"content_hash": "3072373af3941e10be8fcc1837709fc3",
"timestamp": "",
"source": "github",
"line_count": 815,
"max_line_length": 481,
"avg_line_length": 34.20368098159509,
"alnum_prop": 0.6346678146075477,
"repo_name": "CSE-437/AnkiPlugin",
"id": "ff2621975cb941ab992dc26166ee03fb74ff40b6",
"size": "27876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ankiPlugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "161"
},
{
"name": "OpenEdge ABL",
"bytes": "654"
},
{
"name": "Python",
"bytes": "85048"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
from command import Command
from collections import defaultdict
from git_command import git
from progress import Progress
class Abandon(Command):
    """``repo abandon`` -- delete one or all local development branches."""

    common = True
    helpSummary = "Permanently abandon a development branch"
    helpUsage = """
%prog [--all | <branchname>] [<project>...]
This subcommand permanently abandons a development branch by
deleting it (and all its history) from your local repository.
It is equivalent to "git branch -D <branchname>".
"""

    def _Options(self, p):
        p.add_option('--all',
                     dest='all', action='store_true',
                     help='delete all branches in all projects')

    def Execute(self, opt, args):
        # Validate arguments: either --all, or an explicit branch name.
        if not opt.all and not args:
            self.Usage()
        if not opt.all:
            nb = args[0]
            if not git.check_ref_format('heads/%s' % nb):
                print("error: '%s' is not a valid name" % nb, file=sys.stderr)
                sys.exit(1)
        else:
            # Keep args[1:] meaning "project list" in both modes.
            args.insert(0, None)
            nb = "'All local branches'"
        err = defaultdict(list)      # branch name -> projects that failed
        success = defaultdict(list)  # branch name -> projects abandoned
        all_projects = self.GetProjects(args[1:])
        pm = Progress('Abandon %s' % nb, len(all_projects))
        for project in all_projects:
            pm.update()
            if opt.all:
                branches = project.GetBranches().keys()
            else:
                branches = [nb]
            for name in branches:
                status = project.AbandonBranch(name)
                # None means the project has no such branch; skip it.
                if status is not None:
                    if status:
                        success[name].append(project)
                    else:
                        err[name].append(project)
        pm.end()

        # Report-column width: at least 25, wide enough for the longest
        # branch name actually touched.
        # FIX: the old code measured only the *last* project's `branches`
        # (and hit an unbound name when there were no projects at all).
        width = 25
        for name in list(err) + list(success):
            if width < len(name):
                width = len(name)

        if err:
            for br in err.keys():
                err_msg = "error: cannot abandon %s" % br
                print(err_msg, file=sys.stderr)
                for proj in err[br]:
                    # FIX: this line referenced an undefined name `p`
                    # (NameError); the project being reported is `proj`.
                    print(' ' * len(err_msg) + " | %s" % proj.relpath, file=sys.stderr)
            sys.exit(1)
        elif not success:
            print('error: no project has local branch(es) : %s' % nb,
                  file=sys.stderr)
            sys.exit(1)
        else:
            print('Abandoned branches:', file=sys.stderr)
            for br in success.keys():
                if len(all_projects) > 1 and len(all_projects) == len(success[br]):
                    result = "all project"
                else:
                    result = "%s" % (
                        ('\n' + ' ' * width + '| ').join(p.relpath for p in success[br]))
                print("%s%s| %s\n" % (br, ' ' * (width - len(br)), result), file=sys.stderr)
| {
"content_hash": "e54a88cbf9d825cb6dc291dfc0bbe8bd",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 78,
"avg_line_length": 29.42168674698795,
"alnum_prop": 0.583947583947584,
"repo_name": "dodocat/git-repo",
"id": "6f78da746e8e304d2928d98bd2923c58b732044c",
"size": "3045",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "subcmds/abandon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "402429"
},
{
"name": "Shell",
"bytes": "6376"
}
],
"symlink_target": ""
} |
import unittest
import mock
from ._testing import _make_credentials
class TestRow(unittest.TestCase):
    """Unit tests for the plain ``Row`` base class."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row import Row

        return Row

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test_row_key_getter(self):
        instance = self._make_one(row_key=b"row_key", table="table")
        self.assertEqual(b"row_key", instance.row_key)

    def test_row_table_getter(self):
        instance = self._make_one(row_key=b"row_key", table="table")
        self.assertEqual("table", instance.table)
class Test_SetDeleteRow(unittest.TestCase):
    """Unit tests for the abstract ``_SetDeleteRow`` helper class."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row import _SetDeleteRow

        return _SetDeleteRow

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    def test__get_mutations_virtual(self):
        # The base class leaves _get_mutations abstract.
        instance = self._make_one(b"row-key", None)
        with self.assertRaises(NotImplementedError):
            instance._get_mutations(None)
class TestDirectRow(unittest.TestCase):
    """Unit tests for ``DirectRow`` (unconditional mutations)."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row import DirectRow

        return DirectRow

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    @staticmethod
    def _get_target_client_class():
        from google.cloud.bigtable.client import Client

        return Client

    def _make_client(self, *args, **kwargs):
        return self._get_target_client_class()(*args, **kwargs)

    def test_constructor(self):
        row_key = b"row_key"
        table = object()
        row = self._make_one(row_key, table)
        self.assertEqual(row._row_key, row_key)
        self.assertIs(row._table, table)
        self.assertEqual(row._pb_mutations, [])

    def test_constructor_with_unicode(self):
        # A text row key must be encoded to bytes by the constructor.
        row_key = u"row_key"
        row_key_bytes = b"row_key"
        table = object()
        row = self._make_one(row_key, table)
        self.assertEqual(row._row_key, row_key_bytes)
        self.assertIs(row._table, table)

    def test_constructor_with_non_bytes(self):
        row_key = object()
        with self.assertRaises(TypeError):
            self._make_one(row_key, None)

    def test__get_mutations(self):
        row_key = b"row_key"
        row = self._make_one(row_key, None)
        row._pb_mutations = mutations = object()
        self.assertIs(mutations, row._get_mutations(None))

    def test_get_mutations_size(self):
        row_key = b"row_key"
        row = self._make_one(row_key, None)
        column_family_id1 = u"column_family_id1"
        column_family_id2 = u"column_family_id2"
        column1 = b"column1"
        column2 = b"column2"
        number_of_bytes = 1 * 1024 * 1024
        value = b"1" * number_of_bytes
        row.set_cell(column_family_id1, column1, value)
        row.set_cell(column_family_id2, column2, value)
        # get_mutations_size() must agree with summing each mutation's
        # serialized protobuf size.
        total_mutations_size = 0
        for mutation in row._get_mutations():
            total_mutations_size += mutation.ByteSize()
        self.assertEqual(row.get_mutations_size(), total_mutations_size)

    def _set_cell_helper(
        self,
        column=None,
        column_bytes=None,
        value=b"foobar",
        timestamp=None,
        timestamp_micros=-1,
    ):
        # Shared driver for the set_cell variants below; builds the
        # expected SetCell mutation protobuf and compares.
        import six
        import struct

        row_key = b"row_key"
        column_family_id = u"column_family_id"
        if column is None:
            column = b"column"
        table = object()
        row = self._make_one(row_key, table)
        self.assertEqual(row._pb_mutations, [])
        row.set_cell(column_family_id, column, value, timestamp=timestamp)
        # Integer values are serialized as big-endian 64-bit.
        if isinstance(value, six.integer_types):
            value = struct.pack(">q", value)
        expected_pb = _MutationPB(
            set_cell=_MutationSetCellPB(
                family_name=column_family_id,
                column_qualifier=column_bytes or column,
                timestamp_micros=timestamp_micros,
                value=value,
            )
        )
        self.assertEqual(row._pb_mutations, [expected_pb])

    def test_set_cell(self):
        self._set_cell_helper()

    def test_set_cell_with_string_column(self):
        column_bytes = b"column"
        column_non_bytes = u"column"
        self._set_cell_helper(column=column_non_bytes, column_bytes=column_bytes)

    def test_set_cell_with_integer_value(self):
        value = 1337
        self._set_cell_helper(value=value)

    def test_set_cell_with_non_bytes_value(self):
        row_key = b"row_key"
        column = b"column"
        column_family_id = u"column_family_id"
        table = object()
        row = self._make_one(row_key, table)
        value = object()  # Not bytes
        with self.assertRaises(TypeError):
            row.set_cell(column_family_id, column, value)

    def test_set_cell_with_non_null_timestamp(self):
        import datetime
        from google.cloud._helpers import _EPOCH

        # Timestamps are truncated to millisecond granularity.
        microseconds = 898294371
        millis_granularity = microseconds - (microseconds % 1000)
        timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds)
        self._set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity)

    def test_delete(self):
        row_key = b"row_key"
        row = self._make_one(row_key, object())
        self.assertEqual(row._pb_mutations, [])
        row.delete()
        expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB())
        self.assertEqual(row._pb_mutations, [expected_pb])

    def test_delete_cell(self):
        klass = self._get_target_class()

        class MockRow(klass):
            def __init__(self, *args, **kwargs):
                super(MockRow, self).__init__(*args, **kwargs)
                self._args = []
                self._kwargs = []

            # Replace the called method with one that logs arguments.
            def _delete_cells(self, *args, **kwargs):
                self._args.append(args)
                self._kwargs.append(kwargs)

        row_key = b"row_key"
        column = b"column"
        column_family_id = u"column_family_id"
        table = object()
        mock_row = MockRow(row_key, table)
        # Make sure no values are set before calling the method.
        self.assertEqual(mock_row._pb_mutations, [])
        self.assertEqual(mock_row._args, [])
        self.assertEqual(mock_row._kwargs, [])
        # Actually make the request against the mock class.
        time_range = object()
        mock_row.delete_cell(column_family_id, column, time_range=time_range)
        # delete_cell must delegate to _delete_cells with a one-column list.
        self.assertEqual(mock_row._pb_mutations, [])
        self.assertEqual(mock_row._args, [(column_family_id, [column])])
        self.assertEqual(mock_row._kwargs, [{"state": None, "time_range": time_range}])

    def test_delete_cells_non_iterable(self):
        row_key = b"row_key"
        column_family_id = u"column_family_id"
        table = object()
        row = self._make_one(row_key, table)
        columns = object()  # Not iterable
        with self.assertRaises(TypeError):
            row.delete_cells(column_family_id, columns)

    def test_delete_cells_all_columns(self):
        row_key = b"row_key"
        column_family_id = u"column_family_id"
        table = object()
        row = self._make_one(row_key, table)
        klass = self._get_target_class()
        self.assertEqual(row._pb_mutations, [])
        # ALL_COLUMNS collapses into a single delete-from-family mutation.
        row.delete_cells(column_family_id, klass.ALL_COLUMNS)
        expected_pb = _MutationPB(
            delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id)
        )
        self.assertEqual(row._pb_mutations, [expected_pb])

    def test_delete_cells_no_columns(self):
        row_key = b"row_key"
        column_family_id = u"column_family_id"
        table = object()
        row = self._make_one(row_key, table)
        columns = []
        self.assertEqual(row._pb_mutations, [])
        # An empty column list is a no-op.
        row.delete_cells(column_family_id, columns)
        self.assertEqual(row._pb_mutations, [])

    def _delete_cells_helper(self, time_range=None):
        row_key = b"row_key"
        column = b"column"
        column_family_id = u"column_family_id"
        table = object()
        row = self._make_one(row_key, table)
        columns = [column]
        self.assertEqual(row._pb_mutations, [])
        row.delete_cells(column_family_id, columns, time_range=time_range)
        expected_pb = _MutationPB(
            delete_from_column=_MutationDeleteFromColumnPB(
                family_name=column_family_id, column_qualifier=column
            )
        )
        if time_range is not None:
            expected_pb.delete_from_column.time_range.CopyFrom(time_range.to_pb())
        self.assertEqual(row._pb_mutations, [expected_pb])

    def test_delete_cells_no_time_range(self):
        self._delete_cells_helper()

    def test_delete_cells_with_time_range(self):
        import datetime
        from google.cloud._helpers import _EPOCH
        from google.cloud.bigtable.row_filters import TimestampRange

        microseconds = 30871000  # Makes sure already milliseconds granularity
        start = _EPOCH + datetime.timedelta(microseconds=microseconds)
        time_range = TimestampRange(start=start)
        self._delete_cells_helper(time_range=time_range)

    def test_delete_cells_with_bad_column(self):
        # This makes sure a failure on one of the columns doesn't leave
        # the row's mutations in a bad state.
        row_key = b"row_key"
        column = b"column"
        column_family_id = u"column_family_id"
        table = object()
        row = self._make_one(row_key, table)
        columns = [column, object()]
        self.assertEqual(row._pb_mutations, [])
        with self.assertRaises(TypeError):
            row.delete_cells(column_family_id, columns)
        self.assertEqual(row._pb_mutations, [])

    def test_delete_cells_with_string_columns(self):
        row_key = b"row_key"
        column_family_id = u"column_family_id"
        column1 = u"column1"
        column1_bytes = b"column1"
        column2 = u"column2"
        column2_bytes = b"column2"
        table = object()
        row = self._make_one(row_key, table)
        columns = [column1, column2]
        self.assertEqual(row._pb_mutations, [])
        # Text column names must be encoded to bytes in the mutations.
        row.delete_cells(column_family_id, columns)
        expected_pb1 = _MutationPB(
            delete_from_column=_MutationDeleteFromColumnPB(
                family_name=column_family_id, column_qualifier=column1_bytes
            )
        )
        expected_pb2 = _MutationPB(
            delete_from_column=_MutationDeleteFromColumnPB(
                family_name=column_family_id, column_qualifier=column2_bytes
            )
        )
        self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2])

    def test_commit(self):
        project_id = "project-id"
        row_key = b"row_key"
        table_name = "projects/more-stuff"
        column_family_id = u"column_family_id"
        column = b"column"
        credentials = _make_credentials()
        client = self._make_client(
            project=project_id, credentials=credentials, admin=True
        )
        table = _Table(table_name, client=client)
        row = self._make_one(row_key, table)
        value = b"bytes-value"
        # Perform the method and check the result.
        row.set_cell(column_family_id, column, value)
        row.commit()
        self.assertEqual(table.mutated_rows, [row])
class TestConditionalRow(unittest.TestCase):
    """Unit tests for ``ConditionalRow`` (check-and-mutate rows)."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row import ConditionalRow

        return ConditionalRow

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    @staticmethod
    def _get_target_client_class():
        from google.cloud.bigtable.client import Client

        return Client

    def _make_client(self, *args, **kwargs):
        return self._get_target_client_class()(*args, **kwargs)

    def test_constructor(self):
        row_key = b"row_key"
        table = object()
        filter_ = object()
        row = self._make_one(row_key, table, filter_=filter_)
        self.assertEqual(row._row_key, row_key)
        self.assertIs(row._table, table)
        self.assertIs(row._filter, filter_)
        self.assertEqual(row._true_pb_mutations, [])
        self.assertEqual(row._false_pb_mutations, [])

    def test__get_mutations(self):
        row_key = b"row_key"
        filter_ = object()
        row = self._make_one(row_key, None, filter_=filter_)
        row._true_pb_mutations = true_mutations = object()
        row._false_pb_mutations = false_mutations = object()
        # state=True selects the "filter matched" list; False and None
        # both select the "filter did not match" list.
        self.assertIs(true_mutations, row._get_mutations(True))
        self.assertIs(false_mutations, row._get_mutations(False))
        self.assertIs(false_mutations, row._get_mutations(None))

    def test_commit(self):
        from google.cloud.bigtable.row_filters import RowSampleFilter
        from google.cloud.bigtable_v2.gapic import bigtable_client

        project_id = "project-id"
        row_key = b"row_key"
        table_name = "projects/more-stuff"
        column_family_id1 = u"column_family_id1"
        column_family_id2 = u"column_family_id2"
        column_family_id3 = u"column_family_id3"
        column1 = b"column1"
        column2 = b"column2"
        api = bigtable_client.BigtableClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project=project_id, credentials=credentials, admin=True
        )
        table = _Table(table_name, client=client)
        row_filter = RowSampleFilter(0.33)
        row = self._make_one(row_key, table, filter_=row_filter)
        # Create request_pb
        value1 = b"bytes-value"
        # Create response_pb
        predicate_matched = True
        response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched)
        # Patch the stub used by the API method.
        api.transport.check_and_mutate_row.side_effect = [response_pb]
        client._table_data_client = api
        # Create expected_result.
        expected_result = predicate_matched
        # Perform the method and check the result.
        row.set_cell(column_family_id1, column1, value1, state=True)
        row.delete(state=False)
        row.delete_cell(column_family_id2, column2, state=True)
        row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True)
        result = row.commit()
        self.assertEqual(result, expected_result)
        # commit() must clear both pending-mutation lists.
        self.assertEqual(row._true_pb_mutations, [])
        self.assertEqual(row._false_pb_mutations, [])

    def test_commit_too_many_mutations(self):
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import row as MUT

        row_key = b"row_key"
        table = object()
        filter_ = object()
        row = self._make_one(row_key, table, filter_=filter_)
        row._true_pb_mutations = [1, 2, 3]
        num_mutations = len(row._true_pb_mutations)
        # Shrink the module-level cap below the pending count.
        with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
            with self.assertRaises(ValueError):
                row.commit()

    def test_commit_no_mutations(self):
        from tests.unit._testing import _FakeStub

        project_id = "project-id"
        row_key = b"row_key"
        credentials = _make_credentials()
        client = self._make_client(
            project=project_id, credentials=credentials, admin=True
        )
        table = _Table(None, client=client)
        filter_ = object()
        row = self._make_one(row_key, table, filter_=filter_)
        self.assertEqual(row._true_pb_mutations, [])
        self.assertEqual(row._false_pb_mutations, [])
        # Patch the stub used by the API method.
        stub = _FakeStub()
        # Perform the method and check the result.
        result = row.commit()
        self.assertIsNone(result)
        # Make sure no request was sent.
        self.assertEqual(stub.method_calls, [])
class TestAppendRow(unittest.TestCase):
    """Unit tests for ``AppendRow`` (read-modify-write rows)."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigtable.row import AppendRow

        return AppendRow

    def _make_one(self, *args, **kwargs):
        return self._get_target_class()(*args, **kwargs)

    @staticmethod
    def _get_target_client_class():
        from google.cloud.bigtable.client import Client

        return Client

    def _make_client(self, *args, **kwargs):
        return self._get_target_client_class()(*args, **kwargs)

    def test_constructor(self):
        row_key = b"row_key"
        table = object()
        row = self._make_one(row_key, table)
        self.assertEqual(row._row_key, row_key)
        self.assertIs(row._table, table)
        self.assertEqual(row._rule_pb_list, [])

    def test_clear(self):
        row_key = b"row_key"
        table = object()
        row = self._make_one(row_key, table)
        row._rule_pb_list = [1, 2, 3]
        row.clear()
        self.assertEqual(row._rule_pb_list, [])

    def test_append_cell_value(self):
        table = object()
        row_key = b"row_key"
        row = self._make_one(row_key, table)
        self.assertEqual(row._rule_pb_list, [])
        column = b"column"
        column_family_id = u"column_family_id"
        value = b"bytes-val"
        row.append_cell_value(column_family_id, column, value)
        expected_pb = _ReadModifyWriteRulePB(
            family_name=column_family_id, column_qualifier=column, append_value=value
        )
        self.assertEqual(row._rule_pb_list, [expected_pb])

    def test_increment_cell_value(self):
        table = object()
        row_key = b"row_key"
        row = self._make_one(row_key, table)
        self.assertEqual(row._rule_pb_list, [])
        column = b"column"
        column_family_id = u"column_family_id"
        int_value = 281330
        row.increment_cell_value(column_family_id, column, int_value)
        expected_pb = _ReadModifyWriteRulePB(
            family_name=column_family_id,
            column_qualifier=column,
            increment_amount=int_value,
        )
        self.assertEqual(row._rule_pb_list, [expected_pb])

    def test_commit(self):
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import row as MUT
        from google.cloud.bigtable_v2.gapic import bigtable_client

        project_id = "project-id"
        row_key = b"row_key"
        table_name = "projects/more-stuff"
        column_family_id = u"column_family_id"
        column = b"column"
        api = bigtable_client.BigtableClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project=project_id, credentials=credentials, admin=True
        )
        table = _Table(table_name, client=client)
        row = self._make_one(row_key, table)
        # Create request_pb
        value = b"bytes-value"
        # Create expected_result.
        row_responses = []
        expected_result = object()
        # Patch API calls
        client._table_data_client = api

        # Stand-in for the module-level response parser; records its
        # input and returns the sentinel expected_result.
        def mock_parse_rmw_row_response(row_response):
            row_responses.append(row_response)
            return expected_result

        # Perform the method and check the result.
        with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response):
            row.append_cell_value(column_family_id, column, value)
            result = row.commit()
        self.assertEqual(result, expected_result)
        # commit() must clear the pending-rule list.
        self.assertEqual(row._rule_pb_list, [])

    def test_commit_no_rules(self):
        from tests.unit._testing import _FakeStub

        project_id = "project-id"
        row_key = b"row_key"
        credentials = _make_credentials()
        client = self._make_client(
            project=project_id, credentials=credentials, admin=True
        )
        table = _Table(None, client=client)
        row = self._make_one(row_key, table)
        self.assertEqual(row._rule_pb_list, [])
        # Patch the stub used by the API method.
        stub = _FakeStub()
        # Perform the method and check the result.
        result = row.commit()
        self.assertEqual(result, {})
        # Make sure no request was sent.
        self.assertEqual(stub.method_calls, [])

    def test_commit_too_many_mutations(self):
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import row as MUT

        row_key = b"row_key"
        table = object()
        row = self._make_one(row_key, table)
        row._rule_pb_list = [1, 2, 3]
        num_mutations = len(row._rule_pb_list)
        with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
            with self.assertRaises(ValueError):
                row.commit()
class Test__parse_rmw_row_response(unittest.TestCase):
    """Unit tests for the module-level ``_parse_rmw_row_response`` helper."""

    def _call_fut(self, row_response):
        from google.cloud.bigtable.row import _parse_rmw_row_response

        return _parse_rmw_row_response(row_response)

    def test_it(self):
        from google.cloud._helpers import _datetime_from_microseconds

        col_fam1 = u"col-fam-id"
        col_fam2 = u"col-fam-id2"
        col_name1 = b"col-name1"
        col_name2 = b"col-name2"
        col_name3 = b"col-name3-but-other-fam"
        cell_val1 = b"cell-val"
        cell_val2 = b"cell-val-newer"
        cell_val3 = b"altcol-cell-val"
        cell_val4 = b"foo"
        microseconds = 1000871
        timestamp = _datetime_from_microseconds(microseconds)
        # Expected shape: family -> column -> [(value, timestamp), ...].
        expected_output = {
            col_fam1: {
                col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)],
                col_name2: [(cell_val3, timestamp)],
            },
            col_fam2: {col_name3: [(cell_val4, timestamp)]},
        }
        response_row = _RowPB(
            families=[
                _FamilyPB(
                    name=col_fam1,
                    columns=[
                        _ColumnPB(
                            qualifier=col_name1,
                            cells=[
                                _CellPB(value=cell_val1, timestamp_micros=microseconds),
                                _CellPB(value=cell_val2, timestamp_micros=microseconds),
                            ],
                        ),
                        _ColumnPB(
                            qualifier=col_name2,
                            cells=[
                                _CellPB(value=cell_val3, timestamp_micros=microseconds)
                            ],
                        ),
                    ],
                ),
                _FamilyPB(
                    name=col_fam2,
                    columns=[
                        _ColumnPB(
                            qualifier=col_name3,
                            cells=[
                                _CellPB(value=cell_val4, timestamp_micros=microseconds)
                            ],
                        )
                    ],
                ),
            ]
        )
        sample_input = _ReadModifyWriteRowResponsePB(row=response_row)
        self.assertEqual(expected_output, self._call_fut(sample_input))
class Test__parse_family_pb(unittest.TestCase):
    """Unit tests for the module-level ``_parse_family_pb`` helper."""

    def _call_fut(self, family_pb):
        from google.cloud.bigtable.row import _parse_family_pb

        return _parse_family_pb(family_pb)

    def test_it(self):
        from google.cloud._helpers import _datetime_from_microseconds

        col_fam1 = u"col-fam-id"
        col_name1 = b"col-name1"
        col_name2 = b"col-name2"
        cell_val1 = b"cell-val"
        cell_val2 = b"cell-val-newer"
        cell_val3 = b"altcol-cell-val"
        microseconds = 5554441037
        timestamp = _datetime_from_microseconds(microseconds)
        # Expected shape: (family name, {column: [(value, timestamp), ...]}).
        expected_dict = {
            col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)],
            col_name2: [(cell_val3, timestamp)],
        }
        expected_output = (col_fam1, expected_dict)
        sample_input = _FamilyPB(
            name=col_fam1,
            columns=[
                _ColumnPB(
                    qualifier=col_name1,
                    cells=[
                        _CellPB(value=cell_val1, timestamp_micros=microseconds),
                        _CellPB(value=cell_val2, timestamp_micros=microseconds),
                    ],
                ),
                _ColumnPB(
                    qualifier=col_name2,
                    cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)],
                ),
            ],
        )
        self.assertEqual(expected_output, self._call_fut(sample_input))
def _CheckAndMutateRowResponsePB(*args, **kw):
    # Thin factory: build a CheckAndMutateRowResponse protobuf on demand.
    from google.cloud.bigtable_v2.proto.bigtable_pb2 import (
        CheckAndMutateRowResponse)
    return CheckAndMutateRowResponse(*args, **kw)
def _ReadModifyWriteRowResponsePB(*args, **kw):
    # Thin factory: build a ReadModifyWriteRowResponse protobuf on demand.
    from google.cloud.bigtable_v2.proto.bigtable_pb2 import (
        ReadModifyWriteRowResponse)
    return ReadModifyWriteRowResponse(*args, **kw)
def _CellPB(*args, **kw):
    # Thin factory: build a Cell protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Cell
    return Cell(*args, **kw)
def _ColumnPB(*args, **kw):
    # Thin factory: build a Column protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Column
    return Column(*args, **kw)
def _FamilyPB(*args, **kw):
    # Thin factory: build a Family protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Family
    return Family(*args, **kw)
def _MutationPB(*args, **kw):
    # Thin factory: build a Mutation protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Mutation
    return Mutation(*args, **kw)
def _MutationSetCellPB(*args, **kw):
    # Thin factory: build a Mutation.SetCell protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Mutation
    return Mutation.SetCell(*args, **kw)
def _MutationDeleteFromColumnPB(*args, **kw):
    # Thin factory: build a Mutation.DeleteFromColumn protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Mutation
    return Mutation.DeleteFromColumn(*args, **kw)
def _MutationDeleteFromFamilyPB(*args, **kw):
    # Thin factory: build a Mutation.DeleteFromFamily protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Mutation
    return Mutation.DeleteFromFamily(*args, **kw)
def _MutationDeleteFromRowPB(*args, **kw):
    # Thin factory: build a Mutation.DeleteFromRow protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Mutation
    return Mutation.DeleteFromRow(*args, **kw)
def _RowPB(*args, **kw):
    # Thin factory: build a Row protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import Row
    return Row(*args, **kw)
def _ReadModifyWriteRulePB(*args, **kw):
    # Thin factory: build a ReadModifyWriteRule protobuf on demand.
    from google.cloud.bigtable_v2.proto.data_pb2 import ReadModifyWriteRule
    return ReadModifyWriteRule(*args, **kw)
class _Instance(object):
def __init__(self, client=None):
self._client = client
class _Table(object):
    """Test double for a Bigtable table: records rows handed to mutate_rows."""
    def __init__(self, name, client=None):
        self.name = name
        self._instance = _Instance(client)
        self.client = client
        self.mutated_rows = []
    def mutate_rows(self, rows):
        # Record the batch instead of issuing an RPC.
        self.mutated_rows += list(rows)
| {
"content_hash": "3762aedba3ba7a8190bc6902628d0c2c",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 88,
"avg_line_length": 33.22727272727273,
"alnum_prop": 0.5925980700262506,
"repo_name": "dhermes/gcloud-python",
"id": "b4aaefb862f83318ebe151927468706fe6863c77",
"size": "27623",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bigtable/tests/unit/test_row.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
} |
"""Blink IDL Intermediate Representation (IR) classes.
Classes are primarily constructors, which build an IdlDefinitions object
(and various contained objects) from an AST (produced by blink_idl_parser).
IR stores typedefs and they are resolved by the code generator.
Typedef resolution uses some auxiliary classes and OOP techniques to make this
a generic call. See TypedefResolver class in code_generator_v8.py.
Class hierarchy (mostly containment, '<' for inheritance):
IdlDefinitions
IdlCallbackFunction < TypedObject
IdlEnum :: FIXME: remove, just use a dict for enums
IdlInterface
IdlAttribute < TypedObject
IdlConstant < TypedObject
IdlLiteral
IdlOperation < TypedObject
IdlArgument < TypedObject
IdlStringifier
IdlIterable < IdlIterableOrMaplikeOrSetlike
IdlMaplike < IdlIterableOrMaplikeOrSetlike
IdlSetlike < IdlIterableOrMaplikeOrSetlike
IdlException < IdlInterface
(same contents as IdlInterface)
TypedObject :: Object with one or more attributes that is a type.
IdlArgument is 'picklable', as it is stored in interfaces_info.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
from idl_types import IdlType, IdlUnionType, IdlArrayType, IdlSequenceType, IdlNullableType
SPECIAL_KEYWORD_LIST = ['GETTER', 'SETTER', 'DELETER']
################################################################################
# TypedObject
################################################################################
class TypedObject(object):
    """Object with a type, such as an Attribute or Operation (return value).

    The type can be an actual type, or can be a typedef, which must be resolved
    by the TypedefResolver before passing data to the code generator.
    """
    __metaclass__ = abc.ABCMeta
    # Names of the attributes that hold IdlTypes. Subclasses with differently
    # named typed attributes (e.g. maplike key/value types) override this so
    # the TypedefResolver knows which attributes to rewrite.
    idl_type_attributes = ('idl_type',)
################################################################################
# Definitions (main container class)
################################################################################
class IdlDefinitions(object):
    """Top-level container for every definition parsed from one IDL file."""
    def __init__(self, idl_name, node):
        """Args: node: AST root node, class == 'File'"""
        self.callback_functions = {}
        self.dictionaries = {}
        self.enumerations = {}
        self.implements = []
        self.interfaces = {}
        self.idl_name = idl_name
        self.typedefs = {}
        node_class = node.GetClass()
        if node_class != 'File':
            raise ValueError('Unrecognized node class: %s' % node_class)
        # Dispatch each top-level AST child to the matching IR constructor.
        children = node.GetChildren()
        for child in children:
            child_class = child.GetClass()
            if child_class == 'Interface':
                interface = IdlInterface(idl_name, child)
                self.interfaces[interface.name] = interface
            elif child_class == 'Exception':
                exception = IdlException(idl_name, child)
                # For simplicity, treat exceptions as interfaces
                self.interfaces[exception.name] = exception
            elif child_class == 'Typedef':
                type_name = child.GetName()
                self.typedefs[type_name] = typedef_node_to_type(child)
            elif child_class == 'Enum':
                enumeration = IdlEnum(idl_name, child)
                self.enumerations[enumeration.name] = enumeration
            elif child_class == 'Callback':
                callback_function = IdlCallbackFunction(idl_name, child)
                self.callback_functions[callback_function.name] = callback_function
            elif child_class == 'Implements':
                self.implements.append(IdlImplement(child))
            elif child_class == 'Dictionary':
                dictionary = IdlDictionary(idl_name, child)
                self.dictionaries[dictionary.name] = dictionary
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)
    def accept(self, visitor):
        """Dispatch *visitor* over all contained definitions."""
        visitor.visit_definitions(self)
        # FIXME: Visit typedefs as well. (We need to add IdlTypedef to do that).
        # NOTE: itervalues/iteritems are Python 2 dict APIs; this module runs
        # under the Python 2 toolchain used by the Blink build.
        for interface in self.interfaces.itervalues():
            interface.accept(visitor)
        for callback_function in self.callback_functions.itervalues():
            callback_function.accept(visitor)
        for dictionary in self.dictionaries.itervalues():
            dictionary.accept(visitor)
    def update(self, other):
        """Update with additional IdlDefinitions."""
        for interface_name, new_interface in other.interfaces.iteritems():
            if not new_interface.is_partial:
                # Add as new interface
                self.interfaces[interface_name] = new_interface
                continue
            # Merge partial to existing interface
            try:
                self.interfaces[interface_name].merge(new_interface)
            except KeyError:
                raise Exception('Tried to merge partial interface for {0}, '
                                'but no existing interface by that name'
                                .format(interface_name))
        # Merge callbacks and enumerations
        self.enumerations.update(other.enumerations)
        self.callback_functions.update(other.callback_functions)
################################################################################
# Callback Functions
################################################################################
class IdlCallbackFunction(TypedObject):
    """A callback function definition: a return type plus an argument list."""
    def __init__(self, idl_name, node):
        kids = node.GetChildren()
        if len(kids) != 2:
            raise ValueError('Expected 2 children, got %s' % len(kids))
        type_node, arguments_node = kids
        node_class = arguments_node.GetClass()
        if node_class != 'Arguments':
            raise ValueError('Expected Arguments node, got %s' % node_class)
        self.idl_name = idl_name
        self.name = node.GetName()
        self.idl_type = type_node_to_type(type_node)
        self.arguments = arguments_node_to_arguments(idl_name, arguments_node)
    def accept(self, visitor):
        """Visit this callback function, then each of its arguments."""
        visitor.visit_callback_function(self)
        for argument in self.arguments:
            argument.accept(visitor)
################################################################################
# Dictionary
################################################################################
class IdlDictionary(object):
    """An IDL dictionary: a set of members, possibly inheriting another."""
    def __init__(self, idl_name, node):
        self.extended_attributes = {}
        self.is_partial = bool(node.GetProperty('Partial'))
        self.idl_name = idl_name
        self.name = node.GetName()
        self.members = []
        self.parent = None
        for child in node.GetChildren():
            klass = child.GetClass()
            if klass == 'Inherit':
                self.parent = child.GetName()
            elif klass == 'Key':
                self.members.append(IdlDictionaryMember(idl_name, child))
            elif klass == 'ExtAttributes':
                self.extended_attributes = (
                    ext_attributes_node_to_extended_attributes(idl_name, child))
            else:
                raise ValueError('Unrecognized node class: %s' % klass)
    def accept(self, visitor):
        """Visit the dictionary itself, then each of its members."""
        visitor.visit_dictionary(self)
        for member in self.members:
            member.accept(visitor)
class IdlDictionaryMember(TypedObject):
    """A single dictionary member: a typed name with an optional default."""
    def __init__(self, idl_name, node):
        self.default_value = None
        self.extended_attributes = {}
        self.idl_type = None
        self.idl_name = idl_name
        self.name = node.GetName()
        for child in node.GetChildren():
            klass = child.GetClass()
            if klass == 'Type':
                self.idl_type = type_node_to_type(child)
            elif klass == 'Default':
                self.default_value = default_node_to_idl_literal(child)
            elif klass == 'ExtAttributes':
                self.extended_attributes = (
                    ext_attributes_node_to_extended_attributes(idl_name, child))
            else:
                raise ValueError('Unrecognized node class: %s' % klass)
    def accept(self, visitor):
        visitor.visit_dictionary_member(self)
################################################################################
# Enumerations
################################################################################
class IdlEnum(object):
    """An IDL enumeration: a name plus its ordered list of string values."""
    # FIXME: remove, just treat enums as a dictionary
    def __init__(self, idl_name, node):
        self.idl_name = idl_name
        self.name = node.GetName()
        self.values = [child.GetName() for child in node.GetChildren()]
################################################################################
# Interfaces and Exceptions
################################################################################
class IdlInterface(object):
    """An IDL interface: attributes, constants, operations, constructors,
    plus optional stringifier and iterable/maplike/setlike declarations."""
    def __init__(self, idl_name, node=None):
        # Member containers; filled from the AST children below.
        self.attributes = []
        self.constants = []
        self.constructors = []
        self.custom_constructors = []
        self.extended_attributes = {}
        self.operations = []
        self.parent = None
        self.stringifier = None
        self.iterable = None
        self.maplike = None
        self.setlike = None
        # Used when merging partial interfaces with their original.
        self.original_interface = None
        self.partial_interfaces = []
        if not node:  # Early exit for IdlException.__init__
            return
        self.is_callback = bool(node.GetProperty('CALLBACK'))
        self.is_exception = False
        # FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
        self.is_partial = bool(node.GetProperty('Partial'))
        self.idl_name = idl_name
        self.name = node.GetName()
        self.idl_type = IdlType(self.name)
        children = node.GetChildren()
        for child in children:
            child_class = child.GetClass()
            if child_class == 'Attribute':
                self.attributes.append(IdlAttribute(idl_name, child))
            elif child_class == 'Const':
                self.constants.append(IdlConstant(idl_name, child))
            elif child_class == 'ExtAttributes':
                # Constructor-related extended attributes are converted into
                # operations; the remaining attributes are stored as-is.
                extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
                self.constructors, self.custom_constructors = (
                    extended_attributes_to_constructors(idl_name, extended_attributes))
                clear_constructor_attributes(extended_attributes)
                self.extended_attributes = extended_attributes
            elif child_class == 'Operation':
                self.operations.append(IdlOperation(idl_name, child))
            elif child_class == 'Inherit':
                self.parent = child.GetName()
            elif child_class == 'Stringifier':
                self.stringifier = IdlStringifier(idl_name, child)
                self.process_stringifier()
            elif child_class == 'Iterable':
                self.iterable = IdlIterable(idl_name, child)
            elif child_class == 'Maplike':
                self.maplike = IdlMaplike(idl_name, child)
            elif child_class == 'Setlike':
                self.setlike = IdlSetlike(idl_name, child)
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)
        # NOTE: Python 2 semantics — filter() returns a list here, so len()
        # is valid. Counts how many of the three declarations are present.
        if len(filter(None, [self.iterable, self.maplike, self.setlike])) > 1:
            raise ValueError('Interface can only have one of iterable<>, maplike<> and setlike<>.')
    def accept(self, visitor):
        """Dispatch *visitor* over this interface and all of its members."""
        visitor.visit_interface(self)
        for attribute in self.attributes:
            attribute.accept(visitor)
        for constant in self.constants:
            constant.accept(visitor)
        for constructor in self.constructors:
            constructor.accept(visitor)
        for custom_constructor in self.custom_constructors:
            custom_constructor.accept(visitor)
        for operation in self.operations:
            operation.accept(visitor)
        # At most one of these is non-None (enforced in __init__).
        if self.iterable:
            self.iterable.accept(visitor)
        elif self.maplike:
            self.maplike.accept(visitor)
        elif self.setlike:
            self.setlike.accept(visitor)
    def process_stringifier(self):
        """Add the stringifier's attribute or named operation child, if it has
        one, as a regular attribute/operation of this interface."""
        if self.stringifier.attribute:
            self.attributes.append(self.stringifier.attribute)
        elif self.stringifier.operation:
            self.operations.append(self.stringifier.operation)
    def merge(self, other):
        """Merge in another interface's members (e.g., partial interface)"""
        self.attributes.extend(other.attributes)
        self.constants.extend(other.constants)
        self.operations.extend(other.operations)
class IdlException(IdlInterface):
    # Properly, exceptions and interfaces are distinct, and thus should
    # inherit a common base class (say, "IdlExceptionOrInterface").
    # However, there is only one exception (DOMException), and new exceptions
    # are not expected. Thus it is easier to implement exceptions as a
    # restricted subclass of interfaces.
    # http://www.w3.org/TR/WebIDL/#idl-exceptions
    def __init__(self, idl_name, node):
        # Exceptions are similar to Interfaces, but simpler.
        # Calling the base __init__ without a node only initializes the
        # member containers (it early-exits before reading the node).
        IdlInterface.__init__(self, idl_name)
        self.is_callback = False
        self.is_exception = True
        self.is_partial = False
        self.idl_name = idl_name
        self.name = node.GetName()
        self.idl_type = IdlType(self.name)
        children = node.GetChildren()
        for child in children:
            child_class = child.GetClass()
            if child_class == 'Attribute':
                attribute = IdlAttribute(idl_name, child)
                self.attributes.append(attribute)
            elif child_class == 'Const':
                self.constants.append(IdlConstant(idl_name, child))
            elif child_class == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
            elif child_class == 'ExceptionOperation':
                self.operations.append(IdlOperation.from_exception_operation_node(idl_name, child))
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Attributes
################################################################################
class IdlAttribute(TypedObject):
    """An interface attribute: a typed, named member, possibly readonly/static."""
    def __init__(self, idl_name, node):
        self.is_read_only = bool(node.GetProperty('READONLY'))
        self.is_static = bool(node.GetProperty('STATIC'))
        self.idl_name = idl_name
        self.name = node.GetName()
        # Defaults; overwritten from the child nodes below.
        self.idl_type = None
        self.extended_attributes = {}
        for child in node.GetChildren():
            child_class = child.GetClass()
            if child_class == 'Type':
                self.idl_type = type_node_to_type(child)
            elif child_class == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)
    def accept(self, visitor):
        visitor.visit_attribute(self)
################################################################################
# Constants
################################################################################
class IdlConstant(TypedObject):
    """An interface constant: a name, a (restricted) type, and a value."""
    def __init__(self, idl_name, node):
        children = node.GetChildren()
        if not 2 <= len(children) <= 3:
            raise ValueError('Expected 2 or 3 children, got %s' % len(children))
        type_node, value_node = children[0], children[1]
        value_class = value_node.GetClass()
        if value_class != 'Value':
            raise ValueError('Expected Value node, got %s' % value_class)
        self.idl_name = idl_name
        self.name = node.GetName()
        # ConstType is more limited than Type, so the subtree is smaller and
        # the full type_node_to_type function is not needed.
        self.idl_type = type_node_inner_to_type(type_node)
        # FIXME: This code is unnecessarily complicated due to the rather
        # inconsistent way the upstream IDL parser outputs default values.
        # http://crbug.com/374178
        if value_node.GetProperty('TYPE') == 'float':
            self.value = value_node.GetProperty('VALUE')
        else:
            self.value = value_node.GetName()
        if len(children) == 3:
            self.extended_attributes = ext_attributes_node_to_extended_attributes(
                idl_name, children[2])
        else:
            self.extended_attributes = {}
    def accept(self, visitor):
        visitor.visit_constant(self)
################################################################################
# Literals
################################################################################
class IdlLiteral(object):
    """A typed literal value (used for constants and default values)."""
    def __init__(self, idl_type, value):
        self.idl_type = idl_type
        self.value = value
        self.is_null = False
    def __str__(self):
        """Render the literal as C++ source text."""
        kind = self.idl_type
        if kind == 'DOMString':
            return 'String("%s")' % self.value
        if kind == 'integer':
            return '%d' % self.value
        if kind == 'float':
            return '%g' % self.value
        if kind == 'boolean':
            return 'true' if self.value else 'false'
        raise ValueError('Unsupported literal type: %s' % kind)
class IdlLiteralNull(IdlLiteral):
    """The WebIDL ``null`` literal; renders as C++ ``nullptr``."""
    def __init__(self):
        self.idl_type = 'NULL'
        self.value = None
        self.is_null = True
    def __str__(self):
        return 'nullptr'
def default_node_to_idl_literal(node):
    """Convert a Default AST node into an IdlLiteral.

    FIXME: This code is unnecessarily complicated due to the rather
    inconsistent way the upstream IDL parser outputs default values.
    http://crbug.com/374178
    """
    idl_type = node.GetProperty('TYPE')
    if idl_type == 'DOMString':
        value = node.GetProperty('NAME')
        if '"' in value or '\\' in value:
            raise ValueError('Unsupported string value: %r' % value)
        return IdlLiteral(idl_type, value)
    if idl_type == 'integer':
        # base=0 lets the literal carry its own radix prefix (0x..., 0...).
        return IdlLiteral(idl_type, int(node.GetProperty('NAME'), base=0))
    if idl_type == 'float':
        return IdlLiteral(idl_type, float(node.GetProperty('VALUE')))
    if idl_type in ('boolean', 'sequence'):
        return IdlLiteral(idl_type, node.GetProperty('VALUE'))
    if idl_type == 'NULL':
        return IdlLiteralNull()
    raise ValueError('Unrecognized default value type: %s' % idl_type)
################################################################################
# Operations
################################################################################
class IdlOperation(TypedObject):
    """An interface operation (method), or a constructor pseudo-operation."""
    def __init__(self, idl_name, node=None):
        self.arguments = []
        self.extended_attributes = {}
        self.specials = []
        self.is_constructor = False
        self.idl_name = idl_name
        self.idl_type = None
        self.is_static = False
        # node is None when built via the classmethod constructors below.
        if not node:
            return
        self.name = node.GetName()  # FIXME: should just be: or ''
        # FIXME: AST should use None internally
        if self.name == '_unnamed_':
            self.name = ''
        self.is_static = bool(node.GetProperty('STATIC'))
        property_dictionary = node.GetProperties()
        # Record special-operation keywords (getter/setter/deleter),
        # lower-cased for the code generator.
        for special_keyword in SPECIAL_KEYWORD_LIST:
            if special_keyword in property_dictionary:
                self.specials.append(special_keyword.lower())
        children = node.GetChildren()
        for child in children:
            child_class = child.GetClass()
            if child_class == 'Arguments':
                self.arguments = arguments_node_to_arguments(idl_name, child)
            elif child_class == 'Type':
                self.idl_type = type_node_to_type(child)
            elif child_class == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)
    @classmethod
    def from_exception_operation_node(cls, idl_name, node):
        # Build an operation from an ExceptionOperation node.
        # Needed to handle one case in DOMException.idl:
        #   // Override in a Mozilla compatible format
        #   [NotEnumerable] DOMString toString();
        # FIXME: can we remove this? replace with a stringifier?
        operation = cls(idl_name)
        operation.name = node.GetName()
        children = node.GetChildren()
        if len(children) < 1 or len(children) > 2:
            raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
        type_node = children[0]
        operation.idl_type = type_node_to_type(type_node)
        if len(children) > 1:
            ext_attributes_node = children[1]
            operation.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
        return operation
    @classmethod
    def constructor_from_arguments_node(cls, name, idl_name, arguments_node):
        # Build a constructor pseudo-operation from an Arguments node.
        constructor = cls(idl_name)
        constructor.name = name
        constructor.arguments = arguments_node_to_arguments(idl_name, arguments_node)
        constructor.is_constructor = True
        return constructor
    def accept(self, visitor):
        visitor.visit_operation(self)
        for argument in self.arguments:
            argument.accept(visitor)
################################################################################
# Arguments
################################################################################
class IdlArgument(TypedObject):
    """An operation argument: type, name, optional/variadic flags, default."""
    def __init__(self, idl_name, node=None):
        self.extended_attributes = {}
        self.idl_type = None
        self.is_optional = False  # syntax: (optional T)
        self.is_variadic = False  # syntax: (T...)
        self.idl_name = idl_name
        self.default_value = None
        if not node:
            return
        self.is_optional = node.GetProperty('OPTIONAL')
        self.name = node.GetName()
        for child in node.GetChildren():
            klass = child.GetClass()
            if klass == 'Type':
                self.idl_type = type_node_to_type(child)
            elif klass == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
            elif klass == 'Argument':
                # The only nested Argument node is the '...' ellipsis marker.
                child_name = child.GetName()
                if child_name != '...':
                    raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
                self.is_variadic = bool(child.GetProperty('ELLIPSIS'))
            elif klass == 'Default':
                self.default_value = default_node_to_idl_literal(child)
            else:
                raise ValueError('Unrecognized node class: %s' % klass)
    def __getstate__(self):
        # FIXME: Return a picklable object which has enough information to
        # unpickle.
        return {}
    def __setstate__(self, state):
        pass
    def accept(self, visitor):
        visitor.visit_argument(self)
def arguments_node_to_arguments(idl_name, node):
    """Convert an Arguments AST node into a list of IdlArgument.

    [Constructor] and [CustomConstructor] without arguments (the bare form)
    have None instead of an arguments node, but have the same meaning as using
    an empty argument list, [Constructor()], so special-case this.
    http://www.w3.org/TR/WebIDL/#Constructor
    """
    if node is None:
        return []
    arguments = []
    for argument_node in node.GetChildren():
        arguments.append(IdlArgument(idl_name, argument_node))
    return arguments
################################################################################
# Stringifiers
################################################################################
class IdlStringifier(object):
    """A stringifier declaration, optionally wrapping an attribute/operation."""
    def __init__(self, idl_name, node):
        self.attribute = None
        self.operation = None
        self.extended_attributes = {}
        self.idl_name = idl_name
        for child in node.GetChildren():
            klass = child.GetClass()
            if klass == 'Attribute':
                self.attribute = IdlAttribute(idl_name, child)
            elif klass == 'Operation':
                # Only a *named* operation counts; anonymous ones are dropped.
                operation = IdlOperation(idl_name, child)
                if operation.name:
                    self.operation = operation
            elif klass == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
            else:
                raise ValueError('Unrecognized node class: %s' % klass)
        # Copy the stringifier's extended attributes (such as [Unforgeable])
        # onto the underlying attribute or operation, if there is one.
        member = self.attribute or self.operation
        if member:
            member.extended_attributes.update(self.extended_attributes)
################################################################################
# Iterable, Maplike, Setlike
################################################################################
class IdlIterableOrMaplikeOrSetlike(TypedObject):
    """Common base: collects extended attributes plus raw Type child nodes,
    which subclasses then convert into key/value types."""
    def __init__(self, idl_name, node):
        self.extended_attributes = {}
        self.type_children = []
        for child in node.GetChildren():
            klass = child.GetClass()
            if klass == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
            elif klass == 'Type':
                self.type_children.append(child)
            else:
                raise ValueError('Unrecognized node class: %s' % klass)
class IdlIterable(IdlIterableOrMaplikeOrSetlike):
    """An iterable<> declaration: iterable<V> or iterable<K, V>."""
    idl_type_attributes = ('key_type', 'value_type')
    def __init__(self, idl_name, node):
        super(IdlIterable, self).__init__(idl_name, node)
        types = self.type_children
        if len(types) == 1:
            # Value iterator: iterable<V>.
            self.key_type = None
            self.value_type = type_node_to_type(types[0])
        elif len(types) == 2:
            # Pair iterator: iterable<K, V>.
            self.key_type = type_node_to_type(types[0])
            self.value_type = type_node_to_type(types[1])
        else:
            raise ValueError('Unexpected number of type children: %d' % len(types))
        del self.type_children
    def accept(self, visitor):
        visitor.visit_iterable(self)
class IdlMaplike(IdlIterableOrMaplikeOrSetlike):
    """A maplike<K, V> declaration, possibly readonly."""
    idl_type_attributes = ('key_type', 'value_type')
    def __init__(self, idl_name, node):
        super(IdlMaplike, self).__init__(idl_name, node)
        self.is_read_only = bool(node.GetProperty('READONLY'))
        types = self.type_children
        if len(types) == 2:
            self.key_type = type_node_to_type(types[0])
            self.value_type = type_node_to_type(types[1])
        else:
            raise ValueError('Unexpected number of children: %d' % len(types))
        del self.type_children
    def accept(self, visitor):
        visitor.visit_maplike(self)
class IdlSetlike(IdlIterableOrMaplikeOrSetlike):
    """A setlike<V> declaration, possibly readonly."""
    idl_type_attributes = ('value_type',)
    def __init__(self, idl_name, node):
        super(IdlSetlike, self).__init__(idl_name, node)
        self.is_read_only = bool(node.GetProperty('READONLY'))
        types = self.type_children
        if len(types) == 1:
            self.value_type = type_node_to_type(types[0])
        else:
            raise ValueError('Unexpected number of children: %d' % len(types))
        del self.type_children
    def accept(self, visitor):
        visitor.visit_setlike(self)
################################################################################
# Implement statements
################################################################################
class IdlImplement(object):
    """An 'A implements B;' statement: left implements right."""
    def __init__(self, node):
        self.left_interface = node.GetName()
        self.right_interface = node.GetProperty('REFERENCE')
################################################################################
# Extended attributes
################################################################################
class Exposure:
    """An Exposure holds one Exposed or RuntimeEnabled condition.

    Each exposure has two properties: exposed and runtime_enabled.
    Exposure(e, r) corresponds to [Exposed(e r)]; Exposure(e) corresponds to
    [Exposed=e] (runtime_enabled is then None).
    """
    def __init__(self, exposed, runtime_enabled=None):
        self.exposed = exposed
        self.runtime_enabled = runtime_enabled
def ext_attributes_node_to_extended_attributes(idl_name, node):
    """
    Returns:
        Dictionary of {ExtAttributeName: ExtAttributeValue}.
        Value is usually a string, with these exceptions:
        Constructors: value is a list of Arguments nodes, corresponding to
        possible signatures of the constructor.
        CustomConstructors: value is a list of Arguments nodes, corresponding to
        possible signatures of the custom constructor.
        NamedConstructor: value is a Call node, corresponding to the single
        signature of the named constructor.
        SetWrapperReferenceTo: value is an Arguments node.
    """
    # Primarily just make a dictionary from the children.
    # The only complexity is handling various types of constructors:
    # Constructors and Custom Constructors can have duplicate entries due to
    # overloading, and thus are stored in temporary lists.
    # However, Named Constructors cannot be overloaded, and thus do not have
    # a list.
    # FIXME: move Constructor logic into separate function, instead of modifying
    # extended attributes in-place.
    constructors = []
    custom_constructors = []
    extended_attributes = {}
    def child_node(extended_attribute_node):
        # Return the single child of an extended attribute node, or None.
        children = extended_attribute_node.GetChildren()
        if not children:
            return None
        if len(children) > 1:
            raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
        return children[0]
    extended_attribute_node_list = node.GetChildren()
    for extended_attribute_node in extended_attribute_node_list:
        name = extended_attribute_node.GetName()
        child = child_node(extended_attribute_node)
        # child_class is None whenever child is None.
        child_class = child and child.GetClass()
        if name == 'Constructor':
            if child_class and child_class != 'Arguments':
                raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
            constructors.append(child)
        elif name == 'CustomConstructor':
            if child_class and child_class != 'Arguments':
                raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
            custom_constructors.append(child)
        elif name == 'NamedConstructor':
            if child_class and child_class != 'Call':
                raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
            extended_attributes[name] = child
        elif name == 'SetWrapperReferenceTo':
            if not child:
                raise ValueError('[SetWrapperReferenceTo] requires a child, but has none.')
            if child_class != 'Arguments':
                raise ValueError('[SetWrapperReferenceTo] only supports Arguments as child, but has child of class: %s' % child_class)
            extended_attributes[name] = arguments_node_to_arguments(idl_name, child)
        elif name == 'Exposed':
            if child_class and child_class != 'Arguments':
                raise ValueError('[Exposed] only supports Arguments as child, but has child of class: %s' % child_class)
            exposures = []
            if child_class == 'Arguments':
                # [Exposed(e r)] form: each argument pairs an exposed global
                # (its type) with a runtime-enabled feature (its name).
                exposures = [Exposure(exposed=str(arg.idl_type),
                                      runtime_enabled=arg.name)
                             for arg in arguments_node_to_arguments('*', child)]
            else:
                value = extended_attribute_node.GetProperty('VALUE')
                # NOTE: Python 2 semantics — a single identifier is a str;
                # multiple identifiers arrive as a list of strings.
                if type(value) is str:
                    exposures = [Exposure(exposed=value)]
                else:
                    exposures = [Exposure(exposed=v) for v in value]
            extended_attributes[name] = exposures
        elif child:
            raise ValueError('ExtAttributes node with unexpected children: %s' % name)
        else:
            value = extended_attribute_node.GetProperty('VALUE')
            extended_attributes[name] = value
    # Store constructors and custom constructors in special list attributes,
    # which are deleted later. Note plural in key.
    if constructors:
        extended_attributes['Constructors'] = constructors
    if custom_constructors:
        extended_attributes['CustomConstructors'] = custom_constructors
    return extended_attributes
def extended_attributes_to_constructors(idl_name, extended_attributes):
    """Returns constructors and custom_constructors (lists of IdlOperations).

    Auxiliary function for IdlInterface.__init__.

    Consumes the 'Constructors'/'CustomConstructors' list entries stored by
    ext_attributes_node_to_extended_attributes, and rewrites the
    'NamedConstructor' entry from a Call node to the constructor's name.
    """
    constructor_list = extended_attributes.get('Constructors', [])
    constructors = [
        IdlOperation.constructor_from_arguments_node('Constructor', idl_name, arguments_node)
        for arguments_node in constructor_list]
    custom_constructor_list = extended_attributes.get('CustomConstructors', [])
    custom_constructors = [
        IdlOperation.constructor_from_arguments_node('CustomConstructor', idl_name, arguments_node)
        for arguments_node in custom_constructor_list]
    if 'NamedConstructor' in extended_attributes:
        # FIXME: support overloaded named constructors, and make homogeneous
        call_node = extended_attributes['NamedConstructor']
        # Replace the Call node with the plain constructor name string.
        extended_attributes['NamedConstructor'] = call_node.GetName()
        children = call_node.GetChildren()
        if len(children) != 1:
            raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
        arguments_node = children[0]
        named_constructor = IdlOperation.constructor_from_arguments_node('NamedConstructor', idl_name, arguments_node)
        # FIXME: should return named_constructor separately; appended for Perl
        constructors.append(named_constructor)
    return constructors, custom_constructors
def clear_constructor_attributes(extended_attributes):
    """Replace the plural Constructor*s* keys with singular placeholders.

    For each of 'Constructors'/'CustomConstructors' that is present, the
    plural key is deleted and the corresponding singular key is set to None.
    """
    for plural, singular in (('Constructors', 'Constructor'),
                             ('CustomConstructors', 'CustomConstructor')):
        if plural in extended_attributes:
            del extended_attributes[plural]
            extended_attributes[singular] = None
################################################################################
# Types
################################################################################
def type_node_to_type(node):
    """Convert a 'Type' AST node into the corresponding IdlType object.

    Honors the node's NULLABLE property and an optional single Array
    suffix child; multi-dimensional arrays are rejected.
    """
    children = node.GetChildren()
    if not 1 <= len(children) <= 2:
        raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))
    base_type = type_node_inner_to_type(children[0])
    if node.GetProperty('NULLABLE'):
        base_type = IdlNullableType(base_type)
    if len(children) == 1:
        return base_type
    # Two children: the second must be an Array type suffix.
    suffix_node = children[1]
    suffix_node_class = suffix_node.GetClass()
    if suffix_node_class != 'Array':
        raise ValueError('Expected Array node as TypeSuffix, got %s node.' % suffix_node_class)
    array_type = IdlArrayType(base_type)
    if suffix_node.GetProperty('NULLABLE'):
        array_type = IdlNullableType(array_type)
    return array_type
def type_node_inner_to_type(node):
    """Map the inner child of a Type node to an IdlType (or compound type).

    Note Type*r*ef, not Typedef: an identifier here is either a typedef
    shorthand (but not a Typedef declaration itself) or an interface type.
    The two are not distinguished; the type name is used directly.
    """
    node_class = node.GetClass()
    if node_class in ('PrimitiveType', 'Typeref'):
        # unrestricted syntax: unrestricted double | unrestricted float
        return IdlType(node.GetName(),
                       is_unrestricted=bool(node.GetProperty('UNRESTRICTED')))
    if node_class == 'Any':
        return IdlType('any')
    if node_class == 'Sequence':
        return sequence_node_to_type(node)
    if node_class == 'UnionType':
        return union_type_node_to_idl_union_type(node)
    if node_class == 'Promise':
        return IdlType('Promise')
    raise ValueError('Unrecognized node class: %s' % node_class)
def sequence_node_to_type(node):
    """Build an IdlSequenceType (possibly nullable) from a Sequence node."""
    children = node.GetChildren()
    if len(children) != 1:
        raise ValueError('Sequence node expects exactly 1 child, got %s' % len(children))
    element_node = children[0]
    element_node_class = element_node.GetClass()
    if element_node_class != 'Type':
        raise ValueError('Unrecognized node class: %s' % element_node_class)
    sequence_type = IdlSequenceType(type_node_to_type(element_node))
    if node.GetProperty('NULLABLE'):
        sequence_type = IdlNullableType(sequence_type)
    return sequence_type
def typedef_node_to_type(node):
    """Return the IdlType named by a Typedef node's single Type child."""
    children = node.GetChildren()
    if len(children) != 1:
        raise ValueError('Typedef node with %s children, expected 1' % len(children))
    type_node = children[0]
    type_node_class = type_node.GetClass()
    if type_node_class != 'Type':
        raise ValueError('Unrecognized node class: %s' % type_node_class)
    return type_node_to_type(type_node)
def union_type_node_to_idl_union_type(node):
    """Build an IdlUnionType from a UnionType node's member Type children."""
    return IdlUnionType(
        [type_node_to_type(child) for child in node.GetChildren()])
################################################################################
# Visitor
################################################################################
class Visitor(object):
    """Abstract visitor class for IDL definitions traverse."""

    def visit_definitions(self, definitions):
        """Hook for the root definitions object; default is a no-op."""
        pass

    def visit_typed_object(self, typed_object):
        """Common hook invoked for every typed member below; default no-op."""
        pass

    # Each specific visit_* hook routes through visit_typed_object by
    # default, so a subclass can override just that one method to see
    # every typed node.
    def visit_callback_function(self, callback_function):
        self.visit_typed_object(callback_function)

    def visit_dictionary(self, dictionary):
        """Hook for dictionary definitions; default is a no-op."""
        pass

    def visit_dictionary_member(self, member):
        self.visit_typed_object(member)

    def visit_interface(self, interface):
        """Hook for interface definitions; default is a no-op."""
        pass

    def visit_attribute(self, attribute):
        self.visit_typed_object(attribute)

    def visit_constant(self, constant):
        self.visit_typed_object(constant)

    def visit_operation(self, operation):
        self.visit_typed_object(operation)

    def visit_argument(self, argument):
        self.visit_typed_object(argument)

    def visit_iterable(self, iterable):
        self.visit_typed_object(iterable)

    def visit_maplike(self, maplike):
        self.visit_typed_object(maplike)

    def visit_setlike(self, setlike):
        self.visit_typed_object(setlike)
| {
"content_hash": "06993623da8561c8afe561ae216a90ca",
"timestamp": "",
"source": "github",
"line_count": 994,
"max_line_length": 158,
"avg_line_length": 40.23541247484909,
"alnum_prop": 0.5843376506475971,
"repo_name": "CTSRD-SOAAP/chromium-42.0.2311.135",
"id": "693e1114beb288fa294f4ab36ca6aef9f97bf26b",
"size": "41524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Source/bindings/scripts/idl_definitions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "241154"
},
{
"name": "C",
"bytes": "12370053"
},
{
"name": "C++",
"bytes": "266788423"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "813488"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "20131029"
},
{
"name": "Java",
"bytes": "8495790"
},
{
"name": "JavaScript",
"bytes": "12980966"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "208709"
},
{
"name": "Objective-C",
"bytes": "1509363"
},
{
"name": "Objective-C++",
"bytes": "7960581"
},
{
"name": "PLpgSQL",
"bytes": "215882"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "432373"
},
{
"name": "Python",
"bytes": "11147426"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1207731"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
from functools import partial
import tests.modules.vim as vim_module
from tests.modules.lib import Pl
from tests.modules import TestCase
class TestVim(TestCase):
    """Tests for the Vim segment selectors in powerline.selectors.vim."""

    def test_single_tab(self):
        pl = Pl()
        single_tab = partial(self.vim.single_tab, pl=pl, segment_info=None, mode=None)
        # Inside the 'tabpage' context (presumably an extra tabpage is
        # open — TODO confirm against vim_module._with) the selector must
        # report False; once the context exits, True again.
        with vim_module._with('tabpage'):
            self.assertEqual(single_tab(), False)
        self.assertEqual(single_tab(), True)

    @classmethod
    def setUpClass(cls):
        # Make the vim stub importable before powerline.selectors.vim is
        # imported; it is removed again in tearDownClass.
        sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'vim_sys_path')))
        from powerline.selectors import vim
        cls.vim = vim

    @classmethod
    def tearDownClass(cls):
        # Undo the sys.path insertion from setUpClass.
        sys.path.pop(0)
# Allow running this test module directly through the shared test runner.
if __name__ == '__main__':
    from tests.modules import main
    main()
| {
"content_hash": "6342512177f84b3a429e0af2b0a7bc82",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 111,
"avg_line_length": 24.02857142857143,
"alnum_prop": 0.7217598097502973,
"repo_name": "codeprimate/arid",
"id": "74ace8d78e8e7b4b6c4829b0042e9e96a03a9f05",
"size": "871",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "powerline/tests/test_python/test_selectors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "10691"
},
{
"name": "C++",
"bytes": "1006"
},
{
"name": "CSS",
"bytes": "4962"
},
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Dockerfile",
"bytes": "1126"
},
{
"name": "Erlang",
"bytes": "10460"
},
{
"name": "HTML",
"bytes": "6705"
},
{
"name": "JavaScript",
"bytes": "9357"
},
{
"name": "Lua",
"bytes": "436"
},
{
"name": "Makefile",
"bytes": "31299"
},
{
"name": "Perl",
"bytes": "51050"
},
{
"name": "Python",
"bytes": "872670"
},
{
"name": "Roff",
"bytes": "5000"
},
{
"name": "Ruby",
"bytes": "129834"
},
{
"name": "Shell",
"bytes": "1798938"
},
{
"name": "Smarty",
"bytes": "318"
},
{
"name": "TeX",
"bytes": "25888"
},
{
"name": "Vim Script",
"bytes": "8338240"
},
{
"name": "Vim Snippet",
"bytes": "597916"
}
],
"symlink_target": ""
} |
"""
Converts lcov line coverage output to Cobertura-compatible XML for CI
"""
import os
import re
import shutil
import subprocess
import sys
import time
from distutils.spawn import find_executable
from optparse import OptionParser
from xml.dom import minidom
CPPFILT = "c++filt"
HAVE_CPPFILT = False
if find_executable(CPPFILT) is not None:
HAVE_CPPFILT = True
VERSION = '1.6'
__all__ = ['LcovCobertura']
class Demangler(object):
    """Demangles C++ symbol names by piping them through ``c++filt``."""

    def __init__(self):
        # universal_newlines=True gives text-mode (str) pipes; without it
        # the pipes are binary on Python 3 and writing a str to stdin
        # raises TypeError.
        self.pipe = subprocess.Popen(
            CPPFILT, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            universal_newlines=True)

    def demangle(self, name):
        """Return the demangled form of *name* (one symbol per call)."""
        self.pipe.stdin.write(name + "\n")
        # stdin is block-buffered; flush so the readline() below cannot
        # deadlock waiting for output c++filt never received input for.
        self.pipe.stdin.flush()
        return self.pipe.stdout.readline().rstrip()
class LcovCobertura(object):
"""
Converts code coverage report files in lcov format to Cobertura's XML
report format so that CI servers like Jenkins can aggregate results and
determine build stability etc.
>>> from lcov_cobertura import LcovCobertura
>>> LCOV_INPUT = 'your lcov input'
>>> converter = LcovCobertura(LCOV_INPUT)
>>> cobertura_xml = converter.convert()
>>> print(cobertura_xml)
"""
def __init__(self, lcov_data, base_dir='.', excludes=None, demangle=False):
"""
Create a new :class:`LcovCobertura` object using the given `lcov_data`
and `options`.
:param lcov_data: Path to LCOV data file
:type lcov_data: string
:param base_dir: Path upon which to base all sources
:type base_dir: string
:param excludes: list of regexes to packages as excluded
:type excludes: [string]
:param demangle: whether to demangle function names using c++filt
:type demangle: bool
"""
if not excludes:
excludes = []
self.lcov_data = lcov_data
self.base_dir = base_dir
self.excludes = excludes
if demangle:
demangler = Demangler()
self.format = demangler.demangle
else:
self.format = lambda x: x
    def convert(self):
        """
        Convert lcov file to cobertura XML using options from this instance.
        """
        # Two phases: parse the LCOV text into a nested dict, then
        # serialize that dict as Cobertura XML.
        coverage_data = self.parse()
        return self.generate_cobertura_xml(coverage_data)
def parse(self):
"""
Generate a data structure representing it that can be serialized in any
logical format.
"""
coverage_data = {
'packages': {},
'summary': {'lines-total': 0, 'lines-covered': 0,
'branches-total': 0, 'branches-covered': 0},
'timestamp': str(int(time.time()))
}
package = None
current_file = None
file_lines_total = 0
file_lines_covered = 0
file_lines = {}
file_methods = {}
file_branches_total = 0
file_branches_covered = 0
for line in self.lcov_data.split('\n'):
if line.strip() == 'end_of_record':
if current_file is not None:
package_dict = coverage_data['packages'][package]
package_dict['lines-total'] += file_lines_total
package_dict['lines-covered'] += file_lines_covered
package_dict['branches-total'] += file_branches_total
package_dict['branches-covered'] += file_branches_covered
file_dict = package_dict['classes'][current_file]
file_dict['lines-total'] = file_lines_total
file_dict['lines-covered'] = file_lines_covered
file_dict['lines'] = dict(file_lines)
file_dict['methods'] = dict(file_methods)
file_dict['branches-total'] = file_branches_total
file_dict['branches-covered'] = file_branches_covered
coverage_data['summary']['lines-total'] += file_lines_total
coverage_data['summary']['lines-covered'] += file_lines_covered
coverage_data['summary']['branches-total'] += file_branches_total
coverage_data['summary']['branches-covered'] += file_branches_covered
line_parts = line.split(':', 1)
input_type = line_parts[0]
if input_type == 'SF':
# Get file name
file_name = line_parts[-1].strip()
relative_file_name = os.path.relpath(file_name, self.base_dir)
package = '.'.join(relative_file_name.split(os.path.sep)[0:-1])
class_name = '.'.join(relative_file_name.split(os.path.sep))
if package not in coverage_data['packages']:
coverage_data['packages'][package] = {
'classes': {}, 'lines-total': 0, 'lines-covered': 0,
'branches-total': 0, 'branches-covered': 0
}
coverage_data['packages'][package]['classes'][
relative_file_name] = {
'name': class_name, 'lines': {}, 'lines-total': 0,
'lines-covered': 0, 'branches-total': 0,
'branches-covered': 0
}
package = package
current_file = relative_file_name
file_lines_total = 0
file_lines_covered = 0
file_lines.clear()
file_methods.clear()
file_branches_total = 0
file_branches_covered = 0
elif input_type == 'DA':
# DA:2,0
(line_number, line_hits) = line_parts[-1].strip().split(',')
line_number = int(line_number)
if line_number not in file_lines:
file_lines[line_number] = {
'branch': 'false', 'branches-total': 0,
'branches-covered': 0
}
file_lines[line_number]['hits'] = line_hits
# Increment lines total/covered for class and package
try:
if int(line_hits) > 0:
file_lines_covered += 1
except:
pass
file_lines_total += 1
elif input_type == 'BRDA':
# BRDA:1,1,2,0
(line_number, block_number, branch_number, branch_hits) = line_parts[-1].strip().split(',')
line_number = int(line_number)
if line_number not in file_lines:
file_lines[line_number] = {
'branch': 'true', 'branches-total': 0,
'branches-covered': 0, 'hits': 0
}
file_lines[line_number]['branch'] = 'true'
file_lines[line_number]['branches-total'] += 1
file_branches_total += 1
if branch_hits != '-' and int(branch_hits) > 0:
file_lines[line_number]['branches-covered'] += 1
file_branches_covered += 1
elif input_type == 'BRF':
file_branches_total = int(line_parts[1])
elif input_type == 'BRH':
file_branches_covered = int(line_parts[1])
elif input_type == 'FN':
# FN:5,(anonymous_1)
function_line, function_name = line_parts[-1].strip().split(',')
file_methods[function_name] = [function_line, '0']
elif input_type == 'FNDA':
# FNDA:0,(anonymous_1)
(function_hits, function_name) = line_parts[-1].strip().split(',')
if function_name not in file_methods:
file_methods[function_name] = ['0', '0']
file_methods[function_name][-1] = function_hits
# Exclude packages
excluded = [x for x in coverage_data['packages'] for e in self.excludes
if re.match(e, x)]
for package in excluded:
del coverage_data['packages'][package]
# Compute line coverage rates
for package_data in list(coverage_data['packages'].values()):
package_data['line-rate'] = self._percent(
package_data['lines-total'],
package_data['lines-covered'])
package_data['branch-rate'] = self._percent(
package_data['branches-total'],
package_data['branches-covered'])
return coverage_data
    def generate_cobertura_xml(self, coverage_data):
        """
        Given parsed coverage data, return a String cobertura XML representation.

        :param coverage_data: Nested dict representing coverage information.
        :type coverage_data: dict
        """
        dom_impl = minidom.getDOMImplementation()
        doctype = dom_impl.createDocumentType("coverage", None,
                                              "http://cobertura.sourceforge.net/xml/coverage-04.dtd")
        document = dom_impl.createDocument(None, "coverage", doctype)
        root = document.documentElement
        # Global totals go on the root <coverage> element.
        summary = coverage_data['summary']
        self._attrs(root, {
            'branch-rate': self._percent(summary['branches-total'],
                                         summary['branches-covered']),
            'branches-covered': str(summary['branches-covered']),
            'branches-valid': str(summary['branches-total']),
            'complexity': '0',
            'line-rate': self._percent(summary['lines-total'],
                                       summary['lines-covered']),
            'lines-covered': str(summary['lines-covered']),
            'lines-valid': str(summary['lines-total']),
            'timestamp': coverage_data['timestamp'],
            'version': '2.0.3'
        })

        # <sources> holds a single <source> with the configured base dir.
        sources = self._el(document, 'sources', {})
        source = self._el(document, 'source', {})
        source.appendChild(document.createTextNode(self.base_dir))
        sources.appendChild(source)
        root.appendChild(sources)

        # One <package> per package, one <class> per source file.
        packages_el = self._el(document, 'packages', {})
        packages = coverage_data['packages']
        for package_name, package_data in list(packages.items()):
            package_el = self._el(document, 'package', {
                'line-rate': package_data['line-rate'],
                'branch-rate': package_data['branch-rate'],
                'name': package_name,
                'complexity': '0',
            })
            classes_el = self._el(document, 'classes', {})
            for class_name, class_data in list(package_data['classes'].items()):
                class_el = self._el(document, 'class', {
                    'branch-rate': self._percent(class_data['branches-total'],
                                                 class_data['branches-covered']),
                    'complexity': '0',
                    'filename': class_name,
                    'line-rate': self._percent(class_data['lines-total'],
                                               class_data['lines-covered']),
                    'name': class_data['name']
                })

                # Process methods: each method carries a single <line>
                # entry for its declaration line.
                methods_el = self._el(document, 'methods', {})
                for method_name, (line, hits) in list(class_data['methods'].items()):
                    method_el = self._el(document, 'method', {
                        'name': self.format(method_name),
                        'signature': '',
                        'line-rate': '1.0' if int(hits) > 0 else '0.0',
                        'branch-rate': '1.0' if int(hits) > 0 else '0.0',
                    })
                    method_lines_el = self._el(document, 'lines', {})
                    method_line_el = self._el(document, 'line', {
                        'hits': hits,
                        'number': line,
                        'branch': 'false',
                    })
                    method_lines_el.appendChild(method_line_el)
                    method_el.appendChild(method_lines_el)
                    methods_el.appendChild(method_el)

                # Process lines, in ascending line-number order.
                lines_el = self._el(document, 'lines', {})
                lines = list(class_data['lines'].keys())
                lines.sort()
                for line_number in lines:
                    line_el = self._el(document, 'line', {
                        'branch': class_data['lines'][line_number]['branch'],
                        'hits': str(class_data['lines'][line_number]['hits']),
                        'number': str(line_number)
                    })
                    # Branch lines additionally get a condition-coverage
                    # attribute like "50% (1/2)".
                    if class_data['lines'][line_number]['branch'] == 'true':
                        total = int(class_data['lines'][line_number]['branches-total'])
                        covered = int(class_data['lines'][line_number]['branches-covered'])
                        percentage = int((covered * 100.0) / total)
                        line_el.setAttribute('condition-coverage',
                                             '{0}% ({1}/{2})'.format(
                                                 percentage, covered, total))
                    lines_el.appendChild(line_el)
                class_el.appendChild(methods_el)
                class_el.appendChild(lines_el)
                classes_el.appendChild(class_el)
            package_el.appendChild(classes_el)
            packages_el.appendChild(package_el)
        root.appendChild(packages_el)

        return document.toprettyxml()
def _el(self, document, name, attrs):
"""
Create an element within document with given name and attributes.
:param document: Document element
:type document: Document
:param name: Element name
:type name: string
:param attrs: Attributes for element
:type attrs: dict
"""
return self._attrs(document.createElement(name), attrs)
def _attrs(self, element, attrs):
"""
Set attributes on given element.
:param element: DOM Element
:type element: Element
:param attrs: Attributes for element
:type attrs: dict
"""
for attr, val in list(attrs.items()):
element.setAttribute(attr, val)
return element
def _percent(self, lines_total, lines_covered):
"""
Get the percentage of lines covered in the total, with formatting.
:param lines_total: Total number of lines in given module
:type lines_total: number
:param lines_covered: Number of lines covered by tests in module
:type lines_covered: number
"""
if lines_total == 0:
return '0.0'
return str(float(float(lines_covered) / float(lines_total)))
if __name__ == '__main__':
    def main(argv):
        """
        Converts LCOV coverage data to Cobertura-compatible XML for reporting.

        Usage:
            lcov_cobertura.py lcov-file.dat
            lcov_cobertura.py lcov-file.dat -b src/dir -e test.lib -o path/out.xml

        By default, XML output will be written to ./coverage.xml
        """
        parser = OptionParser()
        parser.usage = ('lcov_cobertura.py lcov-file.dat [-b source/dir] '
                        '[-e <exclude packages regex>] [-o output.xml] [-d]')
        parser.description = 'Converts lcov output to cobertura-compatible XML'
        parser.add_option('-b', '--base-dir', action='store',
                          help='Directory where source files are located',
                          dest='base_dir', default='.')
        parser.add_option('-e', '--excludes',
                          help='Comma-separated list of regexes of packages to exclude',
                          action='append', dest='excludes', default=[])
        parser.add_option('-o', '--output',
                          help='Path to store cobertura xml file',
                          action='store', dest='output', default='coverage.xml')
        parser.add_option('-d', '--demangle',
                          help='Demangle C++ function names using %s' % CPPFILT,
                          action='store_true', dest='demangle', default=False)
        (options, args) = parser.parse_args(args=argv)

        if options.demangle and not HAVE_CPPFILT:
            raise RuntimeError("C++ filter executable (%s) not found!" % CPPFILT)

        # argv includes the program name as args[0], so exactly one
        # positional argument (the lcov data file) yields len(args) == 2.
        if len(args) != 2:
            print(main.__doc__)
            sys.exit(1)

        try:
            with open(args[1], 'r') as lcov_file:
                lcov_data = lcov_file.read()
                lcov_cobertura = LcovCobertura(lcov_data, options.base_dir, options.excludes, options.demangle)
                cobertura_xml = lcov_cobertura.convert()
            with open(options.output, mode='wt') as output_file:
                output_file.write(cobertura_xml)
        except IOError:
            sys.stderr.write("Unable to convert %s to Cobertura XML" % args[1])

    main(sys.argv)
| {
"content_hash": "9b2bcfc058a1e17f3ba68aaa837a2aa3",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 111,
"avg_line_length": 41.94074074074074,
"alnum_prop": 0.5177793476981043,
"repo_name": "DavidAntliff/AwaLWM2M",
"id": "75949f29d6670b76faeadb705480219976236a54",
"size": "17171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ci/lcov_cobertura.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2622046"
},
{
"name": "C++",
"bytes": "1886501"
},
{
"name": "CMake",
"bytes": "42648"
},
{
"name": "Makefile",
"bytes": "10057"
},
{
"name": "Objective-C",
"bytes": "202291"
},
{
"name": "Python",
"bytes": "160909"
},
{
"name": "Shell",
"bytes": "9145"
}
],
"symlink_target": ""
} |
import sys
import time
import pstats
import cProfile
import timeit
sys.path.insert(0, '.')
import vcfnp
def profile():
    """Load INFO fields from the VCF named on the command line.

    argv[1]: path to the VCF file; argv[2]: number of variants to load.
    """
    a = vcfnp.info(sys.argv[1], count=int(sys.argv[2]))


prof_fn = 'profile/tmp.prof'
cmd = 'profile()'
cProfile.runctx(cmd, globals(), locals(), prof_fn)
s = pstats.Stats(prof_fn)
s.strip_dirs().sort_stats('time').print_stats()
# argv[3]: timeit `number`; argv[4]: timeit `repeat`.  The call form of
# print works on Python 2 and 3 alike for a single argument (the original
# `print expr` statement is a SyntaxError on Python 3).
print(timeit.repeat(cmd, number=int(sys.argv[3]), repeat=int(sys.argv[4]), setup='from __main__ import profile'))
| {
"content_hash": "86db0c1de3090687b0ae3dbfbfa8ea35",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 112,
"avg_line_length": 20.347826086956523,
"alnum_prop": 0.6901709401709402,
"repo_name": "kyleabeauchamp/vcfnp",
"id": "395cf7e843f2a52f233c296022dc5a2c1cb889ba",
"size": "491",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "profile/vcfnp_info.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144922"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from bokeh import mpl
from bokeh.plotting import output_file, show
# 1000 daily points of Gaussian noise starting 1/1/2000, accumulated into
# a random walk.
ts = pd.Series(np.random.randn(1000),
               index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()

# Plot via pandas/matplotlib, then convert the current matplotlib figure
# to a Bokeh document and open it in the browser as series.html.
p = ts.plot()

output_file("series.html")
show(mpl.to_bokeh())
| {
"content_hash": "670b9833088e52bdbebfc6f7a1c6ccdf",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 61,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.6793103448275862,
"repo_name": "carlvlewis/bokeh",
"id": "f918f9b91e330fb735e335ed2832c8e12d52b501",
"size": "290",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "examples/compat/pandas/series.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413395"
},
{
"name": "CoffeeScript",
"bytes": "1965151"
},
{
"name": "HTML",
"bytes": "1546053"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1369394"
},
{
"name": "Shell",
"bytes": "13893"
}
],
"symlink_target": ""
} |
import logging
import os
# Absolute path of the project source directory (one level above this file).
SRC_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

# ============================================================================
# a flask settings
# http://flask.pocoo.org/docs/config/#configuring-from-files
# ============================================================================

# NOTE(review): the keys and salts below are hardcoded example values —
# generate per-deployment secrets before using these settings anywhere real.
SECRET_KEY = '47e585de7f22984d5ee291c2f31412384bfc32d0'
FLASH_MESSAGES = True

# Flask-SQLAlchemy
# http://pythonhosted.org/Flask-SQLAlchemy/config.html
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(SRC_DIR, "db.sqlite")
SQLALCHEMY_ECHO = False  # Doubles log statements, investigate

# Flask-Login
# https://flask-login.readthedocs.org/en/latest/#protecting-views
LOGIN_DISABLED = False

# Flask-Security
# http://pythonhosted.org/Flask-Security/configuration.html
SECURITY_PASSWORD_SALT = "abc"
# SECURITY_PASSWORD_HASH = "bcrypt" # requires py-bcrypt
# SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
# NOTE(review): "plaintext" stores passwords unhashed — example use only.
SECURITY_PASSWORD_HASH = "plaintext"
SECURITY_EMAIL_SENDER = "support@example.com"
SECURITY_CONFIRMABLE = True
SECURITY_REGISTERABLE = True
SECURITY_RECOVERABLE = True
SECURITY_CHANGEABLE = True
SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05"
SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47"
SECURITY_DEFAULT_REMEMBER_ME = True

# Set secret keys for CSRF protection
CSRF_SESSION_KEY = '8a7474974efcf76896aa84eea9cbe016bbc08828'
CSRF_ENABLED = True

# Flask-Babel
# http://pythonhosted.org/Flask-Babel/
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_TIMEZONE = "UTC"

# Flask-Mail
# http://pythonhosted.org/Flask-Mail/
SERVER_EMAIL = 'Flask-SocialBlueprint <support@example.com>'

# Flask-SocialBlueprint
# https://github.com/wooyek/flask-social-blueprint
# OAuth provider credentials; the '…' characters are truncation
# placeholders for real keys.
SOCIAL_BLUEPRINT = {
    # https://developers.facebook.com/apps/
    "flask_social_blueprint.providers.Facebook": {
        # App ID
        'consumer_key': '197…',
        # App Secret
        'consumer_secret': 'c956c1…'
    },
    # https://apps.twitter.com/app/new
    "flask_social_blueprint.providers.Twitter": {
        # Your access token from API Keys tab
        'consumer_key': 'bkp…',
        # access token secret
        'consumer_secret': 'pHUx…'
    },
    # https://console.developers.google.com/project
    "flask_social_blueprint.providers.Google": {
        # Client ID
        'consumer_key': '797….apps.googleusercontent.com',
        # Client secret
        'consumer_secret': 'bDG…'
    },
    # https://github.com/settings/applications/new
    "flask_social_blueprint.providers.Github": {
        # Client ID
        'consumer_key': '6f6…',
        # Client Secret
        'consumer_secret': '1a9…'
    },
}
| {
"content_hash": "8b576284e9e3e46edd6f89b1b9a24219",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 30.363636363636363,
"alnum_prop": 0.6613023952095808,
"repo_name": "maxtortime/flask-social-blueprint",
"id": "325eadf9a3885360c6a92e9d455cd4965c063a35",
"size": "2741",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "example/sqla/website/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5544"
},
{
"name": "HTML",
"bytes": "27393"
},
{
"name": "Python",
"bytes": "56981"
}
],
"symlink_target": ""
} |
import testinfra.utils.ansible_runner
# Run these tests against every host in the Molecule inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    '.molecule/ansible_inventory').get_hosts('all')

# Desktop launcher the role is expected to install for the root user.
desktop_file_location = "/root/.local/share/applications/rubymine-2017.2.desktop"
def test_desktop_file_exists(File):
    """The RubyMine .desktop launcher must exist as a regular file."""
    desktop_file = File(desktop_file_location)
    assert desktop_file.exists
    assert desktop_file.is_file
def test_desktop_file_contains_fullpath(File):
    """The launcher must reference the unpacked icon and start script."""
    desktop_file = File(desktop_file_location)
    for expected_path in ("/root/Tools/RubyMine-2017.2/bin/rubymine.png",
                          "/root/Tools/RubyMine-2017.2/bin/rubymine.sh"):
        assert desktop_file.contains(expected_path)
def test_desktop_file_contains_right_name(File):
    """The launcher must carry the versioned application name."""
    assert File(desktop_file_location).contains("RubyMine 2017.2")
def test_start_file_exists(File):
f = File('/root/Tools/RubyMine-2017.2/bin/rubymine.sh')
assert f.exists
assert f.is_file
| {
"content_hash": "939be51e2461645c7583728b0279d41c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 81,
"avg_line_length": 24.91176470588235,
"alnum_prop": 0.7213695395513577,
"repo_name": "henriklynggaard/ansible-role-rubymine",
"id": "30bef98e146f9b5e6f26055a28f420b6d6fc729d",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rubymine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "847"
}
],
"symlink_target": ""
} |
from os.path import realpath
from mininet.util import errFail, errRun
from mininet.log import debug, info
import sys
# Utility functions for unmounting a tree
# Real path of OSHI's dir
# Default root of the Mininet runtime directory tree to unmount.
MNRUNDIR = realpath( '/var/run/mn' )
# Take the mounted points of the root machine
def mountPoints():
    "Return list of mounted file systems"
    # /proc/mounts has one mount per whitespace-separated line;
    # field 1 is the mount point.
    mtab, _err, _ret = errFail( 'cat /proc/mounts' )
    return [ line.split( ' ' )[ 1 ]
             for line in mtab.split( '\n' ) if line ]
# Utility Function for unmount all the dirs
def unmountAll( rootdir=MNRUNDIR ):
    "Unmount all mounts under a directory tree"
    rootdir = realpath( rootdir )
    # Find all mounts below rootdir
    # This is subtle because /foo is not
    # a parent of /foot
    dirslash = rootdir + '/'
    # Bug fix: the original compared `m == dir`, i.e. against the builtin
    # dir() function — always False for a string — so a mount exactly at
    # rootdir was never selected. Compare against rootdir instead.
    mounts = [ m for m in mountPoints()
               if m == rootdir or m.find( dirslash ) == 0 ]
    # Unmount them from bottom to top
    mounts.sort( reverse=True )
    for mount in mounts:
        debug( 'Unmounting', mount, '\n' )
        _out, err, code = errRun( 'umount', mount )
        if code != 0:
            info( '*** Warning: failed to umount', mount, '\n' )
            info( err )
| {
"content_hash": "229888691f39259b46c3edf0224ed2cc",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 64,
"avg_line_length": 30.209302325581394,
"alnum_prop": 0.6096997690531177,
"repo_name": "netgroup/Dreamer-Management-Scripts",
"id": "3fb44af00165c64369f408a8396f9b8ab8c5e7ff",
"size": "2393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oshi/vs/utility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "471"
},
{
"name": "Python",
"bytes": "20519"
},
{
"name": "Shell",
"bytes": "88397"
}
],
"symlink_target": ""
} |
"""Support for Jewish Calendar binary sensors."""
from __future__ import annotations
import datetime as dt
from datetime import datetime
from typing import cast
import hdate
from hdate.zmanim import Zmanim
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers import event
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from . import DOMAIN
# Description of the single binary sensor exposed by this platform:
# whether "Issur Melacha" is currently in effect (see is_on below).
BINARY_SENSORS = BinarySensorEntityDescription(
    key="issur_melacha_in_effect",
    name="Issur Melacha in Effect",
    icon="mdi:power-plug-off",
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Jewish Calendar binary sensor devices."""
    # Only set up when discovered by the main component; direct platform
    # configuration (discovery_info is None) is ignored.
    if discovery_info is None:
        return

    async_add_entities([JewishCalendarBinarySensor(hass.data[DOMAIN], BINARY_SENSORS)])
class JewishCalendarBinarySensor(BinarySensorEntity):
    """Representation of an Jewish Calendar binary sensor."""

    # State changes are self-scheduled (see _schedule_update), so Home
    # Assistant polling is disabled.
    _attr_should_poll = False

    def __init__(
        self,
        data: dict[str, str | bool | int | float],
        description: BinarySensorEntityDescription,
    ) -> None:
        """Initialize the binary sensor."""
        self._attr_name = f"{data['name']} {description.name}"
        self._attr_unique_id = f"{data['prefix']}_{description.key}"
        self._location = data["location"]
        self._hebrew = data["language"] == "hebrew"
        self._candle_lighting_offset = data["candle_lighting_offset"]
        self._havdalah_offset = data["havdalah_offset"]
        # Cancel callback for the pending scheduled update, if any.
        self._update_unsub: CALLBACK_TYPE | None = None

    @property
    def is_on(self) -> bool:
        """Return true if sensor is on."""
        return cast(bool, self._get_zmanim().issur_melacha_in_effect)

    def _get_zmanim(self) -> Zmanim:
        """Return the Zmanim object for now()."""
        return hdate.Zmanim(
            date=dt_util.now(),
            location=self._location,
            candle_lighting_offset=self._candle_lighting_offset,
            havdalah_offset=self._havdalah_offset,
            hebrew=self._hebrew,
        )

    async def async_added_to_hass(self) -> None:
        """Run when entity about to be added to hass."""
        await super().async_added_to_hass()
        self._schedule_update()

    @callback
    def _update(self, now: datetime | None = None) -> None:
        """Update the state of the sensor."""
        self._update_unsub = None
        # Chain the next update before publishing the new state.
        self._schedule_update()
        self.async_write_ha_state()

    def _schedule_update(self) -> None:
        """Schedule the next update of the sensor."""
        now = dt_util.now()
        zmanim = self._get_zmanim()
        # Default next update: tomorrow's sunrise; candle lighting or
        # havdalah replace it when they fall sooner (but after now).
        update = zmanim.zmanim["sunrise"] + dt.timedelta(days=1)
        candle_lighting = zmanim.candle_lighting
        if candle_lighting is not None and now < candle_lighting < update:
            update = candle_lighting
        havdalah = zmanim.havdalah
        if havdalah is not None and now < havdalah < update:
            update = havdalah
        # Cancel any previously scheduled callback before re-arming.
        if self._update_unsub:
            self._update_unsub()
        self._update_unsub = event.async_track_point_in_time(
            self.hass, self._update, update
        )
| {
"content_hash": "105651048ff05bb350e45ed5a99d2c86",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 87,
"avg_line_length": 33.91346153846154,
"alnum_prop": 0.652112276722427,
"repo_name": "jawilson/home-assistant",
"id": "f239dfc31b68f60afd34a7a071a42b996e59635f",
"size": "3527",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "homeassistant/components/jewish_calendar/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import dataclasses
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, List, Mapping, Optional, Tuple
from pants.backend.python.subsystems.python_native_code import PythonNativeCode
from pants.backend.python.util_rules import pex_environment
from pants.backend.python.util_rules.pex_environment import (
PexEnvironment,
PexRuntimeEnvironment,
PythonExecutable,
)
from pants.core.util_rules import external_tool
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests
from pants.engine.internals.selectors import MultiGet
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessCacheScope
from pants.engine.rules import Get, collect_rules, rule
from pants.option.global_options import GlobalOptions
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import classproperty, frozen_after_init
from pants.util.strutil import create_path_env_var
class PexBinary(TemplatedExternalTool):
    # Subsystem describing how to download the Pex CLI binary.
    options_scope = "download-pex-bin"
    name = "pex"
    help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
    default_version = "v2.1.34"
    default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"

    @classproperty
    def default_known_versions(cls):
        # The released artifact is the same file on every platform, so each
        # platform entry shares one sha256 and size.
        sha256 = "9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f"
        size = "3597768"
        entries = []
        for platform_name in ("darwin", "linux"):
            entries.append("|".join((cls.default_version, platform_name, sha256, size)))
        return entries
@frozen_after_init
@dataclass(unsafe_hash=True)
class PexCliProcess:
    """A request to run the Pex CLI; converted to a `Process` by `setup_pex_cli_process`."""

    argv: Tuple[str, ...]
    # Excluded from __eq__/__hash__ so differing descriptions do not produce
    # distinct cache entries for otherwise-identical invocations.
    description: str = dataclasses.field(compare=False)
    additional_input_digest: Optional[Digest]
    extra_env: Optional[FrozenDict[str, str]]
    output_files: Optional[Tuple[str, ...]]
    output_directories: Optional[Tuple[str, ...]]
    python: Optional[PythonExecutable]
    level: LogLevel
    cache_scope: ProcessCacheScope

    def __init__(
        self,
        *,
        argv: Iterable[str],
        description: str,
        additional_input_digest: Optional[Digest] = None,
        extra_env: Optional[Mapping[str, str]] = None,
        output_files: Optional[Iterable[str]] = None,
        output_directories: Optional[Iterable[str]] = None,
        python: Optional[PythonExecutable] = None,
        level: LogLevel = LogLevel.INFO,
        cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
    ) -> None:
        """Normalize the given arguments into hashable/frozen forms."""
        self.argv = tuple(argv)
        self.description = description
        self.additional_input_digest = additional_input_digest
        self.extra_env = FrozenDict(extra_env) if extra_env else None
        self.output_files = tuple(output_files) if output_files else None
        self.output_directories = tuple(output_directories) if output_directories else None
        self.python = python
        self.level = level
        self.cache_scope = cache_scope
        # `@frozen_after_init` replaces dataclass-generated __init__, so the
        # validation hook must be invoked manually.
        self.__post_init__()

    def __post_init__(self) -> None:
        # NOTE(review): the membership test checks "--pex-root-path" while the
        # error message names "--pex-root" — confirm which flag is intended.
        if "--pex-root-path" in self.argv:
            raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
class PexPEX(DownloadedExternalTool):
    """The Pex PEX binary."""
@rule
async def download_pex_pex(pex_binary: PexBinary) -> PexPEX:
    """Fetch the Pex PEX itself as a downloaded external tool."""
    tool = await Get(
        DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)
    )
    return PexPEX(digest=tool.digest, exe=tool.exe)
@rule
async def setup_pex_cli_process(
    request: PexCliProcess,
    pex_binary: PexPEX,
    pex_env: PexEnvironment,
    python_native_code: PythonNativeCode,
    global_options: GlobalOptions,
    pex_runtime_env: PexRuntimeEnvironment,
) -> Process:
    """Convert a PexCliProcess request into a runnable `Process`."""
    tmpdir = ".tmp"
    gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]
    cert_args = []
    # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.
    # Instead we manually create a FileContent for it.
    if global_options.options.ca_certs_path:
        ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()
        # Only the basename survives into the chroot.
        chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)
        gets.append(
            Get(
                Digest,
                CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),
            )
        )
        cert_args = ["--cert", chrooted_ca_certs_path]
    digests_to_merge = [pex_binary.digest]
    digests_to_merge.extend(await MultiGet(gets))
    if request.additional_input_digest:
        digests_to_merge.append(request.additional_input_digest)
    input_digest = await Get(Digest, MergeDigests(digests_to_merge))
    # Sandbox-relative Pex cache root; exposed as an append-only cache below.
    pex_root_path = ".cache/pex_root"
    argv = [
        pex_binary.exe,
        *cert_args,
        "--python-path",
        create_path_env_var(pex_env.interpreter_search_paths),
        "--pex-root",
        pex_root_path,
        # Ensure Pex and its subprocesses create temporary files in the process execution
        # sandbox. It may make sense to do this generally for Processes, but in the short term we
        # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
        # perform. Making the TMPDIR local to the sandbox allows control via
        # --local-execution-root-dir for the local case and should work well with remote cases where
        # a remoting implementation has to allow for processes producing large binaries in a
        # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
        # environment variable allows Pex to absolutize the path ensuring subprocesses that change
        # CWD can find the TMPDIR.
        "--tmpdir",
        tmpdir,
    ]
    if pex_runtime_env.verbosity > 0:
        argv.append(f"-{'v' * pex_runtime_env.verbosity}")
    # NB: This comes at the end of the argv because the request may use `--` passthrough args,
    # which must come at the end.
    argv.extend(request.argv)
    normalized_argv = pex_env.create_argv(*argv, python=request.python)
    env = {
        **pex_env.environment_dict(python_configured=request.python is not None),
        **python_native_code.environment_dict,
        **(request.extra_env or {}),
    }
    return Process(
        normalized_argv,
        description=request.description,
        input_digest=input_digest,
        env=env,
        output_files=request.output_files,
        output_directories=request.output_directories,
        append_only_caches={"pex_root": pex_root_path},
        level=request.level,
        cache_scope=request.cache_scope,
    )
def rules():
    """Return this module's rules plus those of its rule-graph dependencies."""
    own_rules = list(collect_rules())
    dep_rules = list(external_tool.rules()) + list(pex_environment.rules())
    return own_rules + dep_rules
| {
"content_hash": "46420f4b401008110f42ea383983608c",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 100,
"avg_line_length": 37.2962962962963,
"alnum_prop": 0.6725776705915733,
"repo_name": "jsirois/pants",
"id": "e5111c2087244c46964137736169c1c04e1e213c",
"size": "7181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/python/util_rules/pex_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6008"
},
{
"name": "Mustache",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "2837069"
},
{
"name": "Rust",
"bytes": "1241058"
},
{
"name": "Shell",
"bytes": "57720"
},
{
"name": "Starlark",
"bytes": "27937"
}
],
"symlink_target": ""
} |
from usage.domain_cache import get_domain_name
from usage.log import logging
logger = logging.getLogger('usage.licensing.common')
# Number of spaces per indentation level in the printed report.
INDENTSIZE = 4
# Target width of each printed report line.
MAXLINESIZE = 79
# Pad character chosen by indent parity; both entries are spaces at present.
FILL = {0: ' ', 1: ' '}
class Licenser(object):
    """Base licenser class with common functionality.

    Rows are aggregated into a nested dict (``self._data``) keyed by the
    configured groupby fields; ``output()`` rolls up totals and prints an
    indented report.

    Uses ``print(...)`` calls and ``dict.items()`` instead of the Python
    2-only ``print`` statement and ``dict.iteritems()`` so the class runs
    under both Python 2 and Python 3.
    """

    def __init__(self, project_id_field='Project Id', groupby=None):
        """Inits the licenser

        :param project_id_field: Name of the field in a csv report
            containing the project id of a resource.
        :type project_id_field: Str
        :param groupby: List of fields in a report to group results by.
            Defaults to ['domain'].
        :type groupby: List
        """
        self._project_id_field = project_id_field
        if groupby is None:
            groupby = ['domain']
        self._groupby = groupby
        # Nested aggregation tree: one level of keys per groupby field.
        self._data = {}

    def _relevant(self, row):
        """Determine if row is relevant to licenser.

        The base functionality is to always return True.
        Subclasses should implement their own version of this method.

        :param row: Row
        :type row: Dict
        :returns: True or False
        :rtype: Boolean
        """
        return True

    def _row_cost(self, row):
        """Determine the cost of the row.

        The base functionality is to always return 0.0.
        Subclasses should implement this method.

        :param row: Row
        :type row: Dict
        :returns: Cost of the row
        :rtype: Float
        """
        return 0.0

    def _drill_down(self, row):
        """Drill down into the data based on groupby.

        :param row: Row
        :type row: Dict
        :returns: Node in dictionary at bottom of groupby
        :rtype: Dict
        """
        current = self._data
        for field in self._groupby:
            if field == 'domain':
                # Domain names are resolved from the row's project id.
                key = get_domain_name(row.get(self._project_id_field))
            else:
                key = row.get(field) or 'unknown'
            current = current.setdefault(key, {})
        return current

    def _dft_total(self, node):
        """Sums totals using recursive depth first traversal.

        :param node: Node in data
        :type node: Dict
        :returns: Total of children
        :rtype: float
        """
        # Base cases: raw numbers and leaf nodes that carry their own cost.
        if not isinstance(node, dict):
            return node
        if 'cost' in node:
            return node['cost']
        total = 0.0
        for key, value in node.items():
            if key == 'total':
                continue
            total += self._dft_total(value)
        # Cache the rolled-up total on the node for later printing.
        node['total'] = total
        return total

    def _format_node(self, name, node, indents):
        """Format an output row for a node in data with children.

        :param name: Name of the node
        :type name: Str
        :param node: Node
        :type node: Dict
        :param indents: Number of indents
        :type indents: Int
        :returns: The formatted string.
        :rtype: Str
        """
        indent = ' ' * indents * INDENTSIZE
        number = '{:10.2f}'.format(node['total'])
        # Pad the name so the number is right-aligned at MAXLINESIZE columns.
        formatstr = '{}{:%s<%d}{}' % (
            FILL.get(indents % 2),
            MAXLINESIZE - len(indent) - len(number)
        )
        return formatstr.format(indent, name, number)

    def _format_leaf(self, name, node, indents):
        """Format an output row for a node in data without children.

        :param name: Name of the node
        :type name: Str
        :param node: Node in data
        :type node: Dict
        :param indents: Number of indents
        :type indents: Int
        :returns: The formatted string.
        :rtype: Str
        """
        indent = ' ' * indents * INDENTSIZE
        number = '{:10.2f}'.format(node['cost'])
        formatstr = '{}{:%s<%d}{}' % (
            FILL.get(indents % 2),
            MAXLINESIZE - len(indent) - len(number)
        )
        return formatstr.format(indent, name, number)

    def _output_node(self, name, node, indents=0):
        """Print the string representation of the node.

        :param name: Name of the node.
        :type name: Str
        :param node: Node in data
        :type node: Dict
        :param indents: Number of indents.
        :type indents: Int
        """
        # Leaf nodes will have a cost key
        if 'cost' in node:
            print(self._format_leaf(name, node, indents))
            return
        print(self._format_node(name, node, indents))
        # Recurse into children, skipping the cached rollup key.
        for child_name, child in node.items():
            if child_name != 'total':
                self._output_node(child_name, child, indents + 1)

    def handle_row(self, row):
        """Handle a row.

        The base functionality is to do nothing. Subclasses should implement
        this method.

        :param row: Row to handle
        :type row: Dict
        """
        pass

    def output(self):
        """Print output of the licenser."""
        # Roll up total costs
        self._dft_total(self._data)
        # Optionally print a title (only if a subclass defines _title).
        if hasattr(self, '_title'):
            print(self._title)
            print('=' * len(self._title))
        # Iterate children
        for node_name, node in self._data.items():
            if node_name != 'total':
                self._output_node(node_name, node, 0)
class HourLicenser(Licenser):
    """Base hour licenser class."""

    def __init__(self, project_id_field=None, hours_field=None, groupby=None):
        """Inits the licenser.

        :param project_id_field: Name of the field in a csv
            report containing the project id for a resource.
        :type project_id_field: Str
        :param hours_field: Name of the field in a csv report
            containing the hours of usage for a resource.
        :type hours_field: Str
        :param groupby: List of fields to group by.
        :type groupby: List
        """
        super(HourLicenser, self).__init__(
            project_id_field=project_id_field, groupby=groupby)
        self._hours_field = hours_field or 'Hours'

    def handle_row(self, row):
        """Handle a csv row.

        Accumulates hours and cost on the node selected by the groupby
        fields; irrelevant rows are skipped.

        :param row: Row to handle
        :type row: Dict
        """
        if not self._relevant(row):
            return
        data = self._drill_down(row)
        data['hours'] = data.get('hours', 0.0) + float(row.get(self._hours_field))
        data['cost'] = data.get('cost', 0.0) + self._row_cost(row)
class CountLicenser(Licenser):
    """Licenser that aggregates by counting matching rows."""

    def handle_row(self, row):
        """Handle a csv row.

        Increments the node's count and accumulates its cost; irrelevant
        rows are skipped.

        :param row: Row to handle
        :type row: Dict
        """
        if not self._relevant(row):
            return
        data = self._drill_down(row)
        data['count'] = data.get('count', 0) + 1
        data['cost'] = data.get('cost', 0.0) + self._row_cost(row)
| {
"content_hash": "9cbc7744735b822cc430a1e9c411fe0f",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 76,
"avg_line_length": 28.818930041152264,
"alnum_prop": 0.5447665286305869,
"repo_name": "absalon-james/usage",
"id": "bfcced1d3e52c62a05251ff7fc721ff305213c6e",
"size": "7003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usage/licensing/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "160155"
}
],
"symlink_target": ""
} |
from ingenico.connect.sdk.data_object import DataObject


class CaptureStatusOutput(DataObject):
    """Status details of a capture as reported by the legacy platform."""

    __status_code = None

    @property
    def status_code(self):
        """
        | Numeric status code of the legacy API. It is returned to ease the migration from the legacy APIs to Worldline Connect. You should not write new business logic based on this property as it will be deprecated in a future version of the API. The value can also be found in the GlobalCollect Payment Console, in the Ogone BackOffice and in report files.

        Type: int
        """
        return self.__status_code

    @status_code.setter
    def status_code(self, value):
        self.__status_code = value

    def to_dictionary(self):
        # Start from the parent's serialization and layer this field on top.
        result = super(CaptureStatusOutput, self).to_dictionary()
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        return result

    def from_dictionary(self, dictionary):
        super(CaptureStatusOutput, self).from_dictionary(dictionary)
        # Membership test (not .get) so an explicit null is still assigned.
        if 'statusCode' in dictionary:
            self.status_code = dictionary['statusCode']
        return self
| {
"content_hash": "c9916a3df39a08dab17f2f2d42dc6b33",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 359,
"avg_line_length": 37.25806451612903,
"alnum_prop": 0.677056277056277,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "43b29fe99b5483d7f341b0912cdc2aa7b978fbde",
"size": "1306",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ingenico/connect/sdk/domain/capture/definitions/capture_status_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
} |
import uuid
from mistralclient.api import client as mistral
from oslo_config import cfg
import retrying
from st2common.query.base import Querier
from st2common.constants import action as action_constants
from st2common import log as logging
from st2common.services import action as action_service
from st2common.util import jsonify
from st2common.util.url import get_url_without_trailing_slash
from st2common.util.workflow import mistral as utils
LOG = logging.getLogger(__name__)
# Terminal mistral workflow states mapped to their st2 liveaction statuses.
DONE_STATES = {
    'ERROR': action_constants.LIVEACTION_STATUS_FAILED,
    'SUCCESS': action_constants.LIVEACTION_STATUS_SUCCEEDED,
    'CANCELLED': action_constants.LIVEACTION_STATUS_CANCELED
}
# Mistral workflow states that indicate work is still in progress.
ACTIVE_STATES = {
    'RUNNING': action_constants.LIVEACTION_STATUS_RUNNING
}
def get_instance():
    """Create a new results querier keyed by a random UUID."""
    querier_id = str(uuid.uuid4())
    return MistralResultsQuerier(querier_id)
class MistralResultsQuerier(Querier):
    """Polls mistral over its v2 API for workflow execution status and results."""

    def __init__(self, id, *args, **kwargs):
        super(MistralResultsQuerier, self).__init__(*args, **kwargs)
        self._base_url = get_url_without_trailing_slash(cfg.CONF.mistral.v2_base_url)
        self._client = mistral.client(
            mistral_url=self._base_url,
            username=cfg.CONF.mistral.keystone_username,
            api_key=cfg.CONF.mistral.keystone_password,
            project_name=cfg.CONF.mistral.keystone_project_name,
            auth_url=cfg.CONF.mistral.keystone_auth_url,
            cacert=cfg.CONF.mistral.cacert,
            insecure=cfg.CONF.mistral.insecure)

    @retrying.retry(
        retry_on_exception=utils.retry_on_exceptions,
        wait_exponential_multiplier=cfg.CONF.mistral.retry_exp_msec,
        wait_exponential_max=cfg.CONF.mistral.retry_exp_max_msec,
        stop_max_delay=cfg.CONF.mistral.retry_stop_max_msec)
    def query(self, execution_id, query_context):
        """
        Queries mistral for workflow results using v2 APIs.

        :param execution_id: st2 execution_id (context to be used for logging/audit)
        :type execution_id: ``str``
        :param query_context: context for the query to be made to mistral. This contains mistral
                              execution id.
        :type query_context: ``object``
        :rtype: (``str``, ``object``)
        """
        mistral_exec_id = query_context.get('mistral', {}).get('execution_id', None)
        if not mistral_exec_id:
            # BUG FIX: the message was previously passed printf-style args that
            # Exception never interpolates; format the message explicitly.
            raise Exception(
                '[%s] Missing mistral workflow execution ID in query context. %s' %
                (execution_id, query_context))
        try:
            result = self._get_workflow_result(mistral_exec_id)
            result['tasks'] = self._get_workflow_tasks(mistral_exec_id)
        except Exception:
            LOG.exception('[%s] Unable to fetch mistral workflow result and tasks. %s',
                          execution_id, query_context)
            raise
        status = self._determine_execution_status(
            execution_id, result['extra']['state'], result['tasks'])
        # Lazy %-args so formatting is skipped when DEBUG logging is off.
        LOG.debug('[%s] mistral workflow execution status: %s', execution_id, status)
        LOG.debug('[%s] mistral workflow execution result: %s', execution_id, result)
        return (status, result)

    def _get_workflow_result(self, exec_id):
        """
        Returns the workflow status and output. Mistral workflow status will be converted
        to st2 action status.

        :param exec_id: Mistral execution ID
        :type exec_id: ``str``
        :rtype: (``str``, ``dict``)
        """
        execution = self._client.executions.get(exec_id)
        # Output is only meaningful once the workflow has reached a terminal state.
        result = jsonify.try_loads(execution.output) if execution.state in DONE_STATES else {}
        result['extra'] = {
            'state': execution.state,
            'state_info': execution.state_info
        }
        return result

    def _get_workflow_tasks(self, exec_id):
        """
        Returns the list of tasks for a workflow execution.

        :param exec_id: Mistral execution ID
        :type exec_id: ``str``
        :rtype: ``list``
        """
        # The list endpoint returns summaries; fetch each task for full detail.
        wf_tasks = [
            self._client.tasks.get(task.id)
            for task in self._client.tasks.list(workflow_execution_id=exec_id)
        ]
        return [self._format_task_result(task=wf_task.to_dict()) for wf_task in wf_tasks]

    def _format_task_result(self, task):
        """
        Format task result to follow the unified workflow result format.
        """
        result = {
            'id': task['id'],
            'name': task['name'],
            'workflow_execution_id': task.get('workflow_execution_id', None),
            'workflow_name': task['workflow_name'],
            'created_at': task.get('created_at', None),
            'updated_at': task.get('updated_at', None),
            'state': task.get('state', None),
            'state_info': task.get('state_info', None)
        }
        # These attributes arrive JSON-encoded; decode them when possible.
        for attr in ['result', 'input', 'published']:
            result[attr] = jsonify.try_loads(task.get(attr, None))
        return result

    def _determine_execution_status(self, execution_id, wf_state, tasks):
        """
        Map the mistral workflow state plus per-task states onto an st2
        liveaction status, accounting for in-flight cancellations.
        """
        # Get the liveaction object to compare state.
        is_action_canceled = action_service.is_action_canceled_or_canceling(execution_id)
        # Identify the list of tasks that are not still running.
        active_tasks = [t for t in tasks if t['state'] in ACTIVE_STATES]
        # Keep the execution in running state if there are active tasks.
        # In certain use cases, Mistral sets the workflow state to
        # completion prior to task completion.
        if is_action_canceled and active_tasks:
            status = action_constants.LIVEACTION_STATUS_CANCELING
        elif is_action_canceled and not active_tasks and wf_state not in DONE_STATES:
            status = action_constants.LIVEACTION_STATUS_CANCELING
        elif not is_action_canceled and active_tasks and wf_state == 'CANCELLED':
            status = action_constants.LIVEACTION_STATUS_CANCELING
        elif wf_state in DONE_STATES and active_tasks:
            status = action_constants.LIVEACTION_STATUS_RUNNING
        elif wf_state in DONE_STATES and not active_tasks:
            status = DONE_STATES[wf_state]
        else:
            status = action_constants.LIVEACTION_STATUS_RUNNING
        return status
| {
"content_hash": "c164b239619d37dafc2cb5533b3215b5",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 96,
"avg_line_length": 39.5253164556962,
"alnum_prop": 0.6302642113690953,
"repo_name": "peak6/st2",
"id": "5b97d3d5d3b021db3416565365f254c9aef1e5d7",
"size": "6245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/runners/mistral_v2/query/mistral_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "42545"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4012891"
},
{
"name": "Shell",
"bytes": "41016"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import errno
import json
import os
import random
import re
import shutil
import string
import socket
import platform
import subprocess
import time
import uuid
import os.path
from googleapiclient.errors import HttpError
from subprocess import Popen, PIPE
from urllib.parse import quote_plus
import requests
from googleapiclient.discovery import build
from airflow import AirflowException, LoggingMixin
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
# Number of retries - used by googleapiclient method calls to perform retries
# For requests that are "retriable"
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Connection
from airflow.utils.db import provide_session
# Maximum length of an AF_UNIX socket path (108 bytes on Linux, per sys/un.h).
UNIX_PATH_MAX = 108
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 1
class CloudSqlOperationStatus:
    """Helper class with the possible states of a Cloud SQL long-running operation."""
    PENDING = "PENDING"
    RUNNING = "RUNNING"
    DONE = "DONE"
    UNKNOWN = "UNKNOWN"
# noinspection PyAbstractClass
class CloudSqlHook(GoogleCloudBaseHook):
"""
Hook for Google Cloud SQL APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_conn = None
def __init__(self,
api_version,
gcp_conn_id='google_cloud_default',
delegate_to=None):
super().__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
self.num_retries = self._get_field('num_retries', 5)
def get_conn(self):
"""
Retrieves connection to Cloud SQL.
:return: Google Cloud SQL services object.
:rtype: dict
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build('sqladmin', self.api_version,
http=http_authorized, cache_discovery=False)
return self._conn
@GoogleCloudBaseHook.fallback_to_default_project_id
def get_instance(self, instance, project_id=None):
"""
Retrieves a resource containing information about a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: A Cloud SQL instance resource.
:rtype: dict
"""
return self.get_conn().instances().get(
project=project_id,
instance=instance
).execute(num_retries=self.num_retries)
@GoogleCloudBaseHook.fallback_to_default_project_id
def create_instance(self, body, project_id=None):
"""
Creates a new Cloud SQL instance.
:param body: Body required by the Cloud SQL insert API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instances().insert(
project=project_id,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
@GoogleCloudBaseHook.fallback_to_default_project_id
def patch_instance(self, body, instance, project_id=None):
"""
Updates settings of a Cloud SQL instance.
Caution: This is not a partial update, so you must include values for
all the settings that you want to retain.
:param body: Body required by the Cloud SQL patch API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body.
:type body: dict
:param instance: Cloud SQL instance ID. This does not include the project ID.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instances().patch(
project=project_id,
instance=instance,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
@GoogleCloudBaseHook.fallback_to_default_project_id
def delete_instance(self, instance, project_id=None):
"""
Deletes a Cloud SQL instance.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance: Cloud SQL instance ID. This does not include the project ID.
:type instance: str
:return: None
"""
response = self.get_conn().instances().delete(
project=project_id,
instance=instance,
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
@GoogleCloudBaseHook.fallback_to_default_project_id
def get_database(self, instance, database, project_id=None):
"""
Retrieves a database resource from a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param database: Name of the database in the instance.
:type database: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: A Cloud SQL database resource, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource.
:rtype: dict
"""
return self.get_conn().databases().get(
project=project_id,
instance=instance,
database=database
).execute(num_retries=self.num_retries)
@GoogleCloudBaseHook.fallback_to_default_project_id
def create_database(self, instance, body, project_id=None):
"""
Creates a new database inside a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().databases().insert(
project=project_id,
instance=instance,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
@GoogleCloudBaseHook.fallback_to_default_project_id
def patch_database(self, instance, database, body, project_id=None):
"""
Updates a database resource inside a Cloud SQL instance.
This method supports patch semantics.
See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param database: Name of the database to be updated in the instance.
:type database: str
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().databases().patch(
project=project_id,
instance=instance,
database=database,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
@GoogleCloudBaseHook.fallback_to_default_project_id
def delete_database(self, instance, database, project_id=None):
"""
Deletes a database from a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param database: Name of the database to be deleted in the instance.
:type database: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().databases().delete(
project=project_id,
instance=instance,
database=database
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
@GoogleCloudBaseHook.fallback_to_default_project_id
def export_instance(self, instance, body, project_id=None):
"""
Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump
or CSV file.
:param instance: Database instance ID of the Cloud SQL instance. This does not include the
project ID.
:type instance: str
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
try:
response = self.get_conn().instances().export(
project=project_id,
instance=instance,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
except HttpError as ex:
raise AirflowException(
'Exporting instance {} failed: {}'.format(instance, ex.content)
)
@GoogleCloudBaseHook.fallback_to_default_project_id
def import_instance(self, instance, body, project_id=None):
    """
    Imports data into a Cloud SQL instance from a SQL dump or CSV file in
    Cloud Storage.

    :param instance: Database instance ID. This does not include the
        project ID.
    :type instance: str
    :param body: The request body, as described in
        https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/import#request-body
    :type body: dict
    :param project_id: Project ID of the project that contains the instance. If set
        to None or missing, the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    :raises AirflowException: when the import API call fails.
    """
    try:
        # "import" is a Python keyword, so the client exposes the call
        # as import_().
        import_request = self.get_conn().instances().import_(
            project=project_id,
            instance=instance,
            body=body)
        api_response = import_request.execute(num_retries=self.num_retries)
        self._wait_for_operation_to_complete(
            project_id=project_id, operation_name=api_response["name"])
    except HttpError as ex:
        raise AirflowException(
            'Importing instance {} failed: {}'.format(instance, ex.content)
        )
def _wait_for_operation_to_complete(self, project_id, operation_name):
    """
    Waits for the named operation to complete - checks status of the
    asynchronous call.

    :param project_id: Project ID of the project that contains the instance.
    :type project_id: str
    :param operation_name: Name of the operation.
    :type operation_name: str
    :return: None
    :raises AirflowException: when the operation finished with errors.
    """
    conn = self.get_conn()
    while True:
        operation = conn.operations().get(
            project=project_id,
            operation=operation_name,
        ).execute(num_retries=self.num_retries)
        if operation.get("status") != CloudSqlOperationStatus.DONE:
            # Still running - poll again after a pause.
            time.sleep(TIME_TO_SLEEP_IN_SECONDS)
            continue
        error = operation.get("error")
        if error:
            # Extracting the errors list as string and trimming square braces
            raise AirflowException(str(error.get("errors"))[1:-1])
        # No meaningful info to return from the response in case of success
        return
# URL template for the latest proxy binary; formatted with (system, processor).
CLOUD_SQL_PROXY_DOWNLOAD_URL = "https://dl.google.com/cloudsql/cloud_sql_proxy.{}.{}"
# URL template for a pinned proxy version; formatted with
# (version, system, processor).
CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL = \
    "https://storage.googleapis.com/cloudsql-proxy/{}/cloud_sql_proxy.{}.{}"
# Extras keys under which GCP credentials are stored on the Airflow connection.
GCP_CREDENTIALS_KEY_PATH = "extra__google_cloud_platform__key_path"
GCP_CREDENTIALS_KEYFILE_DICT = "extra__google_cloud_platform__keyfile_dict"
class CloudSqlProxyRunner(LoggingMixin):
    """
    Downloads and runs cloud-sql-proxy as subprocess of the Python process.

    The cloud-sql-proxy needs to be downloaded and started before we can connect
    to the Google Cloud SQL instance via database connection. It establishes
    secure tunnel connection to the database. It establishes
    secure tunnel connection to the database. It authorizes using the
    GCP credentials that are passed by the configuration.

    More details about the proxy can be found here:
    https://cloud.google.com/sql/docs/mysql/sql-proxy
    """
    def __init__(self,
                 path_prefix,
                 instance_specification,
                 gcp_conn_id='google_cloud_default',
                 project_id=None,
                 sql_proxy_version=None,
                 sql_proxy_binary_path=None):
        """
        Creates the proxy runner class.

        :param path_prefix: Unique path prefix where proxy will be downloaded and
            directories created for unix sockets.
        :type path_prefix: str
        :param instance_specification: Specification of the instance to connect the
            proxy to. It should be specified in the form that is described in
            https://cloud.google.com/sql/docs/mysql/sql-proxy#multiple-instances in
            -instances parameter (typically in the form of ``<project>:<region>:<instance>``
            for UNIX socket connections and in the form of
            ``<project>:<region>:<instance>=tcp:<port>`` for TCP connections.
        :type instance_specification: str
        :param gcp_conn_id: Id of Google Cloud Platform connection to use for
            authentication
        :type gcp_conn_id: str
        :param project_id: Optional id of the GCP project to connect to - it overwrites
            default project id taken from the GCP connection.
        :type project_id: str
        :param sql_proxy_version: Specific version of SQL proxy to download
            (for example 'v1.13'). By default latest version is downloaded.
        :type sql_proxy_version: str
        :param sql_proxy_binary_path: If specified, then proxy will be
            used from the path specified rather than dynamically generated. This means
            that if the binary is not present in that path it will also be downloaded.
        :type sql_proxy_binary_path: str
        """
        super().__init__()
        self.path_prefix = path_prefix
        if not self.path_prefix:
            raise AirflowException("The path_prefix must not be empty!")
        # Set to True only when _download_sql_proxy_if_needed actually fetched
        # the binary, so stop_proxy() only deletes binaries this runner created.
        self.sql_proxy_was_downloaded = False
        self.sql_proxy_version = sql_proxy_version
        self.download_sql_proxy_dir = None
        # Handle of the running Popen subprocess; None while not running.
        self.sql_proxy_process = None
        self.instance_specification = instance_specification
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.command_line_parameters = []
        # Unix sockets for the proxied instances are created under this dir.
        self.cloud_sql_proxy_socket_directory = self.path_prefix
        self.sql_proxy_path = sql_proxy_binary_path if sql_proxy_binary_path \
            else self.path_prefix + "_cloud_sql_proxy"
        # Where a keyfile_dict from the connection extras gets materialized.
        self.credentials_path = self.path_prefix + "_credentials.json"
        self._build_command_line_parameters()
    def _build_command_line_parameters(self):
        # Base proxy flags: socket directory and the instance(s) to forward.
        # Credential flags are appended later in start_proxy().
        self.command_line_parameters.extend(
            ['-dir', self.cloud_sql_proxy_socket_directory])
        self.command_line_parameters.extend(
            ['-instances', self.instance_specification])
    @staticmethod
    def _is_os_64bit():
        # True on 64-bit machines (e.g. 'x86_64' / 'amd64' endings).
        return platform.machine().endswith('64')
    def _download_sql_proxy_if_needed(self):
        """Download the proxy binary to ``self.sql_proxy_path`` unless present."""
        if os.path.isfile(self.sql_proxy_path):
            self.log.info("cloud-sql-proxy is already present")
            return
        system = platform.system().lower()
        processor = "amd64" if CloudSqlProxyRunner._is_os_64bit() else "386"
        if not self.sql_proxy_version:
            download_url = CLOUD_SQL_PROXY_DOWNLOAD_URL.format(system, processor)
        else:
            download_url = CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL.format(
                self.sql_proxy_version, system, processor)
        proxy_path_tmp = self.sql_proxy_path + ".tmp"
        self.log.info("Downloading cloud_sql_proxy from %s to %s",
                      download_url, proxy_path_tmp)
        r = requests.get(download_url, allow_redirects=True)
        # Downloading to .tmp file first to avoid case where partially downloaded
        # binary is used by parallel operator which uses the same fixed binary path
        with open(proxy_path_tmp, 'wb') as f:
            f.write(r.content)
        # NOTE(review): the body is written before the status check, so on a
        # non-200 response the .tmp file (containing the error payload) is
        # left on disk - consider checking status before writing.
        if r.status_code != 200:
            raise AirflowException(
                "The cloud-sql-proxy could not be downloaded. Status code = {}. "
                "Reason = {}".format(r.status_code, r.reason))
        self.log.info("Moving sql_proxy binary from %s to %s",
                      proxy_path_tmp, self.sql_proxy_path)
        shutil.move(proxy_path_tmp, self.sql_proxy_path)
        os.chmod(self.sql_proxy_path, 0o744)  # Set executable bit
        self.sql_proxy_was_downloaded = True
    @provide_session
    def _get_credential_parameters(self, session):
        """Build the credential-related CLI flags from the GCP connection extras."""
        # Queries the Connection table directly (instead of get_connection)
        # so the row can be expunged from the session afterwards.
        connection = session.query(Connection). \
            filter(Connection.conn_id == self.gcp_conn_id).first()
        session.expunge_all()
        if GCP_CREDENTIALS_KEY_PATH in connection.extra_dejson:
            credential_params = [
                '-credential_file',
                connection.extra_dejson[GCP_CREDENTIALS_KEY_PATH]
            ]
        elif GCP_CREDENTIALS_KEYFILE_DICT in connection.extra_dejson:
            # The key material is inline JSON - write it to a temp file since
            # the proxy only accepts a file path.
            credential_file_content = json.loads(
                connection.extra_dejson[GCP_CREDENTIALS_KEYFILE_DICT])
            self.log.info("Saving credentials to %s", self.credentials_path)
            with open(self.credentials_path, "w") as f:
                json.dump(credential_file_content, f)
            credential_params = [
                '-credential_file',
                self.credentials_path
            ]
        else:
            self.log.info(
                "The credentials are not supplied by neither key_path nor "
                "keyfile_dict of the gcp connection %s. Falling back to "
                "default activated account", self.gcp_conn_id)
            credential_params = []
        if not self.instance_specification:
            # Empty instance specification means "forward all instances of a
            # project" - the proxy then requires an explicit -projects flag.
            project_id = connection.extra_dejson.get(
                'extra__google_cloud_platform__project')
            if self.project_id:
                project_id = self.project_id
            if not project_id:
                raise AirflowException("For forwarding all instances, the project id "
                                       "for GCP should be provided either "
                                       "by project_id extra in the GCP connection or by "
                                       "project_id provided in the operator.")
            credential_params.extend(['-projects', project_id])
        return credential_params
    def start_proxy(self):
        """
        Starts Cloud SQL Proxy.

        You have to remember to stop the proxy if you started it!
        """
        self._download_sql_proxy_if_needed()
        if self.sql_proxy_process:
            raise AirflowException("The sql proxy is already running: {}".format(
                self.sql_proxy_process))
        else:
            command_to_run = [self.sql_proxy_path]
            command_to_run.extend(self.command_line_parameters)
            try:
                self.log.info("Creating directory %s",
                              self.cloud_sql_proxy_socket_directory)
                os.makedirs(self.cloud_sql_proxy_socket_directory)
            except OSError:
                # Needed for python 2 compatibility (exists_ok missing)
                pass
            command_to_run.extend(self._get_credential_parameters())
            self.log.info("Running the command: `%s`", " ".join(command_to_run))
            self.sql_proxy_process = Popen(command_to_run,
                                           stdin=PIPE, stdout=PIPE, stderr=PIPE)
            self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
            # Read the proxy's stderr line by line until it either reports
            # readiness, prints an error, or exits early.
            while True:
                line = self.sql_proxy_process.stderr.readline().decode('utf-8')
                return_code = self.sql_proxy_process.poll()
                if line == '' and return_code is not None:
                    # EOF on stderr plus a return code: the proxy died.
                    self.sql_proxy_process = None
                    raise AirflowException(
                        "The cloud_sql_proxy finished early with return code {}!".format(
                            return_code))
                if line != '':
                    self.log.info(line)
                if "googleapi: Error" in line or "invalid instance name:" in line:
                    self.stop_proxy()
                    raise AirflowException(
                        "Error when starting the cloud_sql_proxy {}!".format(
                            line))
                if "Ready for new connections" in line:
                    return
    def stop_proxy(self):
        """
        Stops running proxy.

        You should stop the proxy after you stop using it.
        """
        if not self.sql_proxy_process:
            raise AirflowException("The sql proxy is not started yet")
        else:
            self.log.info("Stopping the cloud_sql_proxy pid: %s",
                          self.sql_proxy_process.pid)
            self.sql_proxy_process.kill()
            self.sql_proxy_process = None
        # Cleanup!
        self.log.info("Removing the socket directory: %s",
                      self.cloud_sql_proxy_socket_directory)
        shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
        if self.sql_proxy_was_downloaded:
            self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
            # Silently ignore if the file has already been removed (concurrency)
            try:
                os.remove(self.sql_proxy_path)
            except OSError as e:
                if not e.errno == errno.ENOENT:
                    raise
        else:
            self.log.info("Skipped removing proxy - it was not downloaded: %s",
                          self.sql_proxy_path)
        if os.path.isfile(self.credentials_path):
            self.log.info("Removing generated credentials file %s",
                          self.credentials_path)
            # Here file cannot be delete by concurrent task (each task has its own copy)
            os.remove(self.credentials_path)
    def get_proxy_version(self):
        """
        Returns version of the Cloud SQL Proxy.
        """
        self._download_sql_proxy_if_needed()
        command_to_run = [self.sql_proxy_path]
        command_to_run.extend(['--version'])
        command_to_run.extend(self._get_credential_parameters())
        result = subprocess.check_output(command_to_run).decode('utf-8')
        # NOTE(review): the class [V|v] also matches a literal '|' - harmless
        # here, but '[Vv]' was probably intended.
        pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$")
        m = pattern.match(result)
        if m:
            return m.group(1)
        else:
            return None
    def get_socket_path(self):
        """
        Retrieves UNIX socket path used by Cloud SQL Proxy.

        :return: The dynamically generated path for the socket created by the proxy.
        :rtype: str
        """
        return self.cloud_sql_proxy_socket_directory + "/" + self.instance_specification
# URI templates for the dynamically generated Postgres/MySQL connections,
# keyed first by database type, then by connectivity mode
# (proxy via tcp/socket, or direct "public" IP with/without SSL).
# Placeholders are filled in by CloudSqlDatabaseHook._generate_connection_uri.
CONNECTION_URIS = {
    "postgres": {
        "proxy": {
            "tcp":
                "postgresql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket":
                "postgresql://{user}:{password}@{socket_path}/{database}"
        },
        "public": {
            "ssl":
                "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}?"
                "sslmode=verify-ca&"
                "sslcert={client_cert_file}&"
                "sslkey={client_key_file}&"
                "sslrootcert={server_ca_file}",
            "non-ssl":
                "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}"
        }
    },
    "mysql": {
        "proxy": {
            "tcp":
                "mysql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket":
                "mysql://{user}:{password}@localhost/{database}?"
                "unix_socket={socket_path}"
        },
        "public": {
            "ssl":
                "mysql://{user}:{password}@{public_ip}:{public_port}/{database}?"
                "ssl={ssl_spec}",
            "non-ssl":
                "mysql://{user}:{password}@{public_ip}:{public_port}/{database}"
        }
    }
}
# Database types accepted in the 'database_type' connection extra.
CLOUD_SQL_VALID_DATABASE_TYPES = ['postgres', 'mysql']
# noinspection PyAbstractClass
class CloudSqlDatabaseHook(BaseHook):
    """
    Serves DB connection configuration for Google Cloud SQL (Connections
    of *gcpcloudsql://* type).

    The hook is a "meta" one. It does not perform an actual connection.
    It is there to retrieve all the parameters configured in gcpcloudsql:// connection,
    start/stop Cloud SQL Proxy if needed, dynamically generate Postgres or MySQL
    connection in the database and return an actual Postgres or MySQL hook.
    The returned Postgres/MySQL hooks are using direct connection or Cloud SQL
    Proxy socket/TCP as configured.

    Main parameters of the hook are retrieved from the standard URI components:

    * **user** - User name to authenticate to the database (from login of the URI).
    * **password** - Password to authenticate to the database (from password of the URI).
    * **public_ip** - IP to connect to for public connection (from host of the URI).
    * **public_port** - Port to connect to for public connection (from port of the URI).
    * **database** - Database to connect to (from schema of the URI).

    Remaining parameters are retrieved from the extras (URI query parameters):

    * **project_id** - Optional, Google Cloud Platform project where the Cloud SQL
       instance exists. If missing, default project id passed is used.
    * **instance** - Name of the instance of the Cloud SQL database instance.
    * **location** - The location of the Cloud SQL instance (for example europe-west1).
    * **database_type** - The type of the database instance (MySQL or Postgres).
    * **use_proxy** - (default False) Whether SQL proxy should be used to connect to Cloud
      SQL DB.
    * **use_ssl** - (default False) Whether SSL should be used to connect to Cloud SQL DB.
      You cannot use proxy and SSL together.
    * **sql_proxy_use_tcp** - (default False) If set to true, TCP is used to connect via
      proxy, otherwise UNIX sockets are used.
    * **sql_proxy_binary_path** - Optional path to Cloud SQL Proxy binary. If the binary
      is not specified or the binary is not present, it is automatically downloaded.
    * **sql_proxy_version** - Specific version of the proxy to download (for example
      v1.13). If not specified, the latest version is downloaded.
    * **sslcert** - Path to client certificate to authenticate when SSL is used.
    * **sslkey** - Path to client private key to authenticate when SSL is used.
    * **sslrootcert** - Path to server's certificate to authenticate when SSL is used.

    :param gcp_cloudsql_conn_id: URL of the connection
    :type gcp_cloudsql_conn_id: str
    :param default_gcp_project_id: Default project id used if project_id not specified
        in the connection URL
    :type default_gcp_project_id: str
    """
    _conn = None
    def __init__(self, gcp_cloudsql_conn_id='google_cloud_sql_default',
                 default_gcp_project_id=None):
        super().__init__(source=None)
        self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
        self.cloudsql_connection = self.get_connection(self.gcp_cloudsql_conn_id)
        self.extras = self.cloudsql_connection.extra_dejson
        self.project_id = self.extras.get('project_id', default_gcp_project_id)
        self.instance = self.extras.get('instance')
        self.database = self.cloudsql_connection.schema
        self.location = self.extras.get('location')
        self.database_type = self.extras.get('database_type')
        # Boolean-ish extras arrive as strings; see _get_bool for the parsing.
        self.use_proxy = self._get_bool(self.extras.get('use_proxy', 'False'))
        self.use_ssl = self._get_bool(self.extras.get('use_ssl', 'False'))
        self.sql_proxy_use_tcp = self._get_bool(
            self.extras.get('sql_proxy_use_tcp', 'False'))
        self.sql_proxy_version = self.extras.get('sql_proxy_version')
        self.sql_proxy_binary_path = self.extras.get('sql_proxy_binary_path')
        self.user = self.cloudsql_connection.login
        self.password = self.cloudsql_connection.password
        self.public_ip = self.cloudsql_connection.host
        self.public_port = self.cloudsql_connection.port
        self.sslcert = self.extras.get('sslcert')
        self.sslkey = self.extras.get('sslkey')
        self.sslrootcert = self.extras.get('sslrootcert')
        # Port and socket path and db_hook are automatically generated
        self.sql_proxy_tcp_port = None
        self.sql_proxy_unique_path = None
        self.db_hook = None
        self.reserved_tcp_socket = None
        # Generated based on clock + clock sequence. Unique per host (!).
        # This is important as different hosts share the database
        self.db_conn_id = str(uuid.uuid1())
        self._validate_inputs()
    @staticmethod
    def _get_bool(val):
        # NOTE(review): only the exact string 'False' maps to False; any other
        # truthy value (including 'false' lower-case) is returned as-is and is
        # truthy in boolean context - confirm this is intended.
        if val == 'False':
            return False
        return val
    @staticmethod
    def _check_ssl_file(file_to_check, name):
        """Raise AirflowException unless *file_to_check* is a set, existing path."""
        if not file_to_check:
            raise AirflowException("SSL connections requires {name} to be set".
                                   format(name=name))
        if not os.path.isfile(file_to_check):
            raise AirflowException("The {file_to_check} must be a readable file".
                                   format(file_to_check=file_to_check))
    def _validate_inputs(self):
        """Validate required extras; called once from __init__."""
        if self.project_id == '':
            raise AirflowException("The required extra 'project_id' is empty")
        if not self.location:
            raise AirflowException("The required extra 'location' is empty or None")
        if not self.instance:
            raise AirflowException("The required extra 'instance' is empty or None")
        if self.database_type not in CLOUD_SQL_VALID_DATABASE_TYPES:
            raise AirflowException("Invalid database type '{}'. Must be one of {}".format(
                self.database_type, CLOUD_SQL_VALID_DATABASE_TYPES
            ))
        if self.use_proxy and self.use_ssl:
            raise AirflowException("Cloud SQL Proxy does not support SSL connections."
                                   " SSL is not needed as Cloud SQL Proxy "
                                   "provides encryption on its own")
    def validate_ssl_certs(self):
        # Only relevant when SSL is requested; each file must exist on disk.
        if self.use_ssl:
            self._check_ssl_file(self.sslcert, "sslcert")
            self._check_ssl_file(self.sslkey, "sslkey")
            self._check_ssl_file(self.sslrootcert, "sslrootcert")
    def validate_socket_path_length(self):
        """Fail early if the generated UNIX socket path would exceed the OS limit."""
        if self.use_proxy and not self.sql_proxy_use_tcp:
            if self.database_type == 'postgres':
                # Postgres appends its own socket-file suffix to the directory.
                suffix = "/.s.PGSQL.5432"
            else:
                suffix = ""
            expected_path = "{}/{}:{}:{}{}".format(
                self._generate_unique_path(),
                self.project_id, self.instance,
                self.database, suffix)
            if len(expected_path) > UNIX_PATH_MAX:
                self.log.info("Too long (%s) path: %s", len(expected_path), expected_path)
                raise AirflowException(
                    "The UNIX socket path length cannot exceed {} characters "
                    "on Linux system. Either use shorter instance/database "
                    "name or switch to TCP connection. "
                    "The socket path for Cloud SQL proxy is now:"
                    "{}".format(
                        UNIX_PATH_MAX, expected_path))
    @staticmethod
    def _generate_unique_path():
        # We are not using mkdtemp here as the path generated with mkdtemp
        # can be close to 60 characters and there is a limitation in
        # length of socket path to around 100 characters in total.
        # We append project/location/instance to it later and postgres
        # appends its own prefix, so we chose a shorter "/tmp/[8 random characters]" -
        random.seed()
        while True:
            candidate = "/tmp/" + ''.join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
            if not os.path.exists(candidate):
                return candidate
    @staticmethod
    def _quote(value):
        # URL-quote a value; passes None through unchanged.
        return quote_plus(value) if value else None
    def _generate_connection_uri(self):
        """Build the Postgres/MySQL URI for the dynamically created connection."""
        if self.use_proxy:
            if self.sql_proxy_use_tcp:
                if not self.sql_proxy_tcp_port:
                    self.reserve_free_tcp_port()
            if not self.sql_proxy_unique_path:
                self.sql_proxy_unique_path = self._generate_unique_path()
        database_uris = CONNECTION_URIS[self.database_type]
        ssl_spec = None
        socket_path = None
        if self.use_proxy:
            proxy_uris = database_uris['proxy']
            if self.sql_proxy_use_tcp:
                format_string = proxy_uris['tcp']
            else:
                format_string = proxy_uris['socket']
                socket_path = \
                    "{sql_proxy_socket_path}/{instance_socket_name}".format(
                        sql_proxy_socket_path=self.sql_proxy_unique_path,
                        instance_socket_name=self._get_instance_socket_name()
                    )
        else:
            public_uris = database_uris['public']
            if self.use_ssl:
                format_string = public_uris['ssl']
                ssl_spec = {
                    'cert': self.sslcert,
                    'key': self.sslkey,
                    'ca': self.sslrootcert
                }
            else:
                format_string = public_uris['non-ssl']
        if not self.user:
            raise AirflowException("The login parameter needs to be set in connection")
        if not self.public_ip:
            # NOTE(review): this message says "location" but the check guards
            # public_ip (the URI host) - message looks copy-pasted.
            raise AirflowException("The location parameter needs to be set in connection")
        if not self.password:
            raise AirflowException("The password parameter needs to be set in connection")
        if not self.database:
            raise AirflowException("The database parameter needs to be set in connection")
        connection_uri = format_string.format(
            user=quote_plus(self.user) if self.user else '',
            password=quote_plus(self.password) if self.password else '',
            database=quote_plus(self.database) if self.database else '',
            public_ip=self.public_ip,
            public_port=self.public_port,
            proxy_port=self.sql_proxy_tcp_port,
            socket_path=self._quote(socket_path),
            ssl_spec=self._quote(json.dumps(ssl_spec)) if ssl_spec else '',
            client_cert_file=self._quote(self.sslcert) if self.sslcert else '',
            # NOTE(review): the two lines below gate sslkey/sslrootcert on
            # self.sslcert rather than on their own attributes - looks like a
            # copy-paste bug; confirm before relying on partial SSL configs.
            client_key_file=self._quote(self.sslkey) if self.sslcert else '',
            server_ca_file=self._quote(self.sslrootcert if self.sslcert else '')
        )
        # Mask the password in the logged URI. When there is no password the
        # literal string 'PASSWORD' is the replace target (a no-op on the URI).
        self.log.info("DB connection URI %s", connection_uri.replace(
            quote_plus(self.password) if self.password else 'PASSWORD', 'XXXXXXXXXXXX'))
        return connection_uri
    def _get_instance_socket_name(self):
        # '<project>:<location>:<instance>' - matches the proxy's socket naming.
        return self.project_id + ":" + self.location + ":" + self.instance
    def _get_sqlproxy_instance_specification(self):
        instance_specification = self._get_instance_socket_name()
        # For TCP mode the proxy expects '=tcp:<port>' appended.
        if self.sql_proxy_use_tcp:
            instance_specification += "=tcp:" + str(self.sql_proxy_tcp_port)
        return instance_specification
    @provide_session
    def create_connection(self, session=None):
        """
        Create connection in the Connection table, according to whether it uses
        proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.

        :param session: Session of the SQL Alchemy ORM (automatically generated with
            decorator).
        """
        connection = Connection(conn_id=self.db_conn_id)
        uri = self._generate_connection_uri()
        self.log.info("Creating connection %s", self.db_conn_id)
        connection.parse_from_uri(uri)
        session.add(connection)
        session.commit()
    @provide_session
    def retrieve_connection(self, session=None):
        """
        Retrieves the dynamically created connection from the Connection table.

        :param session: Session of the SQL Alchemy ORM (automatically generated with
            decorator).
        """
        self.log.info("Retrieving connection %s", self.db_conn_id)
        connections = session.query(Connection).filter(
            Connection.conn_id == self.db_conn_id)
        if connections.count():
            return connections[0]
        return None
    @provide_session
    def delete_connection(self, session=None):
        """
        Delete the dynamically created connection from the Connection table.

        :param session: Session of the SQL Alchemy ORM (automatically generated with
            decorator).
        """
        self.log.info("Deleting connection %s", self.db_conn_id)
        connections = session.query(Connection).filter(
            Connection.conn_id == self.db_conn_id)
        if connections.count():
            connection = connections[0]
            session.delete(connection)
            session.commit()
        else:
            self.log.info("Connection was already deleted!")
    def get_sqlproxy_runner(self):
        """
        Retrieve Cloud SQL Proxy runner. It is used to manage the proxy
        lifecycle per task.

        :return: The Cloud SQL Proxy runner.
        :rtype: CloudSqlProxyRunner
        """
        if not self.use_proxy:
            raise AirflowException("Proxy runner can only be retrieved in case of use_proxy = True")
        return CloudSqlProxyRunner(
            path_prefix=self.sql_proxy_unique_path,
            instance_specification=self._get_sqlproxy_instance_specification(),
            project_id=self.project_id,
            sql_proxy_version=self.sql_proxy_version,
            sql_proxy_binary_path=self.sql_proxy_binary_path
        )
    def get_database_hook(self):
        """
        Retrieve database hook. This is the actual Postgres or MySQL database hook
        that uses proxy or connects directly to the Google Cloud SQL database.
        """
        if self.database_type == 'postgres':
            self.db_hook = PostgresHook(postgres_conn_id=self.db_conn_id,
                                        schema=self.database)
        else:
            self.db_hook = MySqlHook(mysql_conn_id=self.db_conn_id,
                                     schema=self.database)
        return self.db_hook
    def cleanup_database_hook(self):
        """
        Clean up database hook after it was used.
        """
        # For Postgres, flush any server notices collected on the connection
        # into the task log.
        if self.database_type == 'postgres':
            if hasattr(self.db_hook,
                       'conn') and self.db_hook.conn and self.db_hook.conn.notices:
                for output in self.db_hook.conn.notices:
                    self.log.info(output)
    def reserve_free_tcp_port(self):
        """
        Reserve free TCP port to be used by Cloud SQL Proxy
        """
        # Bind to port 0 so the OS picks a free port; the socket is kept open
        # (reserved) until free_reserved_port() is called.
        self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.reserved_tcp_socket.bind(('127.0.0.1', 0))
        self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]
    def free_reserved_port(self):
        """
        Free TCP port. Makes it immediately ready to be used by Cloud SQL Proxy.
        """
        if self.reserved_tcp_socket:
            self.reserved_tcp_socket.close()
            self.reserved_tcp_socket = None
| {
"content_hash": "04dbfee45d4e62d5275ac0b587e6d21c",
"timestamp": "",
"source": "github",
"line_count": 980,
"max_line_length": 100,
"avg_line_length": 43.67755102040816,
"alnum_prop": 0.6038454350060742,
"repo_name": "r39132/airflow",
"id": "2f2217482cc24e4dd24371fd7bb7c84c686c3e50",
"size": "43615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/gcp_sql_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
import sys
def dec_to_bin(input_string):
    """Convert a decimal integer string to its binary digits (no '0b' prefix)."""
    number = int(input_string)
    return bin(number)[2:]
if __name__ == '__main__':
    # Read the file named on the command line and print the binary form of
    # each non-blank line.
    inputfile = sys.argv[1]
    with open(inputfile, 'r') as f:
        for line in f:
            stripped = line.strip()
            # Test the stripped value: iterating a file never yields '' (every
            # line keeps its newline), so the original `if line:` check always
            # passed and a blank line crashed int('') inside dec_to_bin.
            if stripped:
                # Parenthesized print keeps the script runnable under both
                # Python 2 and Python 3 (the bare print statement is a
                # SyntaxError on Python 3).
                print(dec_to_bin(stripped))
| {
"content_hash": "f68e87744ef9422a579a925f665bcec6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 46,
"avg_line_length": 20.384615384615383,
"alnum_prop": 0.5320754716981132,
"repo_name": "MikeDelaney/CodeEval",
"id": "433991fa626346491813f3e3d66ea58bface9141",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moderate/decimal_to_binary/dec_to_bin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35349"
}
],
"symlink_target": ""
} |
import sys
import aetros
if __name__ == '__main__':
sys.exit(aetros.main()) | {
"content_hash": "b03cd9ebc3d588efeb6c44d58196e937",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 27,
"avg_line_length": 16,
"alnum_prop": 0.6,
"repo_name": "aetros/aetros-cli",
"id": "6eeffea1df8739182f48402d1f66997b5524c492",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aetros/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "444876"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
import os
import networkx as nx
import sys
'''
Author:Oana Ursu
'''
def main():
    """Filter phenotype/DE/TF-gene input files down to expressed genes present
    in the PPI interactome (or TF-gene network), writing '<gene>\t<score>'
    lines for SAMNet.  Behavior branches on --input_type ('phen', 'DE' or
    'TFgene').
    """
    parser=OptionParser()
    parser.add_option('--input',dest='input',help='Input')
    parser.add_option('--input_type',dest='input_type',help='Input type. Can be phen or DE')
    parser.add_option('--PPI',dest='ppi',help='PPI pkl',
                      default='/nfs/vendata/oursu/oana/Gem_paper/data/interactome/9606.mitab.01192011.uniq_miscore-localirefindex3-20110831.digraphno_UBC,EP300_symbol.pkl')
    parser.add_option('--expressed',dest='expressed',help='Expressed genes',default='/nfs/vendata/oursu/oana/paper_analysis/networks/2013-04-30/_FDR_0.05thresh0.3_vehMin_0.5_lfc_0.585_minFPKM_0.1_p_0.05_pseudocounts_exprProtsFPKM_0.1/0.1FPKM.genes')
    parser.add_option('--TFgene',dest='TFgene',help='TFgene network')
    parser.add_option('--out',dest='out',help='Out name')
    opts,args=parser.parse_args()
    #Get PPI nodes (only loaded for the input types that need the interactome)
    if opts.input_type=='phen' or opts.input_type=='TFgene':
        ppi=nx.read_gpickle(opts.ppi)
        ppi_nodes=ppi.nodes()
    #Get expressed genes: one gene symbol per line
    expressed=set()
    for line in open(opts.expressed,'r').readlines():
        expressed.add(line.strip())
    #setup the analysis for TFgene files here
    #TFgene mode: keep only edges whose TF is in the PPI and expressed,
    #then exit - the rest of the function is for 'phen'/'DE' inputs.
    if opts.input_type=='TFgene':
        print 'processing tfgene'
        out=open(opts.out,'w')
        counte=0
        for line in open(opts.input,'r').readlines():
            items=line.strip().split()
            tf=items[0]
            gene=items[1]
            score=items[2]
            if tf not in ppi_nodes:
                # RARB is singled out for debug logging only
                if tf=='RARB':
                    print 'losing '+tf+' because not in PPI'
                continue
            #if gene not in expressed:
            #    continue
            if tf not in expressed:
                if tf=='RARB':
                    print 'losing '+tf+' because not expressed'
                continue
            out.write(tf+'\t'+gene+'\t'+score+'\n')
        out.close()
        sys.exit()
    #Get TFgene target genes (second column) - used to filter DE inputs
    TFgene_genes=set()
    if opts.input_type=='DE':
        for line in open(opts.TFgene,'r').readlines():
            TFgene_genes.add(line.strip().split('\t')[1])
    #Read in input: first column is the (possibly '_'-joined) gene name,
    #second column is its score; 'NA' names are dropped
    input_genes={}
    for line in open(opts.input,'r').readlines():
        items=line.strip().split('\t')
        if items[0]=='NA':
            continue
        input_genes[items[0]]={}
        input_genes[items[0]]['score']=items[1]
    print 'New dataset ---------------------------------'
    #Check if expressed: an input entry counts as expressed if ANY of its
    #'_'-separated gene names is in the expressed set
    for input_gene in input_genes.keys():
        input_gene_split=input_gene.split('_')
        #print input_gene_split
        for gene in input_gene_split:
            #print 'checking '+gene
            if gene in expressed:
                print gene+' is expressed'
                input_genes[input_gene]['expressed']=True
            else:
                print gene+' is NOT expressed'
    #For phen, check if in interactome
    if opts.input_type=='phen':
        for input_gene in input_genes.keys():
            genes=input_gene.split('_')
            #Find first gene name that is in interactome and keep it
            for gene in genes:
                if gene in ppi_nodes:
                    if 'inNet' not in input_genes[input_gene].keys():
                        input_genes[input_gene]['inNet']=[]
                    #ONLY ADD IT IF EXPRESSED TOO
                    if gene in expressed:
                        input_genes[input_gene]['inNet'].append(gene)
    #For DE genes, check it is in the TFgene network
    if opts.input_type=='DE':
        for input_gene in input_genes.keys():
            if input_gene in TFgene_genes:
                if 'inNet' not in input_genes[input_gene].keys():
                    input_genes[input_gene]['inNet']=[]
                input_genes[input_gene]['inNet'].append(input_gene)
    #If multiple inputs are in the network under the same name, take the one with the highest score
    gene_to_highest_scoring_input={}
    for input_gene in input_genes.keys():
        if 'inNet' not in input_genes[input_gene].keys():
            continue
        else:
            genes=input_genes[input_gene]['inNet']
            for gene in genes:
                if gene not in gene_to_highest_scoring_input.keys():
                    gene_to_highest_scoring_input[gene]=input_gene
                if float(input_genes[input_gene]['score'])>float(input_genes[gene_to_highest_scoring_input[gene]]['score']):
                    gene_to_highest_scoring_input[gene]=input_gene
    out=open(opts.out,'w')
    #Write down the input to keep: expressed, in-network, and the highest
    #scoring input for its network gene name; 'written' guards against
    #emitting the same input entry more than once
    for input_gene in input_genes.keys():
        #print input_gene
        #print input_genes[input_gene]
        written=False
        if 'expressed' in input_genes[input_gene].keys():
            if input_genes[input_gene]['expressed']==True:
                if 'inNet' in input_genes[input_gene].keys():
                    for gene in input_genes[input_gene]['inNet']:
                        if not written:
                            if gene_to_highest_scoring_input[gene]==input_gene:
                                out.write(gene+'\t'+input_genes[input_gene]['score']+'\n')
                                written=True
    out.close()
# Script entry point.
if __name__=='__main__':
    main()
| {
"content_hash": "b09717bcd543688ee873f0c926def91a",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 249,
"avg_line_length": 38.61594202898551,
"alnum_prop": 0.5586413961343591,
"repo_name": "oursu/Gem_code",
"id": "74129053b7297d5d35d59cc4d181d9fd71b127f1",
"size": "5329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inputs/prepare_input_for_SAMNet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1006459"
},
{
"name": "Python",
"bytes": "11083"
},
{
"name": "R",
"bytes": "27520"
},
{
"name": "Shell",
"bytes": "15670"
}
],
"symlink_target": ""
} |
"""Functionality related with filters in a PyTables file."""
from __future__ import absolute_import
# Imports
# =======
import warnings
import numpy
from . import utilsextension, blosc_compressor_list, blosc_compcode_to_compname
from .exceptions import FiltersWarning
import six
# Public variables
# ================
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
all_complibs = ['zlib', 'lzo', 'bzip2', 'blosc']
all_complibs += ['blosc:%s' % cname for cname in blosc_compressor_list()]
"""List of all compression libraries."""
foreign_complibs = ['szip']
"""List of known but unsupported compression libraries."""
default_complib = 'zlib'
"""The default compression library."""
# Private variables
# =================
# Bit flags combined into a single byte describing filter properties
# (shuffle / fletcher32 / rounding) - presumably (de)serialized elsewhere
# in this module; confirm against the pack/unpack helpers.
_shuffle_flag = 0x1
_fletcher32_flag = 0x2
_rounding_flag = 0x4
# Classes
# =======
class Filters(object):
    """Container for filter properties.
    This class is meant to serve as a container that keeps information about
    the filter properties associated with the chunked leaves, that is Table,
    CArray, EArray and VLArray.
    Instances of this class can be directly compared for equality.
    Parameters
    ----------
    complevel : int
        Specifies a compression level for data. The allowed
        range is 0-9. A value of 0 (the default) disables
        compression.
    complib : str
        Specifies the compression library to be used. Right now, 'zlib' (the
        default), 'lzo', 'bzip2' and 'blosc' are supported. Additional
        compressors for Blosc like 'blosc:blosclz' ('blosclz' is the default
        in case the additional compressor is not specified), 'blosc:lz4',
        'blosc:lz4hc', 'blosc:snappy' and 'blosc:zlib' are supported too.
        Specifying a compression library which is not available in the
        system issues a FiltersWarning and sets the library to the default
        one.
    shuffle : bool
        Whether or not to use the *Shuffle*
        filter in the HDF5 library. This is normally used to improve
        the compression ratio. A false value disables shuffling and
        a true one enables it. The default value depends on whether
        compression is enabled or not; if compression is enabled,
        shuffling defaults to be enabled, else shuffling is
        disabled. Shuffling can only be used when compression is enabled.
    fletcher32 : bool
        Whether or not to use the
        *Fletcher32* filter in the HDF5 library.
        This is used to add a checksum on each data chunk. A false
        value (the default) disables the checksum.
    least_significant_digit : int
        If specified, data will be truncated (quantized). In conjunction
        with enabling compression, this produces 'lossy', but
        significantly more efficient compression. For example, if
        *least_significant_digit=1*, data will be quantized using
        ``around(scale*data)/scale``, where ``scale = 2**bits``, and
        bits is determined so that a precision of 0.1 is retained (in
        this case bits=4). Default is *None*, or no quantization.
    .. note::
        quantization is only applied if some form of compression is
        enabled
    Examples
    --------
    This is a small example on using the Filters class::
        import numpy
        import tables
        fileh = tables.open_file('test5.h5', mode='w')
        atom = Float32Atom()
        filters = Filters(complevel=1, complib='blosc', fletcher32=True)
        arr = fileh.create_earray(fileh.root, 'earray', atom, (0,2),
                                  "A growable array", filters=filters)
        # Append several rows in only one call
        arr.append(numpy.array([[1., 2.],
                                [2., 3.],
                                [3., 4.]], dtype=numpy.float32))
        # Print information on that enlargeable array
        print("Result Array:")
        print(repr(arr))
        fileh.close()
    This enforces the use of the Blosc library, a compression level of 1 and a
    Fletcher32 checksum filter as well. See the output of this example::
        Result Array:
        /earray (EArray(3, 2), fletcher32, shuffle, blosc(1)) 'A growable array'
        type = float32
        shape = (3, 2)
        itemsize = 4
        nrows = 3
        extdim = 0
        flavor = 'numpy'
        byteorder = 'little'
    .. rubric:: Filters attributes
    .. attribute:: fletcher32
        Whether the *Fletcher32* filter is active or not.
    .. attribute:: complevel
        The compression level (0 disables compression).
    .. attribute:: complib
        The compression filter used (irrelevant when compression is not
        enabled).
    .. attribute:: shuffle
        Whether the *Shuffle* filter is active or not.
    """
    @classmethod
    def _from_leaf(class_, leaf):
        # Alternate constructor: build a Filters instance describing the
        # filter pipeline already attached to an existing chunked *leaf*.
        # Get a dictionary with all the filters
        parent = leaf._v_parent
        filters_dict = utilsextension.get_filters(parent._v_objectid,
                                                  leaf._v_name)
        if filters_dict is None:
            filters_dict = {}  # not chunked
        # Start from "everything disabled"; _new=False skips the
        # library-availability checks performed by __init__.
        kwargs = dict(complevel=0, shuffle=False, fletcher32=False,  # all off
                      least_significant_digit=None, _new=False)
        for (name, values) in six.iteritems(filters_dict):
            if name == 'deflate':
                name = 'zlib'
            if name in all_complibs:
                kwargs['complib'] = name
                if name == "blosc":
                    # Blosc carries its own parameters: values[4] is the
                    # compression level and values[5] its internal shuffle.
                    kwargs['complevel'] = values[4]
                    # Shuffle filter is internal to blosc
                    if values[5]:
                        kwargs['shuffle'] = True
                    # In Blosc 1.3 another parameter is used for the compressor
                    if len(values) > 6:
                        cname = blosc_compcode_to_compname(values[6])
                        kwargs['complib'] = "blosc:%s" % cname
                else:
                    kwargs['complevel'] = values[0]
            elif name in foreign_complibs:
                kwargs['complib'] = name
                kwargs['complevel'] = 1  # any nonzero value will do
            elif name in ['shuffle', 'fletcher32']:
                kwargs[name] = True
        return class_(**kwargs)
    @classmethod
    def _unpack(class_, packed):
        """Create a new `Filters` object from a packed version.
        >>> Filters._unpack(0)
        Filters(complevel=0, shuffle=False, fletcher32=False, least_significant_digit=None)
        >>> Filters._unpack(0x101)
        Filters(complevel=1, complib='zlib', shuffle=False, fletcher32=False, least_significant_digit=None)
        >>> Filters._unpack(0x30109)
        Filters(complevel=9, complib='zlib', shuffle=True, fletcher32=True, least_significant_digit=None)
        >>> Filters._unpack(0x3010A)
        Traceback (most recent call last):
        ...
        ValueError: compression level must be between 0 and 9
        >>> Filters._unpack(0x1)
        Traceback (most recent call last):
        ...
        ValueError: invalid compression library id: 0
        """
        kwargs = {'_new': False}
        # Byte 0: compression level.
        kwargs['complevel'] = complevel = packed & 0xff
        packed >>= 8
        # Byte 1: compression library id (0 for none).
        if complevel > 0:
            complib_id = int(packed & 0xff)
            if not (0 < complib_id <= len(all_complibs)):
                raise ValueError("invalid compression library id: %d"
                                 % complib_id)
            kwargs['complib'] = all_complibs[complib_id - 1]
        packed >>= 8
        # Byte 2: parameterless filters.
        # These are raw masked ints; __init__ coerces them with bool().
        kwargs['shuffle'] = packed & _shuffle_flag
        kwargs['fletcher32'] = packed & _fletcher32_flag
        has_rounding = packed & _rounding_flag
        packed >>= 8
        # Byte 3: least significant digit.
        if has_rounding:
            kwargs['least_significant_digit'] = numpy.int8(packed & 0xff)
        else:
            kwargs['least_significant_digit'] = None
        return class_(**kwargs)
    def _pack(self):
        """Pack the `Filters` object into a 64-bit NumPy integer."""
        # Bytes are pushed most-significant first, mirroring the byte
        # layout consumed by _unpack above.
        packed = numpy.int64(0)
        # Byte 3: least significant digit.
        if self.least_significant_digit is not None:
            #assert isinstance(self.least_significant_digit, numpy.int8)
            packed |= self.least_significant_digit
        packed <<= 8
        # Byte 2: parameterless filters.
        if self.shuffle:
            packed |= _shuffle_flag
        if self.fletcher32:
            packed |= _fletcher32_flag
        # NOTE(review): truthiness test, so least_significant_digit == 0
        # would not set the rounding flag even though the digit byte was
        # written above — confirm a zero digit is not a valid setting.
        if self.least_significant_digit:
            packed |= _rounding_flag
        packed <<= 8
        # Byte 1: compression library id (0 for none).
        if self.complevel > 0:
            packed |= all_complibs.index(self.complib) + 1
        packed <<= 8
        # Byte 0: compression level.
        packed |= self.complevel
        return packed
    def __init__(self, complevel=0, complib=default_complib,
                 shuffle=True, fletcher32=False,
                 least_significant_digit=None, _new=True):
        if not (0 <= complevel <= 9):
            raise ValueError("compression level must be between 0 and 9")
        if _new and complevel > 0:
            # These checks are not performed when loading filters from disk.
            if complib not in all_complibs:
                raise ValueError(
                    "compression library ``%s`` is not supported; "
                    "it must be one of: %s"
                    % (complib, ", ".join(all_complibs)))
            if utilsextension.which_lib_version(complib) is None:
                warnings.warn("compression library ``%s`` is not available; "
                              "using ``%s`` instead"
                              % (complib, default_complib), FiltersWarning)
                complib = default_complib  # always available
        # Normalize all inputs to plain Python types.
        complevel = int(complevel)
        complib = str(complib)
        shuffle = bool(shuffle)
        fletcher32 = bool(fletcher32)
        if least_significant_digit is not None:
            least_significant_digit = numpy.int8(least_significant_digit)
        if complevel == 0:
            # Override some inputs when compression is not enabled.
            complib = None  # make it clear there is no compression
            shuffle = False  # shuffling and not compressing makes no sense
            least_significant_digit = None
        elif complib not in all_complibs:
            # Do not try to use a meaningful level for unsupported libs.
            # -1 acts as a sentinel recognized by __repr__ and _unpack users.
            complevel = -1
        self.complevel = complevel
        """The compression level (0 disables compression)."""
        self.complib = complib
        """The compression filter used (irrelevant when compression is
        not enabled).
        """
        self.shuffle = shuffle
        """Whether the *Shuffle* filter is active or not."""
        self.fletcher32 = fletcher32
        """Whether the *Fletcher32* filter is active or not."""
        self.least_significant_digit = least_significant_digit
        """The least significant digit to which data shall be truncated."""
    def __repr__(self):
        args, complevel = [], self.complevel
        if complevel >= 0:  # meaningful compression level
            args.append('complevel=%d' % complevel)
        if complevel != 0:  # compression enabled (-1 or > 0)
            args.append('complib=%r' % self.complib)
        args.append('shuffle=%s' % self.shuffle)
        args.append('fletcher32=%s' % self.fletcher32)
        args.append(
            'least_significant_digit=%s' % self.least_significant_digit)
        return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
    def __str__(self):
        return repr(self)
    def __eq__(self, other):
        # Attribute-wise comparison; anything that is not a Filters
        # (sub)instance compares unequal.
        if not isinstance(other, self.__class__):
            return False
        for attr in self.__dict__:
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True
    # XXX: API incompatible change for PyTables 3 line
    # Overriding __eq__ blocks inheritance of __hash__ in 3.x
    # def __hash__(self):
    #     return hash((self.__class__, self.complevel, self.complib,
    #                  self.shuffle, self.fletcher32))
    def copy(self, **override):
        """Get a copy of the filters, possibly overriding some arguments.
        Constructor arguments to be overridden must be passed as keyword
        arguments.
        Using this method is recommended over replacing the attributes of an
        instance, since instances of this class may become immutable in the
        future::
        >>> filters1 = Filters()
        >>> filters2 = filters1.copy()
        >>> filters1 == filters2
        True
        >>> filters1 is filters2
        False
        >>> filters3 = filters1.copy(complevel=1) #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        ValueError: compression library ``None`` is not supported...
        >>> filters3 = filters1.copy(complevel=1, complib='zlib')
        >>> print(filters1)
        Filters(complevel=0, shuffle=False, fletcher32=False, least_significant_digit=None)
        >>> print(filters3)
        Filters(complevel=1, complib='zlib', shuffle=False, fletcher32=False, least_significant_digit=None)
        >>> filters1.copy(foobar=42)
        Traceback (most recent call last):
        ...
        TypeError: __init__() got an unexpected keyword argument 'foobar'
        """
        newargs = self.__dict__.copy()
        newargs.update(override)
        return self.__class__(**newargs)
# Main part
# =========
def _test():
"""Run ``doctest`` on this module."""
import doctest
doctest.testmod()
# Allow running this module directly to execute its doctests.
if __name__ == '__main__':
    _test()
| {
"content_hash": "634ebba92a9461d39da52fc9b07ab92d",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 111,
"avg_line_length": 35.66921119592875,
"alnum_prop": 0.5840348123840776,
"repo_name": "jennolsen84/PyTables",
"id": "6530fbd24a25a096084e0087629134c44323917c",
"size": "14299",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "tables/filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896101"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "21598"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "3325716"
},
{
"name": "Shell",
"bytes": "16985"
}
],
"symlink_target": ""
} |
import MySQLdb as mdb
import sqlite3
import psycopg2
from MonQL.Inspect._utils import *
import traceback
class _SqlBase(db_comm):
    """Shared behaviour for the SQL inspectors (MySQL, PostgreSQL, SQLite).
    Subclasses must provide _get_con(), _get_tables() and _process_column().
    Python 2 code: uses ``except X, e`` and print statements.
    """
    # Lazily-created DB-API connection; reset to None by _close().
    _con=None
    def __init__(self, con_sets):
        # con_sets: dict of connection settings (host/user/pass/port/
        # dbname/path depending on the backend).
        self._con_sets = con_sets
        self._setup_types()
        self._con_cur()
        self.db_name = con_sets.get('dbname', None)
    def _con_cur(self):
        # Ensure self._con and self._cur exist, reconnecting after _close().
        if not self._con:
            self._con = self._get_con()
            self._cur = self._con.cursor()
    def get_tables(self, dbname):
        return self._get_tables(dbname)
    def get_table_fields(self, dbname, t_name):
        # Describe a table by selecting a single row from it.
        sql = 'SELECT * FROM %s.%s LIMIT 1' % (dbname, t_name)
        return self.get_query_fields(sql)
    def server_info(self):
        # Default: no server information; backends may override.
        return ''
    def get_query_fields(self, sql, ex_type = None):
        # Return (name, type) descriptors for the columns produced by *sql*.
        if 'limit' not in sql.lower():
            sql += ' LIMIT 1'
        cur, fields = self._execute_get_descrition(sql)
        values = cur.fetchone()
        if values:
            for field, v in zip(fields, values):
                if field[1] is None:
                    # Type unknown from the cursor description: infer it
                    # from the first row's Python value. Requires fields
                    # to be mutable (lists).
                    field[1] = type(v).__name__
        self._close()
        return fields
    def get_values(self, dbname, t_name, limit = 20):
        cur = self._execute('SELECT * FROM %s.%s LIMIT %d' % (dbname, t_name, limit))
        return self._process_data(cur.fetchall())[0]
    def execute(self, sql, ex_type = None):
        """Run *sql* and return (ok, result-or-error-message, fields)."""
        try:
            cur = self._execute(sql)
            data = cur.fetchall()
        except mdb.Error, e:
            error = 'ERROR %s: %s' % (type(e).__name__, e.args[1])
            return False, error, None
        except Exception, e:
            error = 'ERROR %s: %s' % (type(e).__name__, str(e))
            return False, error, None
        else:
            result, fields = self._process_data(data)
            print 'success %d results' % len(result)
            return True, result, fields
        finally:
            self._close()
    def _execute(self, command):
        # Run a single statement, logging and re-raising on failure.
        try:
            self._con_cur()
            self._cur.execute(command)
            return self._cur
        except Exception, e:
            print "Error: %s" % str(e)
            print 'SQL: %s' % command
            self._close()
            raise(e)
    def _process_data(self, data):
        # Convert raw rows into ([values...], label) pairs, capped at
        # MAX_ROWS (imported from MonQL.Inspect._utils via *).
        fields = [col[0] for col in self._cur.description]
        # Prefer a column whose name contains 'dbname' for the label's
        # second component; otherwise fall back to column 1.
        name_fields = [i for i, f in enumerate(fields) if 'dbname' in f]
        i2 = 1
        if len(name_fields) > 0:
            i2 = name_fields[0]
        data2 = []
        i = 0
        for row in data:
            label = self._create_label(row[0], row[i2])
            values = [self._smart_text(d) for d in row]
            data2.append((values, label))
            i += 1
            if i > MAX_ROWS:
                break
        self._close()
        return data2, fields
    def _execute_get_descrition(self, sql):
        # Run *sql* and return (cursor, fields) where fields is built by
        # the backend-specific _process_column().
        cur = self._execute(sql)
        fields = []
        for col in cur.description:
            self._process_column(col, fields)
        return cur, fields
    def _setup_types(self):
        # Map MySQLdb FIELD_TYPE codes to their names.
        # NOTE(review): this uses MySQLdb constants even for the
        # PostgreSQL/SQLite subclasses — confirm that is intended.
        self._types = {}
        for t in dir(mdb.constants.FIELD_TYPE):
            if not t.startswith('_'):
                v = getattr(mdb.constants.FIELD_TYPE, t)
                self._types[v] = t
    def _close(self):
        # Best-effort close; bare except keeps teardown from masking the
        # original error when the connection is already gone.
        try:
            self._con.close()
            self._con = None
        except:
            pass
class MySQL(_SqlBase):
    """MySQL inspector backed by MySQLdb."""
    def _get_con(self):
        # user and pass are optional in the settings dict; host and port
        # are required.
        return mdb.connect(self._con_sets['host'], self._con_sets.get('user', None),
                           self._con_sets.get('pass', None), port=self._con_sets['port'])
    def get_version(self):
        """Return the row produced by SELECT VERSION()."""
        cur = self._execute('SELECT VERSION()')
        return cur.fetchone()
    def get_databases(self):
        """Return the list of database names on the server."""
        cur = self._execute('SHOW DATABASES')
        dbs = [info[0] for info in cur.fetchall()]
        self._close()
        return dbs
    # NOTE: _execute_get_descrition is inherited from _SqlBase; the
    # previous verbatim copy of that method here was redundant and has
    # been removed.
    def _get_tables(self, dbname):
        """Return ([(table_name, status_row), ...], field_names)."""
        tables = []
        cur, fields = self._execute_get_descrition('SHOW TABLE STATUS IN %s' % dbname)
        field_names = [i[0] for i in fields]
        for t_info in cur.fetchall():
            tables.append((t_info[0], t_info))
        self._close()
        return tables, field_names
    def _process_column(self, col, fields):
        # Append a (mutable) list, not a tuple, for consistency with the
        # PostgreSQL/SQLite backends: _SqlBase.get_query_fields assigns
        # field[1] when the type is unknown, which would raise TypeError
        # on a tuple.
        fields.append([col[0], self._types[col[1]]])
class PostgreSQL(_SqlBase):
    """PostgreSQL inspector backed by psycopg2."""
    def _get_con(self):
        # Build a libpq-style DSN from the connection settings dict.
        return psycopg2.connect('user=%(user)s host=%(host)s dbname = %(dbname)s' % self._con_sets)
    def get_version(self):
        """Return only the version part of the server banner."""
        banner = self.server_info()
        if ' on' in banner:
            return banner[:banner.index(' on')]
        return banner
    def server_info(self):
        """Return the full SELECT VERSION() banner string."""
        cur = self._execute('SELECT VERSION()')
        return cur.fetchone()[0]
    def get_table_fields(self, dbname, t_name):
        """Describe *t_name*; PostgreSQL takes no database prefix here."""
        return self.get_query_fields('SELECT * FROM %s LIMIT 1' % t_name)
    def get_databases(self):
        """Return all non-template database names on the server."""
        cur = self._execute('SELECT datname FROM pg_database WHERE datistemplate = false')
        names = [row[0] for row in cur.fetchall()]
        self._close()
        return names
    def _get_tables(self, dbname):
        """Return ([(table_name, row), ...], field_names) from information_schema."""
        cur, fields = self._execute_get_descrition('SELECT * FROM information_schema.tables')
        field_names = [f[0] for f in fields]
        # Column 2 of information_schema.tables rows is the table name.
        tables = [(row[2], row) for row in cur.fetchall()]
        self._close()
        return tables, field_names
    def _process_column(self, col, fields):
        # Unknown type codes map to None; get_query_fields may fill them in.
        fields.append([col.name, self._types.get(col.type_code, None)])
class SQLite(_SqlBase):
    """SQLite inspector backed by the stdlib sqlite3 module."""
    def _get_con(self):
        return sqlite3.connect(self._con_sets['path'])
    def get_version(self):
        """Return the version of the underlying SQLite library."""
        return sqlite3.sqlite_version
    def get_databases(self):
        # A SQLite file is a single database: nothing to enumerate.
        return []
    def _get_tables(self, dbname):
        """Return ([(table_name, row), ...], field_names) from sqlite_master."""
        cur, fields = self._execute_get_descrition("SELECT * FROM sqlite_master WHERE type='table';")
        field_names = [f[0] for f in fields]
        # Column 1 of sqlite_master rows is the object name.
        tables = [(row[1], row) for row in cur.fetchall()]
        self._close()
        return tables, field_names
    def _process_column(self, col, fields):
        # sqlite3 cursor descriptions carry no usable type information.
        fields.append([col[0], None])
| {
"content_hash": "c942a7bf22162c03031df5d0237d4cd5",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 101,
"avg_line_length": 31.301435406698566,
"alnum_prop": 0.530265973708346,
"repo_name": "samuelcolvin/MonQL",
"id": "6083dde5a1c55b48b4025b8071c431f5e0ad9718",
"size": "6542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MonQL/Inspect/_sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "745"
},
{
"name": "JavaScript",
"bytes": "6149"
},
{
"name": "Python",
"bytes": "26970"
}
],
"symlink_target": ""
} |
import argparse
import atexit
import copy
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
try:
import django
except ImportError as e:
raise RuntimeError(
'Django module not found, reference tests/README.rst for instructions.'
) from e
else:
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.log import DEFAULT_LOGGING
from django.utils.version import PY37
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango40Warning)
# Make runtime warning errors to ensure no usage of error prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
# Directory containing this script; test apps are discovered beneath it.
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Removing the temporary TMPDIR.
atexit.register(shutil.rmtree, TMPDIR)
# Subdirectories of RUNTESTS_DIR that are not themselves test apps.
SUBDIRS_TO_SKIP = [
    'data',
    'import_error_package',
    'test_runner_apps',
]
# Apps installed for every test run, regardless of the labels requested.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.admin.apps.SimpleAdminConfig',
    'django.contrib.staticfiles',
]
# Middleware active for every test run.
ALWAYS_MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
    'flatpages_tests': 'django.contrib.flatpages',
    'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
    """Discover test app modules under RUNTESTS_DIR.
    Returns a list of (modpath, module_name) pairs where modpath is None
    for top-level test apps and 'gis_tests' for the nested GIS apps.
    """
    modules = []
    discovery_paths = [(None, RUNTESTS_DIR)]
    # Work on a copy so repeated calls don't keep appending 'gis_tests'
    # to the module-level SUBDIRS_TO_SKIP list.
    subdirs_to_skip = list(SUBDIRS_TO_SKIP)
    if connection.features.gis_enabled:
        # GIS tests are in nested apps
        discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
    else:
        subdirs_to_skip.append('gis_tests')
    for modpath, dirpath in discovery_paths:
        for f in os.scandir(dirpath):
            # A test app is a non-skipped package directory (no dots in
            # the name, contains an __init__.py).
            if ('.' not in f.name and
                    os.path.basename(f.name) not in subdirs_to_skip and
                    not f.is_file() and
                    os.path.exists(os.path.join(f.path, '__init__.py'))):
                modules.append((modpath, f.name))
    return modules
def get_installed():
    """Return the dotted names of all currently installed applications."""
    return [config.name for config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel, start_at, start_after):
    """Prepare Django settings and INSTALLED_APPS for a test run.
    Returns a dict of the original settings values so teardown() can
    restore them afterwards.
    """
    # Reduce the given test labels to just the app module path.
    test_labels_set = set()
    for label in test_labels:
        bits = label.split('.')[:1]
        test_labels_set.add('.'.join(bits))
    if verbosity >= 1:
        msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
        max_parallel = default_test_processes() if parallel == 0 else parallel
        if max_parallel > 1:
            msg += " with up to %d processes" % max_parallel
        print(msg)
    # Force declaring available_apps in TransactionTestCase for faster tests.
    def no_available_apps(self):
        raise Exception("Please define available_apps in TransactionTestCase "
                        "and its subclasses.")
    TransactionTestCase.available_apps = property(no_available_apps)
    TestCase.available_apps = None
    # Snapshot of the settings this function mutates; returned to the
    # caller and restored by teardown().
    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATES': settings.TEMPLATES,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
        'MIDDLEWARE': settings.MIDDLEWARE,
    }
    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
    settings.TEMPLATES = [{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    }]
    settings.LANGUAGE_CODE = 'en'
    settings.SITE_ID = 1
    settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
    settings.MIGRATION_MODULES = {
        # This lets us skip creating migrations for the test models as many of
        # them depend on one of the following contrib applications.
        'auth': None,
        'contenttypes': None,
        'sessions': None,
    }
    log_config = copy.deepcopy(DEFAULT_LOGGING)
    # Filter out non-error logging so we don't have to capture it in lots of
    # tests.
    log_config['loggers']['django']['level'] = 'ERROR'
    settings.LOGGING = log_config
    settings.SILENCED_SYSTEM_CHECKS = [
        'fields.W342',  # ForeignKey(unique=True) -> OneToOneField
    ]
    # Load all the ALWAYS_INSTALLED_APPS.
    django.setup()
    # It would be nice to put this validation earlier but it must come after
    # django.setup() so that connection.features.gis_enabled can be accessed
    # without raising AppRegistryNotReady when running gis_tests in isolation
    # on some backends (e.g. PostGIS).
    if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
        print('Aborting: A GIS database backend is required to run gis_tests.')
        sys.exit(1)
    def _module_match_label(module_label, label):
        # Exact or ancestor match.
        return module_label == label or module_label.startswith(label + '.')
    # Load all the test model apps.
    test_modules = get_test_modules()
    found_start = not (start_at or start_after)
    installed_app_names = set(get_installed())
    for modpath, module_name in test_modules:
        if modpath:
            module_label = modpath + '.' + module_name
        else:
            module_label = module_name
        # Skip modules until the --start-at/--start-after point is reached.
        if not found_start:
            if start_at and _module_match_label(module_label, start_at):
                found_start = True
            elif start_after and _module_match_label(module_label, start_after):
                found_start = True
                continue
            else:
                continue
        # if the module (or an ancestor) was named on the command line, or
        # no modules were named (i.e., run all), import
        # this module and add it to INSTALLED_APPS.
        module_found_in_labels = not test_labels or any(
            _module_match_label(module_label, label) for label in test_labels_set
        )
        if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
            settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
        if module_found_in_labels and module_label not in installed_app_names:
            if verbosity >= 2:
                print("Importing application %s" % module_name)
            settings.INSTALLED_APPS.append(module_label)
    # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases.
    gis = 'django.contrib.gis'
    if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
        if verbosity >= 2:
            print("Importing application %s" % gis)
        settings.INSTALLED_APPS.append(gis)
    apps.set_installed_apps(settings.INSTALLED_APPS)
    return state
def teardown(state):
    """Restore the settings captured by setup() and drop the stale TMPDIR finalizer."""
    # Put every saved setting back on the settings object.
    for setting_name, original_value in state.items():
        setattr(settings, setting_name, original_value)
    # The multiprocessing.util finalizer would try to remove TMPDIR a
    # second time (the atexit.register(shutil.rmtree, TMPDIR) handler
    # already deletes it), causing FileNotFoundError at the end of a
    # test run (#27890) — discard it.
    from multiprocessing.util import _finalizer_registry
    _finalizer_registry.pop((-100, 0), None)
def actual_test_processes(parallel):
    """Resolve the --parallel option into a concrete process count."""
    # An explicit non-zero value is used as-is.
    if parallel != 0:
        return parallel
    # parallel == 0 means "auto": one process per core, but only when every
    # configured database supports cloning. This doesn't work before
    # django.setup() on some databases.
    if all(conn.features.can_clone_databases for conn in connections.all()):
        return default_test_processes()
    return 1
class ActionSelenium(argparse.Action):
    """argparse action validating a comma-separated list of browsers."""
    def __call__(self, parser, namespace, values, option_string=None):
        requested = values.split(',')
        # Reject the whole option if any named browser's webdriver cannot
        # be imported.
        for name in requested:
            try:
                SeleniumTestCaseBase.import_webdriver(name)
            except ImportError:
                raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % name)
        setattr(namespace, self.dest, requested)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
                 test_labels, debug_sql, parallel, tags, exclude_tags,
                 test_name_patterns, start_at, start_after, pdb, buffer):
    """Run the Django test suite and return the number of failures."""
    saved_state = setup(verbosity, test_labels, parallel, start_at, start_after)
    # Fall back to the stock runner when the settings module names none.
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    runner_class = get_runner(settings)
    runner = runner_class(
        verbosity=verbosity,
        interactive=interactive,
        failfast=failfast,
        keepdb=keepdb,
        reverse=reverse,
        debug_sql=debug_sql,
        parallel=actual_test_processes(parallel),
        tags=tags,
        exclude_tags=exclude_tags,
        test_name_patterns=test_name_patterns,
        pdb=pdb,
        buffer=buffer,
    )
    # No extra validation tests beyond the discovered labels.
    failures = runner.run_tests(
        test_labels or get_installed(),
        extra_tests=[],
    )
    teardown(saved_state)
    return failures
def get_subprocess_args(options):
    """Build the base argv for re-running this script in a subprocess."""
    argv = [
        sys.executable, __file__, '--settings=%s' % options.settings
    ]
    # Forward the flags that affect how the child run behaves.
    if options.failfast:
        argv.append('--failfast')
    if options.verbosity:
        argv.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        argv.append('--noinput')
    if options.tags:
        argv.append('--tag=%s' % options.tags)
    if options.exclude_tags:
        argv.append('--exclude_tag=%s' % options.exclude_tags)
    return argv
def bisect_tests(bisection_label, options, test_labels, parallel, start_at, start_after):
    """Binary-search the test labels for one that fails in combination
    with *bisection_label*, re-running each half in a subprocess.
    """
    state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
    test_labels = test_labels or get_installed()
    print('***** Bisecting test suite: %s' % ' '.join(test_labels))
    # Make sure the bisection point isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [bisection_label, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass
    subprocess_args = get_subprocess_args(options)
    iteration = 1
    # Each pass runs both halves (with the bisection label appended) and
    # keeps whichever half still fails.
    while len(test_labels) > 1:
        midpoint = len(test_labels) // 2
        test_labels_a = test_labels[:midpoint] + [bisection_label]
        test_labels_b = test_labels[midpoint:] + [bisection_label]
        print('***** Pass %da: Running the first half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_a))
        failures_a = subprocess.run(subprocess_args + test_labels_a)
        print('***** Pass %db: Running the second half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_b))
        print('')
        failures_b = subprocess.run(subprocess_args + test_labels_b)
        # A non-zero returncode marks the failing half; [:-1] drops the
        # appended bisection label before recursing into that half.
        if failures_a.returncode and not failures_b.returncode:
            print("***** Problem found in first half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_a[:-1]
        elif failures_b.returncode and not failures_a.returncode:
            print("***** Problem found in second half. Bisecting again...")
            iteration += 1
            test_labels = test_labels_b[:-1]
        elif failures_a.returncode and failures_b.returncode:
            print("***** Multiple sources of failure found")
            break
        else:
            print("***** No source of failure found... try pair execution (--pair)")
            break
    if len(test_labels) == 1:
        print("***** Source of error: %s" % test_labels[0])
    teardown(state)
def paired_tests(paired_test, options, test_labels, parallel, start_at, start_after):
    """Run each test label together with ``paired_test`` to locate a pairing
    that causes a failure."""
    state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
    test_labels = test_labels or get_installed()
    print('***** Trying paired execution')
    # The constant member of the pair and tests that must run in specific
    # combinations are removed from the candidate list.
    for excluded in (paired_test, 'model_inheritance_same_model_name'):
        try:
            test_labels.remove(excluded)
        except ValueError:
            pass
    subprocess_args = get_subprocess_args(options)

    total = len(test_labels)
    for position, label in enumerate(test_labels, start=1):
        print('***** %d of %d: Check test pairing with %s' % (position, total, label))
        if subprocess.call(subprocess_args + [label, paired_test]):
            print('***** Found problem pair with %s' % label)
            return
    print('***** No problem pair found')
    teardown(state)
# Command-line entry point: build the argument parser, validate option
# combinations, configure Django settings and Selenium, then dispatch to the
# requested mode (bisect, paired, or a normal test run).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the Django test suite.")
    parser.add_argument(
        'modules', nargs='*', metavar='module',
        help='Optional path(s) to test modules; e.g. "i18n" or '
             '"i18n.tests.TranslationTests.test_lazy_objects".',
    )
    parser.add_argument(
        '-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
    )
    parser.add_argument(
        '--noinput', action='store_false', dest='interactive',
        help='Tells Django to NOT prompt the user for input of any kind.',
    )
    parser.add_argument(
        '--failfast', action='store_true',
        help='Tells Django to stop running the test suite after first failed test.',
    )
    parser.add_argument(
        '--keepdb', action='store_true',
        help='Tells Django to preserve the test database between runs.',
    )
    parser.add_argument(
        '--settings',
        help='Python path to settings module, e.g. "myproject.settings". If '
             'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
             'environment variable or "test_sqlite" will be used.',
    )
    parser.add_argument(
        '--bisect',
        help='Bisect the test suite to discover a test that causes a test '
             'failure when combined with the named test.',
    )
    parser.add_argument(
        '--pair',
        help='Run the test suite in pairs with the named test to find problem pairs.',
    )
    parser.add_argument(
        '--reverse', action='store_true',
        help='Sort test suites and test cases in opposite order to debug '
             'test side effects not apparent with normal execution lineup.',
    )
    parser.add_argument(
        '--selenium', action=ActionSelenium, metavar='BROWSERS',
        help='A comma-separated list of browsers to run the Selenium tests against.',
    )
    parser.add_argument(
        '--headless', action='store_true',
        help='Run selenium tests in headless mode, if the browser supports the option.',
    )
    parser.add_argument(
        '--selenium-hub',
        help='A URL for a selenium hub instance to use in combination with --selenium.',
    )
    parser.add_argument(
        '--external-host', default=socket.gethostname(),
        help='The external host that can be reached by the selenium hub instance when running Selenium '
             'tests via Selenium Hub.',
    )
    parser.add_argument(
        '--debug-sql', action='store_true',
        help='Turn on the SQL query logger within tests.',
    )
    parser.add_argument(
        '--parallel', nargs='?', default=0, type=int,
        const=default_test_processes(), metavar='N',
        help='Run tests using up to N parallel processes.',
    )
    parser.add_argument(
        '--tag', dest='tags', action='append',
        help='Run only tests with the specified tags. Can be used multiple times.',
    )
    parser.add_argument(
        '--exclude-tag', dest='exclude_tags', action='append',
        help='Do not run tests with the specified tag. Can be used multiple times.',
    )
    parser.add_argument(
        '--start-after', dest='start_after',
        help='Run tests starting after the specified top-level module.',
    )
    parser.add_argument(
        '--start-at', dest='start_at',
        help='Run tests starting at the specified top-level module.',
    )
    parser.add_argument(
        '--pdb', action='store_true',
        help='Runs the PDB debugger on error or failure.'
    )
    parser.add_argument(
        '-b', '--buffer', action='store_true',
        help='Discard output of passing tests.',
    )
    # unittest's -k (test name pattern) option only exists on Python 3.7+.
    if PY37:
        parser.add_argument(
            '-k', dest='test_name_patterns', action='append',
            help=(
                'Only run test methods and classes matching test name pattern. '
                'Same as unittest -k option. Can be used multiple times.'
            ),
        )
    options = parser.parse_args()

    # --selenium-hub is only meaningful with --selenium, and a hub additionally
    # needs an --external-host it can reach back to.
    using_selenium_hub = options.selenium and options.selenium_hub
    if options.selenium_hub and not options.selenium:
        parser.error('--selenium-hub and --external-host require --selenium to be used.')
    if using_selenium_hub and not options.external_host:
        parser.error('--selenium-hub and --external-host must be used together.')
    # Allow including a trailing slash on app_labels for tab completion convenience
    options.modules = [os.path.normpath(labels) for labels in options.modules]
    # --start-at, --start-after, and explicit labels cannot be combined.
    mutually_exclusive_options = [options.start_at, options.start_after, options.modules]
    enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True)
    if enabled_module_options > 1:
        print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.')
        sys.exit(1)
    # The start markers must name a top-level module (no dotted paths).
    for opt_name in ['start_at', 'start_after']:
        opt_val = getattr(options, opt_name)
        if opt_val:
            if '.' in opt_val:
                print('Aborting: --%s must be a top-level module.' % opt_name.replace('_', '-'))
                sys.exit(1)
            setattr(options, opt_name, os.path.normpath(opt_val))
    # Resolve the settings module: explicit flag wins, then the environment,
    # then the bundled "test_sqlite" settings.
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    else:
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
        options.settings = os.environ['DJANGO_SETTINGS_MODULE']
    # Selenium runs are restricted to tests tagged "selenium"; push the
    # browser/hub configuration onto the shared test-case base class.
    if options.selenium:
        if not options.tags:
            options.tags = ['selenium']
        elif 'selenium' not in options.tags:
            options.tags.append('selenium')
        if options.selenium_hub:
            SeleniumTestCaseBase.selenium_hub = options.selenium_hub
            SeleniumTestCaseBase.external_host = options.external_host
        SeleniumTestCaseBase.headless = options.headless
        SeleniumTestCaseBase.browsers = options.selenium
    # Dispatch: bisection mode, paired mode, or a regular run.
    if options.bisect:
        bisect_tests(
            options.bisect, options, options.modules, options.parallel,
            options.start_at, options.start_after,
        )
    elif options.pair:
        paired_tests(
            options.pair, options, options.modules, options.parallel,
            options.start_at, options.start_after,
        )
    else:
        failures = django_tests(
            options.verbosity, options.interactive, options.failfast,
            options.keepdb, options.reverse, options.modules,
            options.debug_sql, options.parallel, options.tags,
            options.exclude_tags,
            getattr(options, 'test_name_patterns', None),
            options.start_at, options.start_after, options.pdb, options.buffer,
        )
        if failures:
            sys.exit(1)
| {
"content_hash": "556adb36755f70e2cb3f6e73101bd1b5",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 113,
"avg_line_length": 38.068062827225134,
"alnum_prop": 0.6413148122679136,
"repo_name": "simonw/django",
"id": "1282538a6d04370584b896a62af99939fe6bc931",
"size": "21835",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/runtests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import logging
import sys
import numpy as np
import sklearn.linear_model
from .yatsm import YATSM
from ..accel import try_jit
from ..errors import TSLengthException
from ..masking import smooth_mask, multitemp_mask
from ..regression.diagnostics import rmse
# Setup
logger = logging.getLogger('yatsm_algo')
@try_jit(nopython=True)
def _monitor_calc_scores(X, Y, here, scores, predictions, rmse,
                         test_indices, min_rmse):
    """ Calculate monitoring period scaled residuals

    Fills ``scores`` in place: for each test band and each of the
    ``scores.shape[1]`` consecutive observations starting at ``here``, the
    score is the residual (observed minus predicted) divided by the band's
    model RMSE, floored at ``min_rmse``.

    Args:
        X: design matrix (unused in this body; kept for the jitted signature)
        Y: observation matrix (series x observations)
        here: index of the first monitored observation in ``Y``
        scores: output array, shape (len(test_indices), n_consecutive);
            written in place
        predictions: model predictions, same shape as ``scores``
        rmse: per-test-band model RMSE (note: this parameter shadows the
            module-level ``rmse`` function inside this body)
        test_indices: indices of the series in ``Y`` that are tested
        min_rmse: per-series lower bound applied to ``rmse``
    """
    for i in range(scores.shape[1]):
        for i_b, b in enumerate(test_indices):
            # Residual scaled by the (floored) RMSE of this band's model.
            scores[i_b, i] = (
                (Y[b, here + i] - predictions[i_b, i]) /
                max(min_rmse[b], rmse[i_b])
            )
class CCDCesque(YATSM):
    """Initialize a CCDC-like model for data X (spectra) and Y (dates)

    An unofficial and unvalidated port of the Continuous Change Detection and
    Classification (CCDC) algorithm by Zhu and Woodcock, 2014.

    Args:
        test_indices (numpy.ndarray): Test for changes with these
            indices of ``Y``. If not provided, all series in ``Y`` will be used
            as test indices
        estimator (dict): dictionary containing estimation model from
            ``scikit-learn`` used to fit and predict timeseries and,
            optionally, a dict of options for the estimation model ``fit``
            method (default: ``{'object': Lasso(alpha=20), 'fit': {}}``)
        consecutive (int): Consecutive observations to trigger change
        threshold (float): Test statistic threshold for change
        min_obs (int): Minimum observations in model
        min_rmse (float): Minimum RMSE for models during testing
        retrain_time (float): Number of days between model fit updates during
            monitoring period
        screening (str): Style of prescreening of the timeseries for noise.
            Options are 'RLM' or 'LOWESS' (default: RLM)
        screening_crit (float): critical value for multitemporal
            noise screening (default: 400.0)
        remove_noise (bool): Remove observation if change is not
            detected but first observation is above threshold (if it looks like
            noise) (default: True)
        green_band (int): Index of green band in ``Y`` for
            multitemporal masking (default: 1)
        swir1_band (int): Index of first SWIR band in ``Y`` for
            multitemporal masking (default: 4)
        dynamic_rmse (bool): Vary RMSE as a function of day of year
            (default: False)
        slope_test (float or bool): Use an additional slope test to
            assess the suitability of the training period. A value of True
            enables the test and uses the `threshold` parameter as the test
            criterion. False turns off the test or a float value enables the
            test but overrides the test criterion threshold. (default: False)
        idx_slope (int): if ``slope_test`` is enabled, provide index of ``X``
            containing slope term (default: 1)

    .. document private functions
    .. automethod:: _get_dynamic_rmse
    .. automethod:: _get_model_rmse
    """

    # Length of a year in days; used to convert time spans into years
    ndays = 365.25

    def __init__(self,
                 test_indices=None,
                 # NOTE(review): mutable default argument -- the dict (and the
                 # Lasso instance inside it) is shared across instances that
                 # rely on the default; kept for interface compatibility.
                 estimator={'object': sklearn.linear_model.Lasso(alpha=20),
                            'fit': {}},
                 consecutive=5, threshold=2.56, min_obs=None, min_rmse=None,
                 retrain_time=365.25, screening='RLM', screening_crit=400.0,
                 remove_noise=True, green_band=1, swir1_band=4,
                 dynamic_rmse=False, slope_test=False, idx_slope=1,
                 **kwargs):
        # Parent sets up test_indices and lm
        super(CCDCesque, self).__init__(test_indices, estimator, **kwargs)

        # Store model hyperparameters
        self.consecutive = consecutive
        self.threshold = threshold
        self.min_obs = min_obs or 16
        self.min_rmse = min_rmse
        self.retrain_time = retrain_time

        # Define screening method according to type
        if screening == 'RLM':
            self.screen_timeseries = self._screen_timeseries_RLM
            logger.debug('Using RLM for screening')
        elif screening == 'LOWESS':
            self.screen_timeseries = self._screen_timeseries_LOWESS
            logger.debug('Using LOWESS for screening')
        else:
            raise TypeError('Unknown screening type %s' % screening)

        self.screening_crit = screening_crit
        self.remove_noise = remove_noise
        self.green_band = green_band
        self.swir1_band = swir1_band
        # ``slope_test is True`` (not merely truthy) means "use the change
        # threshold as the slope criterion"; a float keeps its own criterion.
        self.slope_test = slope_test
        if self.slope_test is True:
            self.slope_test = threshold
        self.idx_slope = idx_slope

        # Select the RMSE strategy used during monitoring
        if dynamic_rmse:
            self.get_rmse = self._get_dynamic_rmse
        else:
            self.get_rmse = self._get_model_rmse

    @property
    def record_template(self):
        """ YATSM record template for features in X and series in Y

        Record template will set `px` and `py` if defined as class attributes.
        Otherwise `px` and `py` coordinates will default to 0.

        Returns:
            numpy.ndarray: NumPy structured array containing a template of a
                YATSM record
        """
        record_template = np.zeros(1, dtype=[
            ('start', 'i4'),
            ('end', 'i4'),
            ('break', 'i4'),
            ('coef', 'float32', (self.n_features, self.n_series)),
            ('rmse', 'float32', (self.n_series)),
            ('magnitude', 'float32', self.n_series),
            ('px', 'u2'),
            ('py', 'u2')
        ])
        record_template['px'] = self.px
        record_template['py'] = self.py
        return record_template

    # HELPER PROPERTIES
    @property
    def span_time(self):
        """ Return time span (in days) between start and end of model """
        return abs(self.dates[self.here] - self.dates[self.start])

    @property
    def span_index(self):
        """ Return time span (in index) between start and end of model """
        return (self.here - self.start)

    @property
    def running(self):
        """ Determine if timeseries can run """
        return self.here < len(self.dates)

    @property
    def can_monitor(self):
        """ Determine if timeseries can monitor the future consecutive obs """
        return self.here < len(self.dates) - self.consecutive - 1

    # MAIN LOOP
    def fit(self, X, Y, dates):
        """ Fit timeseries model

        Args:
            X (numpy.ndarray): design matrix (number of observations x number
                of features)
            Y (numpy.ndarray): independent variable matrix (number of series x
                number of observations)
            dates (numpy.ndarray): ordinal dates for each observation in X/Y

        Returns:
            numpy.ndarray: NumPy structured array containing timeseries
                model attribute information

        Raises:
            ValueError: if X, Y, and dates disagree on observation count
            TSLengthException: if the timeseries is too short to fit
        """
        if len(dates) != X.shape[0] or len(dates) != Y.shape[1]:
            raise ValueError('X/Y/dates must have same number of observations')

        self.X = np.asarray(X, dtype=np.float64)
        self.Y = np.asarray(Y, dtype=np.float64)
        self.dates = dates
        self.n_features = X.shape[1]
        self.n_series = Y.shape[0]

        # Setup test indices
        if not np.any(np.asarray(self.test_indices)):
            self.test_indices = np.arange(self.n_series)

        # Setup minimum RMSE
        if isinstance(self.min_rmse, (list, np.ndarray)):
            self.min_rmse = np.asarray(self.min_rmse)
        elif isinstance(self.min_rmse, (int, float)):
            self.min_rmse = np.array([self.min_rmse] * self.n_series)
        else:
            # A vanishingly small floor keeps the score division safe
            self.min_rmse = np.array([sys.float_info.min] * self.n_series)

        # Set or reset state variables
        self.reset()

        if len(dates) < self.here + self.consecutive:
            raise TSLengthException('Not enough observations (n = %s)' %
                                    len(dates))

        self.n_record = 0
        self.record = np.copy(self.record_template)

        while self.running:
            while not self.monitoring and self.can_monitor:
                self.train()
                self.here += 1
            # Ensure all bands are fit in case we can't monitor
            # First check if there are enough obs to estimate model
            if self.span_index > self.n_features:
                self._update_model()
            while self.monitoring and self.can_monitor:
                # Update model if required
                self._update_model()
                # Perform monitoring check
                self.monitor()
                # Iterate forward
                self.here += 1
            self.here += 1

        # Update record for last model
        self.record[self.n_record]['start'] = self.dates[self.start]
        # Re-adjust end for consecutive, and for two ``self.here += 1`` calls
        offset = 1 + (1 if self.monitoring else 0)
        self.record[self.n_record]['end'] = self.dates[
            self.here - self.consecutive - offset]
        for i, m in enumerate(self.models):
            self.record[self.n_record]['coef'][:, i] = m.coef
            self.record[self.n_record]['rmse'][i] = m.rmse

        # If we ended without being able to monitor again, delete last model
        # since it will be empty
        # TODO: fit this time period with median
        if not self.monitoring:
            self.record = self.record[:-1]

        return self.record

    def reset(self):
        """ Reset state information required for model fittings
        """
        # Location information
        self.start = 0
        self.here = self.min_obs
        self._here = self.here
        self.trained_date = 0
        self.monitoring = False

        # Populate prediction models
        if len(self.models) == 0:
            self.models = np.array([sklearn.clone(self.estimator) for
                                    i in range(self.n_series)])
            for m in self.models:  # initialize additional attributes
                m.rmse = 0.0
                m.coef = np.zeros(self.X.shape[1])

        # Training period test calculations
        self.start_resid = np.zeros(len(self.test_indices))
        self.end_resid = np.zeros(len(self.test_indices))
        self.slope_resid = np.zeros(len(self.test_indices))

        # Monitoring period calculations
        self.predictions = np.zeros((len(self.test_indices), self.consecutive),
                                    dtype=np.float64)
        self.scores = np.zeros((len(self.test_indices), self.consecutive),
                               dtype=np.float64)

    def train(self):
        r""" Train time series model if stability criteria are met

        Stability criteria (Equation 5 in Zhu and Woodcock, 2014) include a
        test on the change in reflectance over the training period (slope test)
        and a test on the magnitude of the residuals for the first and last
        observations in the training period. Training periods with large slopes
        can indicate that a disturbance process is still in progress. Large
        residuals on the first or last observations have high leverage on the
        estimated regression and should be excluded from the training period.

        1. Slope test:

        .. math::
            \frac{1}{n}\sum\limits_{b\in B_{test}}\frac{
                \left|\beta_{slope,b}(t_{end}-t_{start})\right|}
                {RMSE_b} > T_{crit}

        2. First and last residual tests:

        .. math::
            \frac{1}{n}\sum\limits_{b\in B_{test}}\frac{
                \left|\hat\rho_{b,i=1} - \rho_{b,i=1}\right|}
                {RMSE_b} > T_{crit}

            \frac{1}{n}\sum\limits_{b\in B_{test}}\frac{
                \left|\hat\rho_{b,i=N} - \rho_{b,i=N}\right|}
                {RMSE_b} > T_{crit}
        """
        # Test if we can train yet
        if self.span_time <= self.ndays or self.span_index < self.n_features:
            logger.debug('Could not train - moving forward')
            return

        # Check if screening was OK
        if not self.screen_timeseries():
            return

        # Test if we can still run after noise removal
        if self.here >= self._X.shape[0]:
            raise TSLengthException('Not enough observations to proceed '
                                    'after noise removal')

        # After noise removal, try to fit models
        self.fit_models(self._X[self.start:self.here + 1, :],
                        self._Y[:, self.start:self.here + 1],
                        bands=self.test_indices)

        # Ensure first and last points aren't unusual
        for i, b in enumerate(self.test_indices):
            m = self.models[b]
            _rmse = max(self.min_rmse[b], m.rmse)
            self.start_resid[i] = (
                np.abs(self._Y[b, self.start] -
                       m.predict(self._X[self.start, :][None, :])) /
                _rmse)
            self.end_resid[i] = (
                np.abs(self._Y[b, self.here] -
                       m.predict(self._X[self.here, :][None, :])) /
                _rmse)
            self.slope_resid[i] = (
                np.abs(m.coef_[self.idx_slope] * (self.here - self.start)) /
                _rmse)

        test_start = np.linalg.norm(self.start_resid)
        test_end = np.linalg.norm(self.end_resid)
        test_slope = np.linalg.norm(self.slope_resid)

        if (test_start > self.threshold or test_end > self.threshold or
                (self.slope_test and test_slope > self.threshold)):
            # Unstable training period: slide the window forward one obs and
            # restore ``here`` to its pre-screening position
            logger.debug('Training period unstable')
            self.start += 1
            self.here = self._here
            return

        # Stable: promote the noise-screened arrays to the working arrays
        self.X = self._X
        self.Y = self._Y
        self.dates = self._dates

        logger.debug('Entering monitoring period')
        self.monitoring = True

    def monitor(self):
        r""" Monitor for changes in time series

        The test criteria for CCDC can be represented as:

        .. math::
            \sum_{i=0}^{\color{red}{consec}}
                I \left(
                    \sqrt{
                        \sum_{b\in \color{red}{B_{test}}}
                            \left(
                                \frac
                                    {\hat\rho_{b,i} - \rho_{b,i}}
                                    {{RMSE}_b^{\color{red}{*}}}
                            \right)
                            ^2
                    } > \color{red}{T_{crit}}
                \right)
                > \color{red}{consec}

        where the symbols in red are model hyperparameters:

        * :math:`\color{red}{consec}`:
          :paramref:`consecutive <.CCDCesque.consecutive>`
        * :math:`\color{red}{B_{test}}`:
          :paramref:`test_indices <.CCDCesque.test_indices>`
        * :math:`\color{red}{T_{crit}}`:
          :paramref:`threshold <.CCDCesque.threshold>`
        * :math:`{RMSE}_b^{\color{red}{*}}` depends on:

          * :paramref:`dynamic_rmse <.CCDCesque.dynamic_rmse>`:

            * True: :func:`~_get_dynamic_rmse` is used for RMSE
            * False: :func:`~_get_model_rmse` is used for RMSE

          * :paramref:`min_rmse <.CCDCesque.min_rmse>`

            * If a `float` or `int` is given, override RMSE estimate
              if estimate is smaller than
              :paramref:`min_rmse <.CCDCesque.min_rmse>`

        If :paramref:`remove_noise <.CCDCesque.remove_noise>` is `True`,
        the first of ``consecutive`` observations will be removed if first
        scaled residual is above ``threshold`` but not all ``consecutive``
        scaled residuals exceed ``threshold``.
        """
        _rmse = self.get_rmse()

        # Predict the next ``consecutive`` observations for each test band
        for idx, model in enumerate(self.models[self.test_indices]):
            self.predictions[idx, :] = model.predict(
                self.X[self.here:self.here + self.consecutive, :])

        _monitor_calc_scores(self.X, self.Y, self.here,
                             self.scores,
                             self.predictions, _rmse,
                             self.test_indices,
                             self.min_rmse)

        # Check for scores above critical value
        mag = np.linalg.norm(self.scores, axis=0)

        if np.all(mag > self.threshold):
            logger.debug('CHANGE DETECTED')

            # Update record for last model
            self.record[self.n_record]['start'] = self.dates[self.start]
            self.record[self.n_record]['end'] = self.dates[self.here]
            self.record[self.n_record]['break'] = self.dates[self.here + 1]
            for i, m in enumerate(self.models):
                self.record[self.n_record]['coef'][:, i] = m.coef
                self.record[self.n_record]['rmse'][i] = m.rmse
            # Record magnitude of difference for tested indices
            self.record[self.n_record]['magnitude'][self.test_indices] = \
                np.mean(self.scores, axis=1)

            self.record = np.append(self.record, self.record_template)
            self.n_record += 1

            # Reset _X and _Y for re-training
            self._X = self.X
            self._Y = self.Y
            self.start = self.here + 1

            self.trained_date = 0
            self.monitoring = False
        elif mag[0] > self.threshold and self.remove_noise:
            # Masking way of deleting is faster than `np.delete`
            m = np.ones(self.X.shape[0], dtype=bool)
            m[self.here] = False
            self.X = self.X[m, :]
            self.Y = self.Y[:, m]
            self.dates = self.dates[m]
            self.here -= 1

    # MODEL FITTING UTILITIES
    def _update_model(self):
        """ Refit timeseries models if ``retrain_time`` days have elapsed """
        # Only train if enough time has past
        if (abs(self.dates[self.here] - self.trained_date) >
                self.retrain_time):
            logger.debug('Monitoring - retraining (%s days since last)' %
                         str(self.dates[self.here] - self.trained_date))

            # Fit timeseries models
            self.fit_models(self.X[self.start:self.here + 1, :],
                            self.Y[:, self.start:self.here + 1])
            self.trained_date = self.dates[self.here]

    # MULTITEMP SCREENING
    def _screen_timeseries_LOWESS(self, span=None):
        """ Screen entire dataset for noise before training using LOWESS

        Args:
            span (int): span for LOWESS

        Returns:
            bool: True if timeseries is screened and we can train, else False
        """
        # NOTE(review): ``self.screened`` is not initialized in this class --
        # presumably set by the YATSM base class; confirm.
        if not self.screened:
            span = span or self.consecutive * 2 + 1

            mask = smooth_mask(self.dates, self.Y, span,
                               crit=self.screening_crit,
                               green=self.green_band, swir1=self.swir1_band)

            # Apply mask to X and Y
            self.X = self.X[mask, :]
            self.Y = self.Y[:, mask]
            # Also apply to _X and _Y for training purposes
            self._X = self.X
            self._Y = self.Y
            # NOTE(review): unlike the RLM path, ``self._dates`` is not
            # assigned here even though ``train`` reads it -- verify the base
            # class provides it.

            self.screened = True

        return True

    def _screen_timeseries_RLM(self):
        """ Screen training period for noise with IRWLS RLM

        Returns:
            bool: True if timeseries is screened and we can train, else False
        """
        # Multitemporal noise removal
        # ``bool`` replaces the ``np.bool`` alias (deprecated in NumPy 1.20,
        # removed in 1.24); the resulting mask dtype is identical and now
        # matches the mask construction in ``monitor``.
        mask = np.ones(self.X.shape[0], dtype=bool)
        # NOTE(review): uint16 indices assume < 65536 observations -- confirm
        index = np.arange(self.start, self.here + self.consecutive,
                          dtype=np.uint16)
        mask[index] = multitemp_mask(self.dates[index],
                                     self.Y[:, index],
                                     self.span_time / self.ndays,
                                     crit=self.screening_crit,
                                     green=self.green_band,
                                     swir1=self.swir1_band)

        # Check if there are enough observations for model with noise removed
        _span_index = mask[index][:-self.consecutive].sum()

        # Return if not enough observations
        if _span_index < self.min_obs:
            logger.debug(' multitemp masking - not enough obs')
            return False

        # There is enough observations in train period to fit - remove noise
        self._X = self.X[mask, :]
        self._Y = self.Y[:, mask]
        self._dates = self.dates[mask]

        # record our current position
        # important for next iteration of noise removal
        self._here = self.here

        # Go forward after noise removal
        self.here = self.start + _span_index - 1

        if self.span_time < self.ndays:
            logger.debug(' multitemp masking - not enough time')
            self.here = self._here
            return False

        logger.debug('Updated "here"')

        return True

    # RMSE CALCULATION
    def _get_model_rmse(self):
        """ Return the normal RMSE of each fitted model

        Returns:
            numpy.ndarray: RMSE of each tested model
        """
        return np.array([m.rmse for m in self.models])[self.test_indices]

    def _get_dynamic_rmse(self):
        """ Return the dynamic RMSE for each model

        Dynamic RMSE refers to the Root Mean Squared Error calculated using
        `self.min_obs` number of observations closest in day of year to the
        observation `self.consecutive` steps into the future. Goal is to
        reduce false-positives during seasonal transitions (high variance in
        the signal) while decreasing omission during stable times of year.

        Returns:
            numpy.ndarray: dynamic RMSE of each tested model
        """
        # Indices of closest observations based on DOY
        i_doy = np.argsort(
            np.mod(self.dates[self.start:self.here] -
                   self.dates[self.here + self.consecutive],
                   self.ndays))[:self.min_obs]

        _rmse = np.zeros(len(self.test_indices), np.float32)
        _X = self.X.take(i_doy, axis=0)
        for i_b, b in enumerate(self.test_indices):
            m = self.models[b]
            _rmse[i_b] = rmse(self.Y[b, :].take(i_doy), m.predict(_X))

        return _rmse
| {
"content_hash": "8083ba7a20f75f47babf97267b735cd6",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 79,
"avg_line_length": 38.051107325383306,
"alnum_prop": 0.5544412607449857,
"repo_name": "ceholden/yatsm",
"id": "8d328f98380473a9f58c6253f0d543d548194102",
"size": "22336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yatsm/algorithms/ccdc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "72629"
},
{
"name": "Python",
"bytes": "311968"
},
{
"name": "Shell",
"bytes": "2391"
}
],
"symlink_target": ""
} |
"""
Push/pop stack of buffer names. The top buffer of the stack is the one that
currently has the focus.
Note that the stack can contain `None` values. This means that none of the
buffers has the focus.
"""
from __future__ import unicode_literals
from six import string_types
from prompt_toolkit.enums import DEFAULT_BUFFER
__all__ = (
'FocusStack',
)
class FocusStack(object):
    """
    Stack of buffer names. The entry on top of the stack is the buffer that
    currently has the focus; a ``None`` entry means no buffer is focussed.
    """
    def __init__(self, initial=DEFAULT_BUFFER):
        self._initial = initial
        self.reset()

    def __repr__(self):
        return '%s(initial=%r, _stack=%r)' % (
            self.__class__.__name__, self._initial, self._stack)

    def reset(self):
        """ Restore the stack to just the initial buffer name. """
        self._stack = [self._initial]

    def __contains__(self, value):
        return value in self._stack

    def pop(self):
        # The stack must never become empty: the last entry always stays.
        if len(self._stack) <= 1:
            raise IndexError('Cannot pop last item from the focus stack.')
        self._stack.pop()

    def replace(self, buffer_name):
        """ Swap the focussed buffer without growing the stack. """
        assert buffer_name is None or isinstance(buffer_name, string_types)
        self._stack[-1] = buffer_name

    def push(self, buffer_name):
        """ Give the focus to ``buffer_name``, remembering the previous one. """
        assert buffer_name is None or isinstance(buffer_name, string_types)
        self._stack.append(buffer_name)

    @property
    def current(self):
        return self._stack[-1]

    @property
    def previous(self):
        """
        Return the name of the previous focussed buffer, or return None.
        """
        if len(self._stack) > 1:
            return self._stack[-2]
        return None
| {
"content_hash": "5c32e86d1680b731162061b607c879de",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 25.55,
"alnum_prop": 0.6086105675146771,
"repo_name": "niklasf/python-prompt-toolkit",
"id": "1bb167d03964e3d854d661e6ae16dfc20e9a20c0",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prompt_toolkit/focus_stack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "635808"
}
],
"symlink_target": ""
} |
"""
`RumCore`.
Licensed under MIT
Copyright (c) 2013 - 2015 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import sys
import codecs
import mmap
import os
import re
import shutil
import sre_parse
from collections import namedtuple
from time import ctime
from backrefs import bre
from collections import deque
from . import text_decode
from wcmatch import wcmatch
from . import util
try:
from backrefs import bregex
import regex
REGEX_SUPPORT = True
except ImportError: # pragma: no cover
REGEX_SUPPORT = False
# Common regex flags (re|regex)
IGNORECASE = 0x1 # (?i)
DOTALL = 0x2 # (?s)
MULTILINE = 0x4 # (?m)
UNICODE = 0x8 # (?u)
# Regex module flags
ASCII = 0x10 # (?a)
FULLCASE = 0x20 # (?f)
WORD = 0x40 # (?w)
BESTMATCH = 0x80 # (?b)
ENHANCEMATCH = 0x100 # (?e)
REVERSE = 0x200 # (?r)
VERSION0 = 0x400 # (?V0)
VERSION1 = 0x800 # (?V1)
FORMATREPLACE = 0x1000 # Use {1} for groups in replace
POSIX = 0x2000 # (?p)
# `Rumcore` search related flags
LITERAL = 0x10000 # Literal search
# `Rumcore` related flags
BUFFER_INPUT = 0x20000 # Input is a buffer
RECURSIVE = 0x40000 # Recursive directory search
FILE_REGEX_MATCH = 0x80000 # Regex pattern for files
DIR_REGEX_MATCH = 0x100000 # Regex pattern for directories
SHOW_HIDDEN = 0x200000 # Show hidden files and folders
COUNT_ONLY = 0x400000 # Only count the matches; no context
BOOLEAN = 0x800000 # Just check if file has one match and move on
PROCESS_BINARY = 0x1000000 # Process binary files
TRUNCATE_LINES = 0x2000000 # Truncate context lines to 120 chars
BACKUP = 0x4000000 # Backup files on replace
BACKUP_FOLDER = 0x8000000 # Backup to folder
FOLLOW_LINKS = 0x10000000 # Follow symlinks
# Fnmatch/Glob flags
EXTMATCH = 0x100000000 # Match with extended patterns +(...) etc.
BRACE = 0x200000000 # Expand braces a{b,c} -> ab ac
FILECASE = 0x400000000 # File case sensitivity
DIRPATHNAME = 0x800000000 # Full directory exclude path match
FILEPATHNAME = 0x1000000000 # Full file name path match
GLOBSTAR = 0x2000000000 # Use globstar (**) in full paths
MATCHBASE = 0x4000000000 # Match base names when no slashes are present (full path)
MINUSNEGATE = 0x8000000000 # Use - instead of ! for exclusion patterns.
RE_MODE = 0
BRE_MODE = 1
REGEX_MODE = 2
BREGEX_MODE = 3
SEARCH_MASK = 0x1FFFF
FILE_MASK = 0xFFFFFE0000
FNMATCH_FLAGS = 0xFF00000000
RE_MODES = (RE_MODE, BRE_MODE)
REGEX_MODES = (REGEX_MODE, BREGEX_MODE)
FORMAT_MODES = (REGEX_MODE, BREGEX_MODE, BRE_MODE)
BACKREFS_MODES = (BRE_MODE, BREGEX_MODE)
TRUNCATE_LENGTH = 120
DEFAULT_BAK = 'rum-bak'
DEFAULT_FOLDER_BAK = '.rum-bak'
_U32 = frozenset(('u32', 'utf32', 'utf_32'))
_U32BE = frozenset(('utf-32be', 'utf_32_be'))
_U32LE = frozenset(('utf-32le', 'utf_32_le'))
_U16 = frozenset(('u16', 'utf16', 'utf_16'))
_U16BE = frozenset(('utf-16be', 'utf_16_be'))
_U16LE = frozenset(('utf-16le', 'utf_16_le'))
_U8 = frozenset(('u8', 'utf', 'utf8', 'utf_8', 'utf_8_sig', 'utf-8-sig'))
RE_LINE_ENDINGS = re.compile(r'(?:\r\n|\r|\n)')
def get_exception():
    """Capture exception and `traceback` separately."""
    import traceback

    # Grab the active exception info before anything else can replace it.
    current_type, current_value, current_tb = sys.exc_info()
    try:
        formatted_exc = ''.join(
            traceback.format_exception_only(current_type, current_value)
        )
        formatted_tb = ''.join(traceback.format_tb(current_tb))
    finally:
        # Break the frame <-> traceback reference cycle.
        del current_tb
    return (formatted_exc, formatted_tb)
def _re_pattern(pattern, rum_flags=0, binary=False):
    """Prepare regex search pattern flags."""

    flags = 0
    # Translate rumcore flags into their `re` equivalents.
    for rum_flag, re_flag in ((MULTILINE, re.MULTILINE),
                              (IGNORECASE, re.IGNORECASE),
                              (DOTALL, re.DOTALL)):
        if rum_flags & rum_flag:
            flags |= re_flag
    # Unicode semantics only apply to text searches; binary gets ASCII.
    flags |= re.UNICODE if (not binary and rum_flags & UNICODE) else re.ASCII
    return re.compile(pattern, flags)
def _bre_pattern(pattern, rum_flags=0, binary=False):
    """Prepare regex search pattern flags."""

    flags = 0
    # Translate rumcore flags into their backrefs `bre` equivalents.
    for rum_flag, bre_flag in ((MULTILINE, bre.MULTILINE),
                               (IGNORECASE, bre.IGNORECASE),
                               (DOTALL, bre.DOTALL)):
        if rum_flags & rum_flag:
            flags |= bre_flag
    # Unicode semantics only apply to text searches; binary gets ASCII.
    flags |= bre.UNICODE if (not binary and rum_flags & UNICODE) else bre.ASCII
    return bre.compile(pattern, flags)
def _re_literal_pattern(pattern, rum_flags=0, binary=False):
    """Prepare literal search pattern flags."""

    flags = re.IGNORECASE if rum_flags & IGNORECASE else 0
    # Unicode semantics only apply to text searches; binary gets ASCII.
    flags |= re.UNICODE if (not binary and rum_flags & UNICODE) else re.ASCII
    # Escape the pattern so it is matched verbatim.
    return re.compile(re.escape(pattern), flags)
if REGEX_SUPPORT:
    def _regex_pattern(pattern, rum_flags=0, binary=False):
        """Prepare regex search pattern flags for regex module."""

        # Exactly one version flag is always set; V0 is the default.
        flags = regex.VERSION1 if rum_flags & VERSION1 else regex.VERSION0
        # Translate rumcore flags into their `regex` equivalents.
        for rum_flag, regex_flag in ((FULLCASE, regex.FULLCASE),
                                     (WORD, regex.WORD),
                                     (BESTMATCH, regex.BESTMATCH),
                                     (ENHANCEMATCH, regex.ENHANCEMATCH),
                                     (REVERSE, regex.REVERSE),
                                     (MULTILINE, regex.MULTILINE),
                                     (IGNORECASE, regex.IGNORECASE),
                                     (DOTALL, regex.DOTALL),
                                     (POSIX, regex.POSIX)):
            if rum_flags & rum_flag:
                flags |= regex_flag
        # Unicode semantics only apply to text searches; binary gets ASCII.
        flags |= regex.UNICODE if (not binary and rum_flags & UNICODE) else regex.ASCII
        return regex.compile(pattern, flags)

    def _bregex_pattern(pattern, rum_flags=0, binary=False):
        """Prepare regex search pattern flags for regex module."""

        # Exactly one version flag is always set; V0 is the default.
        flags = bregex.VERSION1 if rum_flags & VERSION1 else bregex.VERSION0
        # Translate rumcore flags into their backrefs `bregex` equivalents.
        for rum_flag, bregex_flag in ((FULLCASE, bregex.FULLCASE),
                                      (WORD, bregex.WORD),
                                      (BESTMATCH, bregex.BESTMATCH),
                                      (ENHANCEMATCH, bregex.ENHANCEMATCH),
                                      (REVERSE, bregex.REVERSE),
                                      (MULTILINE, bregex.MULTILINE),
                                      (IGNORECASE, bregex.IGNORECASE),
                                      (DOTALL, bregex.DOTALL),
                                      (POSIX, bregex.POSIX)):
            if rum_flags & rum_flag:
                flags |= bregex_flag
        # Unicode semantics only apply to text searches; binary gets ASCII.
        flags |= bregex.UNICODE if (not binary and rum_flags & UNICODE) else bregex.ASCII
        return bregex.compile(pattern, flags)

    def _regex_literal_pattern(pattern, rum_flags=0, binary=False):
        """Prepare literal search pattern flags."""

        # Exactly one version flag is always set; V0 is the default.
        flags = regex.VERSION1 if rum_flags & VERSION1 else regex.VERSION0
        if rum_flags & FULLCASE:
            flags |= regex.FULLCASE
        # Unicode semantics only apply to text searches; binary gets ASCII.
        flags |= regex.UNICODE if (not binary and rum_flags & UNICODE) else regex.ASCII
        if rum_flags & IGNORECASE:
            flags |= regex.IGNORECASE
        # Escape the pattern so it is matched verbatim.
        return regex.compile(regex.escape(pattern), flags)
class RummageException(Exception):
    """Base exception raised for rummage core failures."""
class RummageTestException(Exception):
    """Rummage exception raised when testing a replace plugin fails."""
class FileAttrRecord(namedtuple('FileAttrRecord', ['name', 'ext', 'size', 'modified', 'created', 'skipped', 'error'])):
    """File attributes from crawling; `skipped`/`error` flag files that were not searched."""
class FileInfoRecord(namedtuple('FileInfoRecord', ['id', 'name', 'ext', 'size', 'modified', 'created', 'encoding'])):
    """A record for tracking file info; `encoding` is the uppercased encoding name string."""
class FileRecord(namedtuple('FileRecord', ['info', 'match', 'error'])):
    """A record that reports file info, matching status, and errors."""
class MatchRecord(namedtuple('MatchRecord', ['lineno', 'colno', 'match', 'lines', 'context'])):
    """A record that contains match information: line number, column, offsets, context lines."""
class BufferRecord(namedtuple('BufferRecord', ['content', 'error'])):
    """A record with the string buffer replacements."""
class ErrorRecord(namedtuple('ErrorRecord', ['error'])):
    """A record for non-file related errors (e.g. setup failures)."""
class Search:
    """Search setup object.

    Holds an ordered list of `(pattern, replace, flags)` entries and a flag
    indicating whether this setup performs replacements.
    """

    def __init__(self, replace=False):
        """Setup search object as a search only or search and replace object."""
        self._entry = []
        self._is_replace = replace

    def add(self, search, replace=None, flags=0):
        """Add a search entry: `(pattern, replace text, masked flags)`."""
        self._entry.append(
            (
                search,
                ("" if replace is None else replace),
                flags & SEARCH_MASK
            )
        )

    def __str__(self):
        """Return the entry list as a string."""
        return str(self._entry)

    # Backward compatibility: the original method was misspelled
    # `__string__`, which is not a real dunder and was never invoked by
    # `str()`.  `__str__` above is the intended hook.
    __string__ = __str__

    def is_replace(self):
        """Is this a replace object."""
        return self._is_replace

    def __getitem__(self, index):
        """Get entry item."""
        return self._entry[index]

    def __len__(self):
        """Get length."""
        return len(self._entry)
class ReplacePlugin:
    """Rummage replace plugin.

    Base class for user supplied replace plugins.  Subclasses override
    `replace` (and optionally `on_init`) to provide custom replacements.
    """

    def __init__(self, file_info, flags):
        """Initialize with the target's `FileInfoRecord` and the search flags."""
        self.file_info = file_info
        self.flags = flags
        self.on_init()

    def _test(self, m):  # pragma: no cover
        """
        Used for testing and capturing the exception.

        Needs to raise the `RummageTestException`.
        This should not be touched by the user.
        """
        try:
            return self.replace(m)
        except Exception:
            import traceback
            raise RummageTestException(str(traceback.format_exc()))

    def on_init(self):
        """Override this function to add initialization setup."""

    def get_flags(self):
        """Get flags."""
        return self.flags

    def get_file_name(self):
        """Get file name."""
        return self.file_info.name

    def is_binary(self):
        """Is a binary search."""
        # `file_info.encoding` is the already-uppercased encoding name string
        # (see `FileInfoRecord` construction).  The original compared the
        # bound `str.encode` *method* to `'bin'`, which is always `False`.
        return self.file_info.encoding == 'BIN'

    def is_literal(self):
        """Is a literal search."""
        return self.flags & LITERAL

    def replace(self, m):
        """Make replacement; default returns the match unchanged."""
        return m.group(0)
class _RummageFileContent:
"""Either return a string or memory map file object."""
def __init__(self, name, size, encoding, file_content=None):
"""Initialize."""
self.name = name
self.size = size
self.encoding = encoding
self.file_obj = None
self.string_buffer = file_content
self.file_map = None
def __enter__(self):
"""Return content of either a memory map file or string."""
return self.string_buffer if self.string_buffer else self._read_file()
def __exit__(self, *args):
"""Close file obj and memory map object if open."""
if self.file_map is not None:
self.file_map.close()
if self.file_obj is not None:
self.file_obj.close()
def _get_encoding(self):
"""Get the encoding."""
enc = self.encoding.encode
if enc == 'utf-8':
enc = 'utf-8-sig'
elif enc.startswith('utf-16'):
enc = 'utf-16'
elif enc.startswith('utf-32'):
enc = 'utf-32'
return enc
def _read_bin(self):
"""Setup binary file reading with `mmap`."""
try:
self.file_obj = open(self.name, "rb")
if self.size != 0:
self.file_map = mmap.mmap(self.file_obj.fileno(), 0, access=mmap.ACCESS_READ)
except Exception:
# _read_bin has no other fallbacks, so we issue this if it fails.
raise RummageException("Could not access or read file.")
def _read_file(self):
"""Read the file in."""
try:
if self.encoding.encode == "bin":
self._read_bin()
else:
enc = self._get_encoding()
self.file_obj = codecs.open(self.name, 'r', encoding=enc)
return self.file_obj.read() if self.file_map is None else self.file_map
except RummageException:
# Bubble up `RummageExceptions`
raise
except Exception:
if self.encoding.encode != "bin":
if self.file_obj is not None:
self.file_obj.close()
self.encoding = text_decode.Encoding("bin", None)
self._read_bin()
return self.file_map
class _FileSearch:
    """Search for files."""

    # Translation table used when displaying binary snippets: control bytes
    # (0-31) and everything from DEL (127) through 255 render as the Unicode
    # replacement character; printable ASCII passes through.
    hex_tx_table = ("\ufffd" * 32) + "".join(chr(c) for c in range(32, 127)) + ("\ufffd" * 129)
def __init__(
self, search_obj, file_obj, file_id, flags, context, encoding,
backup_location, max_count, file_content=None, regex_mode=RE_MODE,
encoding_options=None
):
"""Initialize the file search object."""
self.abort = False
self.encoding_options = encoding_options
self.search_obj = search_obj
if (regex_mode in REGEX_MODES and not REGEX_SUPPORT) or (RE_MODE > regex_mode > BREGEX_MODE):
regex_mode = RE_MODE
self.regex_mode = regex_mode
self.flags = flags
self.boolean = bool(self.flags & BOOLEAN)
self.count_only = bool(self.flags & COUNT_ONLY)
self.truncate_lines = bool(self.flags & TRUNCATE_LINES)
self.process_binary = bool(self.flags & PROCESS_BINARY)
self.reverse = False
self.backup = bool(self.flags & BACKUP)
self.backup2folder = bool(self.flags & BACKUP_FOLDER)
self.backup_ext = ('.%s' % backup_location) if not self.backup2folder else DEFAULT_BAK
self.backup_folder = backup_location if self.backup2folder else DEFAULT_FOLDER_BAK
self.bom = None
self.context = (0, 0) if self.truncate_lines else context
# Prepare search
self.expand = None
self.literal = False
self.idx = file_id
self.file_obj = file_obj
self.max_count = max_count
self.encoding = encoding if encoding is not None else None
self.file_content = file_content
self.is_binary = False
self.current_encoding = None
self.is_unicode_buffer = self.file_content is not None and isinstance(self.file_content, str)
    def _get_binary_context(self, content, m):
        """Get context info for binary file.

        Returns a 5-tuple: the printable snippet around the match, the match
        (start, end) relative to the snippet, the (before, after) context
        counts (always 0 for binary), and the row/column (always 1/1).
        """
        row = 1
        col = 1
        before = 0
        after = 0
        start = m.start()
        end = m.end()
        eof = len(content) - 1
        match_len = m.end() - m.start()
        # Pad the snippet on both sides up to `TRUNCATE_LENGTH` total.
        # NOTE(review): true division yields float offsets here; they are
        # only truncated via `int()` at slice time below — confirm intended.
        overage = (TRUNCATE_LENGTH - match_len) / 2
        if overage > 0:
            start = m.start() - overage
            end = m.end() + overage
        # Clamp the snippet to the buffer's bounds.
        if start < 0:
            start = 0
        if end > eof:
            end = eof
        # Make match offsets relative to the snippet.
        match_start = m.start() - start
        match_end = match_start + m.end() - m.start()
        if self.truncate_lines:
            length = end - start
            if length > TRUNCATE_LENGTH:
                end = start + TRUNCATE_LENGTH
                length = TRUNCATE_LENGTH
            # Recalculate relative match start and end
            if match_start > length:
                match_start = length
            if match_end > length:
                match_end = TRUNCATE_LENGTH
        # Decode as ASCII with replacement chars so the snippet is printable.
        return (
            content[int(start):int(end)].decode('ascii', errors='replace').translate(self.hex_tx_table),
            (match_start, match_end),
            (before, after),
            row,
            col
        )
def _get_line_endings_to_point(self, point):
"""Get line ending up to the given point."""
try:
while point > self.last_line:
lend = next(self.line_iter)
self.line_map.append(lend.end() - 1)
self.last_line = lend.end()
except StopIteration:
self.last_line = point
pass
def _get_line_endings_count(self, count):
"""Get line ending up to the given point."""
try:
found = 0
for x in range(count):
lend = next(self.line_iter)
self.line_map.append(lend.end() - 1)
self.last_line = lend.end()
found += 1
except StopIteration:
pass
return found
    def _get_line_context(self, content, m):
        """Get context info about the line.

        Returns a 5-tuple: the context snippet, the match (start, end)
        relative to the snippet, the actual (before, after) line counts
        captured, and the 1-based row and column of the match start.
        """
        win_end = '\r\n'
        before, after = self.context
        # Make sure the line-ending map covers the match start.
        self._get_line_endings_to_point(m.start())
        row = self._get_row(m.start())
        col = m.start() + 1
        idx = row - 1
        lines = len(self.line_map) - 1
        start = 0
        end = len(content)
        # 1 index back gives us the start of this line
        # 2 gives us the start of the next
        start_idx = idx - before - 1
        end_idx = idx + after
        # On buffer boundary we may not be able to get
        # all of a files requested lines, as we will be beyond
        # map's index. Set index to None, as it is invalid,
        # and recalculate actual before.
        if start_idx < 0:
            before -= start_idx + 1
            start_idx = None
        # Extended beyond map's end
        if lines < end_idx:
            lines += self._get_line_endings_count(end_idx - lines)
            if lines < end_idx:
                after -= end_idx - lines
                end_idx = None
        # Calculate column of cursor and actual start and end of context
        if lines != -1:
            col_start = idx - 1
            col = m.start() - self.line_map[col_start] if col_start >= 0 else m.start() + 1
            # \r\n combinations usually show up as one char in editors and displays.
            # Decrement the column if we are at a line's end with one of these.
            # We will verify any line to account for mixed line endings.
            if (
                self.line_map and idx < len(self.line_map) and m.start() == self.line_map[idx] and
                m.start() != 0 and content[m.start() - 1: m.start() + 1] == win_end
            ):
                col -= 1
            if start_idx is not None:
                start = self.line_map[start_idx] + 1
            if end_idx is not None:
                end = self.line_map[end_idx]
        # Make the match start and match end relative to the context snippet
        match_start = m.start() - start
        match_end = match_start + m.end() - m.start()
        # Truncate long lines if desired
        if self.truncate_lines:
            length = end - start
            if length > TRUNCATE_LENGTH:
                end = start + TRUNCATE_LENGTH
                length = TRUNCATE_LENGTH
            # Recalculate relative match start and end
            if match_start > length:
                match_start = length
            if match_end > length:
                match_end = TRUNCATE_LENGTH
        # Return the context snippet, where the match occurs,
        # and how many lines of context before and after,
        # and the row and column of match start.
        return (
            content[start:end],
            (match_start, match_end),
            (before, after),
            row,
            col
        )
def _get_row(self, start):
"""Get line number where result is found in file."""
# Binary Search
mn = 0
mx = len(self.line_map) - 1
if mx == -1 or start <= self.line_map[mn]:
return mn + 1
if start > self.line_map[-1]:
return mx + 2
while mx - mn != 1:
idx = mn + ((mx - mn) >> 1)
if start > self.line_map[idx]:
mn = idx
else:
mx = idx
return mx + 1
def expand_match(self, m):
"""Expand the match."""
if self.is_plugin_replace:
return self.current_replace.replace(m)
elif self.literal:
return self.current_replace
elif self.expand:
return self.expand(m)
elif self.regex_format_replace:
return m.expandf(self.current_replace)
else:
return m.expand(self.current_replace)
    def _findall(self, file_content, search_pattern, replace_pattern, flags, file_info):
        """Find all occurrences of search pattern in file.

        Compiles the pattern for the active regex mode, prepares the replace
        template (plugin, literal, or compiled expansion), then yields each
        match object from `finditer`.
        """
        replace = None
        pattern = None
        # A non-string replace pattern is a plugin class: instantiate it.
        if (
            replace_pattern is not None and
            not isinstance(replace_pattern, (str, bytes))
        ):
            replace_pattern = replace_pattern(file_info, flags)
            self.is_plugin_replace = True
        else:
            self.is_plugin_replace = False
        self.regex_format_replace = self.regex_mode in FORMAT_MODES and bool(flags & FORMATREPLACE)
        if self.is_binary:
            # Binary searches require pure-ASCII patterns.
            try:
                pattern = bytes(search_pattern, 'ascii')
            except UnicodeEncodeError:
                raise RummageException('Unicode chars in binary search pattern')
            if replace_pattern is not None and not self.is_plugin_replace:
                try:
                    replace = bytes(replace_pattern, 'ascii')
                except UnicodeEncodeError:
                    raise RummageException('Unicode chars in binary replace pattern')
        else:
            pattern = search_pattern
            replace = replace_pattern
        self.current_replace = replace
        if pattern is not None:
            if bool(flags & LITERAL):
                self.literal = True
                if self.regex_mode in REGEX_MODES:
                    pattern = _regex_literal_pattern(pattern, flags, self.is_binary)
                else:
                    pattern = _re_literal_pattern(pattern, flags, self.is_binary)
            else:
                self.literal = False
                if self.regex_mode == BREGEX_MODE:
                    pattern = _bregex_pattern(pattern, flags, self.is_binary)
                    if replace is not None and not self.is_plugin_replace:
                        self.expand = pattern.compile(replace, (bregex.FORMAT if bool(flags & FORMATREPLACE) else 0))
                elif self.regex_mode == REGEX_MODE:
                    pattern = _regex_pattern(pattern, flags, self.is_binary)
                elif self.regex_mode == BRE_MODE:
                    pattern = _bre_pattern(pattern, flags, self.is_binary)
                    if replace is not None and not self.is_plugin_replace:
                        self.expand = pattern.compile(replace, (bre.FORMAT if bool(flags & FORMATREPLACE) else 0))
                else:
                    pattern = _re_pattern(pattern, flags, self.is_binary)
                    if replace is not None and not self.is_plugin_replace:
                        template = sre_parse.parse_template(replace, pattern)
                        self.expand = lambda m, t=template: sre_parse.expand_template(t, m)
        # A `regex`-backed pattern may search the buffer in reverse.
        if REGEX_SUPPORT and isinstance(pattern, (bregex._REGEX_TYPE, bregex.Bregex)):
            self.reverse = bool(pattern.flags & regex.REVERSE)
        else:
            self.reverse = False
        # Replacement stitching starts from the far end when reversed.
        self.text_offset = len(file_content) if self.reverse else 0
        yield from pattern.finditer(file_content)
def _update_buffer(self, content):
"""Update the buffer content."""
return BufferRecord((b'' if self.is_binary else '').join(content), None)
    def _update_file(self, file_name, content):
        """Update the file content.

        Optionally backs the file up (to a sibling `.bak`-style file or a
        backup folder), then writes the replaced content back out with the
        file's detected encoding.  `content` is a deque that is drained.
        """
        encoding = self.current_encoding
        if self.backup:
            if self.backup2folder:
                # Backup folder variant: mirror the file under the folder.
                dirname = os.path.join(os.path.dirname(file_name), self.backup_folder)
                basename = os.path.basename(file_name)
                backup = os.path.join(dirname, basename)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                shutil.copy2(file_name, backup + '.bak')
            else:
                # Extension variant: sibling file with the backup extension.
                backup = file_name + self.backup_ext
                shutil.copy2(file_name, backup)
        if encoding.bom:
            # Write the BOM first, then write in `UTF` format out in the specified order.
            with open(file_name, 'wb') as f:
                f.write(encoding.bom)
            with codecs.open(file_name, 'a', encoding=encoding.encode) as f:
                while content:
                    f.write(content.popleft())
        elif encoding.encode == 'bin':
            # Write bin file.
            with open(file_name, 'wb') as f:
                while content:
                    f.write(content.popleft())
        else:
            # If a user is adding Unicode to ASCII,
            # we write ASCII files out as `utf-8` to keep it from failing.
            # We choose `utf-8` because it is compatible with ASCII,
            # but we could just as easily have chosen `Latin-1` or `CP1252`.
            enc = encoding.encode
            with codecs.open(file_name, 'w', encoding=('utf-8' if enc == 'ascii' else enc)) as f:
                while content:
                    f.write(content.popleft())
    def _get_file_info(self, file_obj):
        """Create file info record.

        Resolves the encoding (buffer type, forced encoding, or detection)
        and sets `self.current_encoding`/`self.is_binary` as side effects.
        Returns `(FileInfoRecord, error)` where `error` is a captured
        exception tuple or `None`.
        """
        error = None
        file_info = None
        string_buffer = self.file_content is not None
        try:
            # Default until something better is determined below.
            self.current_encoding = text_decode.Encoding('bin', None)
            self.is_binary = False
            if string_buffer:
                # Buffer input: type of the buffer decides binary vs Unicode.
                self.current_encoding = text_decode.Encoding('unicode' if self.is_unicode_buffer else 'bin', None)
                self.is_binary = not self.is_unicode_buffer
            elif self.encoding is not None:
                # Forced encoding; still sniff the BOM for `UTF` variants so
                # byte order is honored.
                if self.encoding == 'bin':
                    self.current_encoding = text_decode.Encoding('bin', None)
                    self.is_binary = True
                elif self.encoding.startswith(('utf-8', 'utf-16', 'utf-32')):
                    bom = text_decode.inspect_bom(file_obj.name)
                    if bom and bom.encode.startswith(self.encoding):
                        self.current_encoding = bom
                    else:
                        self.current_encoding = text_decode.Encoding(self.encoding, None)
                else:
                    self.current_encoding = text_decode.Encoding(self.encoding, None)
            else:
                # Guess encoding and decode file
                encoding = text_decode.guess(
                    file_obj.name, verify=False, encoding_options=self.encoding_options
                )
                if encoding is not None:
                    if encoding.encode == "bin":
                        self.is_binary = True
                    self.current_encoding = encoding
        except Exception:
            error = get_exception()
        file_info = FileInfoRecord(
            self.idx,
            file_obj.name,
            os.path.splitext(file_obj.name)[1].lower().lstrip('.'),
            file_obj.size,
            file_obj.modified,
            file_obj.created,
            self.current_encoding.encode.upper()
        )
        return file_info, error
    def kill(self):
        """Kill process."""
        # Cooperative abort: the flag is polled between matches by the
        # search/replace generator loops.
        self.abort = True
    def search_and_replace(self):
        """Search and replace.

        Generator yielding a `FileRecord` per match while stitching the
        replaced text into `text`, applying any additional chained searches,
        then writing the file (or yielding a `BufferRecord` for buffers).
        """
        text = deque()
        is_buffer = True if self.file_content else False
        file_info, error = self._get_file_info(self.file_obj)
        if error is not None:
            if is_buffer:
                yield BufferRecord(None, error)
            else:
                yield FileRecord(file_info, None, error)
        elif not self.is_binary or self.process_binary:
            try:
                file_record_sent = False
                rum_content = _RummageFileContent(
                    file_info.name, file_info.size, self.current_encoding, self.file_content
                )
                self.file_content = None
                with rum_content as rum_buff:
                    skip = False
                    # Reading may have fallen back to binary; honor `process_binary`.
                    if self.is_binary is False and rum_content.encoding.encode == "bin":
                        self.is_binary = True
                        self.current_encoding = rum_content.encoding
                        if not self.process_binary:
                            skip = True
                        file_info = file_info._replace(encoding=self.current_encoding.encode.upper())
                    if not skip:
                        # First search/replace pass runs over the raw buffer.
                        pattern, replace, flags = self.search_obj[0]
                        if REGEX_SUPPORT and isinstance(pattern, bregex._REGEX_TYPE):
                            self.reverse = bool(pattern.flags & regex.REVERSE)
                        else:
                            self.reverse = False
                        for m in self._findall(rum_buff, pattern, replace, flags, file_info):
                            if self.reverse:
                                text.appendleft(rum_buff[m.end(0):self.text_offset])
                                text.appendleft(self.expand_match(m))
                                self.text_offset = m.start(0)
                            else:
                                text.append(rum_buff[self.text_offset:m.start(0)])
                                text.append(self.expand_match(m))
                                self.text_offset = m.end(0)
                            yield FileRecord(
                                file_info,
                                MatchRecord(
                                    0,  # line number
                                    0,  # column number
                                    (m.start(), m.end()),  # Position of match
                                    None,  # Line(s) in which match is found
                                    (0, 0)  # Number of lines shown before and after matched line(s)
                                ),
                                None
                            )
                            file_record_sent = True
                            if self.abort:
                                break
                        # Grab the rest of the file if we found things to replace.
                        if not self.abort and (text or len(self.search_obj) > 1):
                            if self.reverse:
                                text.appendleft(rum_buff[:self.text_offset])
                            else:
                                text.append(rum_buff[self.text_offset:])
                    # Additional chained replaces
                    count = 1
                    if not skip and not self.abort and len(self.search_obj) > 1:
                        for pattern, replace, flags in self.search_obj[1:]:
                            # Re-join the accumulated pieces and search again.
                            text2 = (b'' if self.is_binary else '').join(text)
                            text = deque()
                            for m in self._findall(text2, pattern, replace, flags, file_info):
                                if self.reverse:
                                    text.appendleft(text2[m.end(0):self.text_offset])
                                    text.appendleft(self.expand_match(m))
                                    self.text_offset = m.start(0)
                                else:
                                    text.append(text2[self.text_offset:m.start(0)])
                                    text.append(self.expand_match(m))
                                    self.text_offset = m.end(0)
                                yield FileRecord(
                                    file_info,
                                    MatchRecord(
                                        0,  # line number
                                        0,  # column number
                                        (m.start(), m.end()),  # Position of match
                                        None,  # Line(s) in which match is found
                                        (0, 0)  # Number of lines shown before and after matched line(s)
                                    ),
                                    None
                                )
                                file_record_sent = True
                                if self.abort:
                                    break
                            count += 1
                            # Grab the rest of the file if we found things to replace.
                            if not self.abort and (text or count < len(self.search_obj)):
                                if self.reverse:
                                    text.appendleft(text2[:self.text_offset])
                                else:
                                    text.append(text2[self.text_offset:])
                            if self.abort:
                                break
                    if not self.abort and text:
                        # Update the file or buffer depending on what is being used.
                        # For a buffer, we will actually return the content via a `BufferRecord`.
                        if is_buffer:
                            yield self._update_buffer(text)
                            file_record_sent = True
                        else:
                            self._update_file(
                                file_info.name, text
                            )
                    elif is_buffer:
                        # Buffers always return a Buffer record at the end
                        yield BufferRecord(None, None)
                        file_record_sent = True
                if not file_record_sent:
                    # Always return at least one record
                    yield FileRecord(file_info, None, None)
            except Exception:
                # Return a record with the failure attached
                if is_buffer:
                    yield BufferRecord(None, get_exception())
                else:
                    yield FileRecord(file_info, None, get_exception())
    def search(self):
        """Search target file or buffer returning a generator of results.

        Yields a `FileRecord` per match (with line/column context unless in
        boolean or count-only mode) and always yields at least one record
        per file.
        """
        file_info, error = self._get_file_info(self.file_obj)
        if error is not None:
            yield FileRecord(file_info, None, error)
        elif not self.is_binary or self.process_binary:
            try:
                file_record_sent = False
                rum_content = _RummageFileContent(
                    file_info.name, file_info.size, self.current_encoding, self.file_content
                )
                self.file_content = None
                with rum_content as rum_buff:
                    skip = False
                    # Reading may have fallen back to binary; honor `process_binary`.
                    if self.is_binary is False and rum_content.encoding.encode == "bin":
                        self.is_binary = True
                        self.current_encoding = rum_content.encoding
                        if not self.process_binary:
                            skip = True
                        file_info = file_info._replace(encoding=self.current_encoding.encode.upper())
                    if not skip:
                        # Lazily-built map of line endings for context lookups.
                        self.line_map = []
                        self.last_line = False
                        self.line_iter = None
                        get_context = self._get_binary_context if self.is_binary else self._get_line_context
                        for pattern, replace, flags in self.search_obj:
                            # Rewind a memory-mapped buffer between passes.
                            if hasattr(rum_buff, 'seek'):
                                rum_buff.seek(0)
                            if (
                                self.line_iter is None and not self.boolean and
                                not self.count_only and not self.is_binary
                            ):
                                self.line_iter = RE_LINE_ENDINGS.finditer(rum_buff)
                            for m in self._findall(rum_buff, pattern, replace, flags, file_info):
                                if not self.boolean and not self.count_only:
                                    # Get line related context.
                                    lines, match, context, row, col = get_context(rum_buff, m)
                                else:
                                    row = 1
                                    col = 1
                                    match = (m.start(), m.end())
                                    lines = None
                                    context = (0, 0)
                                file_record_sent = True
                                yield FileRecord(
                                    file_info,
                                    MatchRecord(
                                        row,  # line number
                                        col,  # column number
                                        match,  # Position of match
                                        lines,  # Line(s) in which match is found
                                        context  # Number of lines shown before and after matched line(s)
                                    ),
                                    None
                                )
                                if self.boolean:
                                    break
                                # Have we exceeded the maximum desired matches?
                                if self.max_count is not None:
                                    self.max_count -= 1
                                    if self.max_count == 0:
                                        break
                                if self.abort:
                                    break
                            if self.abort:
                                break
                if not file_record_sent:
                    yield FileRecord(file_info, None, None)
            except Exception:
                yield FileRecord(
                    file_info, None,
                    get_exception()
                )
def run(self):
"""Start the file search."""
try:
if self.search_obj.is_replace():
yield from self.search_and_replace()
else:
yield from self.search()
except Exception:
yield FileRecord(
FileInfoRecord(
self.idx,
self.file_obj.name,
None,
None,
None,
None,
None
),
None,
get_exception()
)
class _DirWalker(wcmatch.WcMatch):
    """Walk the directory, filtering files by pattern, size, times, and backups."""
    def on_init(
        self, file_regex_match=False, folder_regex_exclude_match=False, size=None, modified=None, created=None,
        backup_location='', backup_to_folder=False, regex_mode=RE_MODE, regex_ver=0
    ):
        """Store filter settings and precompile regex file/folder patterns."""
        self.file_regex_match = file_regex_match
        self.folder_regex_exclude_match = folder_regex_exclude_match
        # Fall back to the default `re` engine if the requested mode is
        # unavailable or out of range.
        if (regex_mode in REGEX_MODES and not REGEX_SUPPORT) or (RE_MODE > regex_mode or regex_mode > BREGEX_MODE):
            regex_mode = RE_MODE
        self.regex_ver = 1 if regex_ver else 0
        self.regex_mode = regex_mode
        self.size = (size[0], size[1]) if size is not None else size
        self.modified = modified
        self.created = created
        self.case_sensitive = wcmatch._wcparse.get_case(self.flags)
        self.backup2folder = backup_to_folder
        if backup_location:
            self.backup_ext = ('.%s' % backup_location.lower()) if not self.backup2folder else DEFAULT_BAK
            self.backup_folder = backup_location if self.backup2folder else DEFAULT_FOLDER_BAK
        else:
            self.backup_ext = None
            self.backup_folder = None
        # Precompile regex pattern checks when regex matching is requested.
        if self.pattern_file and self.file_regex_match:
            self.file_check = self._compile_regexp(self.pattern_file)
        if self.pattern_folder_exclude and self.folder_regex_exclude_match:
            self.folder_exclude_check = self._compile_regexp(self.pattern_folder_exclude)
def _compile_regexp(self, string, force_default=False):
r"""Compile or format the inclusion\exclusion pattern."""
pattern = None
if string:
if self.regex_mode == BREGEX_MODE:
flags = bregex.IGNORECASE if not self.case_sensitive else 0
flags |= bregex.VERSION1 if self.regex_ver else bregex.VERSION0
pattern = bregex.compile(string, flags)
elif self.regex_mode == REGEX_MODE:
flags = regex.IGNORECASE if not self.case_sensitive else 0
flags |= bregex.VERSION1 if self.regex_ver else bregex.VERSION0
pattern = regex.compile(string, flags)
elif self.regex_mode == BRE_MODE:
flags = bre.IGNORECASE if not self.case_sensitive else 0
pattern = bre.compile(string, flags)
else:
flags = re.IGNORECASE if not self.case_sensitive else 0
pattern = re.compile(string, flags)
return wcmatch._wcparse.WcRegexp((pattern,), tuple())
def _compare_value(self, limit_check, current):
"""Compare file attribute against limits."""
value_okay = False
qualifier = limit_check[0]
limit = limit_check[1]
if qualifier == "eq":
if current == limit:
value_okay = True
elif qualifier == "lt":
if current < limit:
value_okay = True
elif qualifier == "gt":
if current > limit:
value_okay = True
return value_okay
def _is_times_okay(self, pth):
"""Verify file times meet requirements."""
times_okay = False
mod_okay = False
cre_okay = False
if self.modified is None:
mod_okay = True
else:
mod_okay = self._compare_value(self.modified, self.modified_time)
if self.created is None:
cre_okay = True
else:
cre_okay = self._compare_value(self.created, self.created_time)
if mod_okay and cre_okay:
times_okay = True
return times_okay
def _is_size_okay(self, pth):
"""Verify file size meets requirements."""
size_okay = False
if self.size is None:
size_okay = True
else:
size_okay = self._compare_value(self.size, self.current_size)
return size_okay
def _is_backup(self, name, directory=False):
"""Check if file or directory is a `rumcore` backup."""
is_backup = False
if directory:
if self.backup_folder and self.backup2folder:
if util.platform() == "windows": # pragma: no cover
name = name.lower()
if name == self.backup_folder:
is_backup = True
else:
if self.backup_ext and not self.backup2folder:
if util.platform() == "windows": # pragma: no cover
name = name.lower()
if name.endswith(self.backup_ext):
is_backup = True
return is_backup
def compare_file(self, filename):
"""Compare filename."""
if self.file_regex_match:
return self.file_check.match(filename)
else:
return super().compare_file(filename)
def compare_directory(self, directory):
"""Compare folder."""
if self.folder_regex_exclude_match:
return not self.folder_exclude_check.match(directory)
else:
return super().compare_directory(directory)
def on_validate_file(self, base, name):
"""Validate file override."""
valid = not self._is_backup(name)
fullname = os.path.join(base, name)
self.created_time, self.modified_time, self.current_size = util.get_stat(fullname)
if valid:
valid = self._is_size_okay(fullname)
if valid:
valid = self._is_times_okay(fullname)
return valid
def on_validate_directory(self, base, name):
"""Validate folder override."""
return not self._is_backup(name, True)
def on_skip(self, base, name):
"""On skip."""
return FileAttrRecord(
os.path.join(base, name),
None,
None,
None,
None,
True,
None
)
def on_error(self, base, name):
"""On error."""
return FileAttrRecord(
os.path.join(base, name),
None,
None,
None,
None,
False,
get_exception()
)
def on_match(self, base, name):
"""On match."""
f = os.path.join(base, name)
return FileAttrRecord(
f,
os.path.splitext(f)[1].lower().lstrip('.'),
self.current_size,
self.modified_time,
self.created_time,
False,
None
)
class Rummage:
    """Perform the rummaging: crawl the target and search/replace in files."""
def __init__(
self, target, searches, file_pattern=None, folder_exclude=None, limit=1000,
flags=0, context=(0, 0), max_count=None, encoding=None, size=None,
modified=None, created=None, backup_location=None, regex_mode=RE_MODE,
encoding_options=None
):
"""Initialize Rummage object."""
self.abort = False
self.searcher = None
self.path_walker = None
if (regex_mode in REGEX_MODES and not REGEX_SUPPORT) or (RE_MODE > regex_mode > BREGEX_MODE):
regex_mode = RE_MODE
self.regex_mode = regex_mode
if encoding_options is None:
encoding_options = text_decode.DEFAULT_ENCODING_OPTIONS
self.encoding_options = encoding_options
self.search_params = searches
self.file_flags = flags & FILE_MASK
# `wcmatch` flags
self.wcmatch_flags = wcmatch.I | wcmatch.R
if self.file_flags & EXTMATCH:
self.wcmatch_flags |= wcmatch.E
if self.file_flags & BRACE:
self.wcmatch_flags |= wcmatch.B
if self.file_flags & FILECASE:
self.wcmatch_flags |= wcmatch.C
if self.file_flags & DIRPATHNAME:
self.wcmatch_flags |= wcmatch.DP
if self.file_flags & FILEPATHNAME:
self.wcmatch_flags |= wcmatch.FP
if self.file_flags & GLOBSTAR:
self.wcmatch_flags |= wcmatch.G
if self.file_flags & MATCHBASE:
self.wcmatch_flags |= wcmatch.X
if self.file_flags & MINUSNEGATE:
self.wcmatch_flags |= wcmatch.M
if self.file_flags & RECURSIVE:
self.wcmatch_flags |= wcmatch.RV
if self.file_flags & SHOW_HIDDEN:
self.wcmatch_flags |= wcmatch.HD
if self.file_flags & FOLLOW_LINKS:
self.wcmatch_flags |= wcmatch.SL
self.context = context
self.encoding = self._verify_encoding(encoding) if encoding is not None else None
self.skipped = 0
self.backup_location = backup_location
if not self.backup_location or not isinstance(self.backup_location, str):
self.backup_location = DEFAULT_FOLDER_BAK if bool(self.file_flags & BACKUP_FOLDER) else DEFAULT_BAK
self.buffer_input = bool(self.file_flags & BUFFER_INPUT)
self.current_encoding = None
self.idx = -1
self.records = -1
self.max = int(max_count) if max_count is not None else None
self.target = os.path.abspath(target) if not self.buffer_input else target
file_regex_match = bool(self.file_flags & FILE_REGEX_MATCH)
dir_regex_match = bool(self.file_flags & DIR_REGEX_MATCH)
self.path_walker = None
self.is_binary = False
self.files = deque()
self.setup_error = None
try:
# Initialize search objects:
# - `_DirWalker` for if target is a folder
# - Append `FileAttrRecord` if target is a file or buffer
if not self.buffer_input and os.path.isdir(self.target):
self.path_walker = _DirWalker(
self.target,
file_pattern=file_pattern,
exclude_pattern=folder_exclude,
flags=self.wcmatch_flags,
limit=limit,
file_regex_match=file_regex_match,
folder_regex_exclude_match=dir_regex_match,
size=size,
modified=modified,
created=created,
backup_location=self.backup_location if bool(self.file_flags & BACKUP) else None,
backup_to_folder=bool(self.file_flags & BACKUP_FOLDER),
regex_mode=self.regex_mode,
regex_ver=0 if flags & VERSION1 else 1
)
elif not self.buffer_input and os.path.isfile(self.target):
try:
c_time, m_time = util.get_stat(self.target)
self.files.append(
FileAttrRecord(
self.target,
os.path.splitext(self.target)[1].lower().lstrip('.'),
os.path.getsize(self.target),
m_time,
c_time,
False,
None
)
)
except Exception:
self.files.append(
FileAttrRecord(
self.target,
None,
None,
None,
None,
False,
get_exception()
)
)
elif self.buffer_input:
self.files.append(
FileAttrRecord(
None,
None,
len(self.target),
ctime(),
ctime(),
False,
None
)
)
except Exception:
self.setup_error = ErrorRecord(get_exception())
def _verify_encoding(self, encoding):
"""Verify the encoding is okay."""
enc = encoding.lower()
# Normalize `UTFx` encodings as we detect order and BOMs later.
if encoding in _U8:
enc = 'utf-8'
elif encoding in _U16:
enc = 'utf-16'
elif encoding in _U16BE:
enc = 'utf-16-be'
elif encoding in _U16LE:
enc = 'utf-16-le'
elif encoding in _U32:
enc = 'utf-32'
elif encoding in _U32BE:
enc = 'utf-32-be'
elif encoding in _U32LE:
enc = 'utf-32-le'
if enc != 'bin':
codecs.lookup(enc)
return enc
def get_status(self):
"""Return number of files searched out of current number of files crawled."""
return self.idx + 1, self.skipped, self.records + 1
def kill(self):
    """Signal abort and propagate the kill to any active sub-workers."""
    self.abort = True
    # Order matters only for symmetry with construction: searcher first,
    # then the directory crawler.
    for worker in (self.searcher, self.path_walker):
        if worker:
            worker.kill()
def _get_next_file(self):
    """Pop and return the oldest queued file record, advancing the index."""
    self.idx += 1
    return self.files.popleft()
def search_file(self, content_buffer=None):
    """
    Search the next queued file, yielding its result records.

    Spawns a `_FileSearch` worker for the file, counts error-free records,
    decrements the remaining match budget (`self.max`) for each match, and
    kills the whole crawl once that budget reaches zero.
    """
    file_info = self._get_next_file()
    if file_info is None:
        return
    self.searcher = _FileSearch(
        self.search_params,
        file_info,
        self.idx,
        self.file_flags,
        self.context,
        self.encoding,
        self.backup_location,
        self.max,
        content_buffer,
        self.regex_mode,
        self.encoding_options
    )
    # `self.max` is either None (unlimited) or a positive countdown; it is
    # never reassigned to None below, so test once up front.
    budgeted = self.max is not None
    for record in self.searcher.run():
        if record.error is None:
            self.records += 1
            if budgeted and record.match is not None:
                self.max -= 1
        yield record
        if budgeted and self.max == 0:
            self.kill()
def walk_files(self):
    """
    Crawl the directory, interleaving searching with crawling.

    Skipped and errored entries are reported immediately; everything else is
    queued and searched in batches of `FOLDER_LIMIT` so results stream out
    while the crawl is still running.  A kill signal (`self.abort`) stops the
    crawl and discards anything still queued.
    """
    FOLDER_LIMIT = 100
    for entry in self.path_walker.imatch():
        if isinstance(entry, FileAttrRecord) and entry.skipped:
            # Crawler pre-filtered this file; report it without searching.
            self.idx += 1
            self.records += 1
            self.skipped += 1
            yield entry
        elif entry.error:
            # Crawl-time error (stat failure etc.); report as-is.
            self.idx += 1
            self.records += 1
            yield entry
        else:
            self.files.append(entry)
        if self.abort:
            break
        if len(self.files) >= FOLDER_LIMIT:
            # Drain one batch before crawling further.
            remaining = FOLDER_LIMIT
            while remaining and not self.abort:
                remaining -= 1
                for record in self.search_file():
                    yield record
    # Clear files if kill was signaled.
    if self.abort:
        self.files.clear()
    # Finish searching whatever is left in the queue.
    while self.files and not self.abort:
        for record in self.search_file():
            yield record
def find(self):
    """
    Walk through a given directory searching files via the provided pattern.

    If given a file directly, it will search the file only.
    Return the results of each file via a generator.

    Any error captured during construction (`self.setup_error`) is yielded
    immediately instead of searching.
    """
    self.alive = True
    self.idx = -1
    self.skipped = 0
    if self.setup_error:
        # Initialization failed; surface the error record and stop.
        yield self.setup_error
    else:
        if len(self.files):
            # Single target: a file or a buffer was queued during setup.
            if len(self.search_params):
                # Search the file; for buffer input, pass the buffer content.
                for result in self.search_file(self.target if self.buffer_input else None):
                    yield result
            else:
                # Single file with no search pattern, so just return the file
                self.records += 1
                yield self._get_next_file()
        else:
            # Directory to crawl
            if len(self.search_params):
                # Crawl directory and search files.
                try:
                    for result in self.walk_files():
                        yield result
                except Exception: # pragma: no cover
                    yield ErrorRecord(get_exception())
            else:
                # No search pattern, so just return files that *would* be searched.
                for f in self.path_walker.imatch():
                    self.idx += 1
                    self.records += 1
                    if isinstance(f, FileAttrRecord) and f.skipped:
                        self.skipped += 1
                    yield f
                    if self.abort:
                        break
                if self.abort:
                    self.files.clear()
                # Trust the crawler's own tally for the final skipped count.
                self.skipped = self.path_walker.get_skipped()
| {
"content_hash": "b654c56e4923bafabf6f9773ece914f7",
"timestamp": "",
"source": "github",
"line_count": 1602,
"max_line_length": 119,
"avg_line_length": 35.51622971285892,
"alnum_prop": 0.5140165562331933,
"repo_name": "facelessuser/Rummage",
"id": "23bd1389c2464f437f7dad473bd2aa1eb04835a4",
"size": "56921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rummage/lib/rumcore/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31788"
},
{
"name": "HTML",
"bytes": "146911"
},
{
"name": "JavaScript",
"bytes": "17430"
},
{
"name": "Python",
"bytes": "1087411"
}
],
"symlink_target": ""
} |
"""
tables
~~~~~~~~~~~~~~
Process Tables (add default 'left' alignment and inline styles)
"""
from flask.ext.markdown import Extension
from markdown.extensions.tables import TableProcessor, etree
class BSTableProcessor(TableProcessor):
    """ Process Tables in a Bootstrap compatible way """

    def _build_row(self, row, parent, align, border):
        """ Given a row of text, build table cells. """
        tr = etree.SubElement(parent, 'tr')
        # Header rows get <th>, body rows get <td>.
        cell_tag = 'th' if parent.tag == 'thead' else 'td'
        cells = self._split_row(row, border)
        # Iterate over the alignment spec rather than the parsed cells so
        # every row ends up with the same number of columns.
        for column, alignment in enumerate(align):
            cell = etree.SubElement(tr, cell_tag)
            if column < len(cells):
                cell.text = cells[column].strip()
            else:
                cell.text = ""
            # Default to left alignment, applied as an inline style.
            cell.set('style', "text-align: %s;" % (alignment or 'left'))
class TableExtension(Extension):
    """ Markdown extension wiring `BSTableProcessor` into the block parser. """

    def extendMarkdown(self, md, md_globals):
        """ Add an instance of BSTableProcessor to BlockParser. """
        processor = BSTableProcessor(md.parser)
        md.parser.blockprocessors.add('table', processor, '<hashheader')
| {
"content_hash": "80eb3d8450e93ec32d05731d78fc478e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 64,
"avg_line_length": 27.94736842105263,
"alnum_prop": 0.6704331450094162,
"repo_name": "kazeeki/proposer",
"id": "9a97bc1e6078b478d59cdd1e90b7a579c4c393b9",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tables.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6611"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext as _
from django.utils.text import truncate_words
from django.utils import simplejson
from django.db import models
from django import forms
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.conf import settings
from filer.settings import FILER_STATICMEDIA_PREFIX
class AdminFolderWidget(ForeignKeyRawIdWidget):
    """
    Raw-id admin widget for selecting a filer folder.

    Renders a hidden input, a lookup link that opens the filer directory
    listing in a popup (``select_folder`` mode), a clear button, and the
    inline javascript that wires them together.
    """
    choices = None
    input_type = 'hidden'
    is_hidden = True

    def render(self, name, value, attrs=None):
        """Render the widget HTML/JS for field `name` with current `value`."""
        obj = self.obj_for_value(value)
        # Normalize `attrs` BEFORE any attribute access; the original read
        # `attrs.get('id')` first, which raised AttributeError whenever the
        # widget was rendered without attrs.  The dead `required = self.attrs`
        # assignment that sat here has been removed.
        if attrs is None:
            attrs = {}
        css_id = attrs.get('id')
        css_id_folder = "%s_folder" % css_id
        css_id_description_txt = "%s_description_txt" % css_id
        related_url = reverse('admin:filer-directory_listing-root')
        params = self.url_parameters()
        params['select_folder'] = 1
        if params:
            url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = ''
        # `has_key` is Python-2-only; the membership operator is equivalent.
        if 'class' not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField'  # The JavaScript looks for this hook.
        output = []
        if obj:
            output.append(u'Folder: <span id="%s">%s</span>' % (css_id_description_txt, obj.name))
        else:
            output.append(u'Folder: <span id="%s">none selected</span>' % css_id_description_txt)
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' %
                      (related_url, url, name))
        output.append('<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
        output.append('</br>')
        clearid = '%s_clear' % css_id
        output.append('<img id="%s" src="%simg/admin/icon_deletelink.gif" width="10" height="10" alt="%s" title="%s"/>' % (clearid, settings.ADMIN_MEDIA_PREFIX, _('Clear'), _('Clear')))
        output.append('<br />')
        super_attrs = attrs.copy()
        output.append(super(ForeignKeyRawIdWidget, self).render(name, value, super_attrs))
        noimgurl = '%sicons/nofile_32x32.png' % FILER_STATICMEDIA_PREFIX
        # Hide the raw input, wire the clear button, and drop the "add" plus
        # icon the admin would otherwise attach to the raw-id field.
        js = '''<script type="text/javascript">django.jQuery("#%(id)s").hide();
django.jQuery("#%(id)s_clear").click(function(){
    django.jQuery("#%(id)s").removeAttr("value");
    django.jQuery("#%(foldid)s").attr("src", "%(noimg)s");
    django.jQuery("#%(descid)s").html("");
});
django.jQuery(document).ready(function(){
    var plus = django.jQuery("#add_%(id)s");
    if (plus.length){
        plus.remove();
    }
});
</script>'''
        output.append(js % {'id': css_id, 'foldid': css_id_folder,
                            'noimg': noimgurl, 'descid': css_id_description_txt})
        return mark_safe(u''.join(output))

    def label_for_value(self, value):
        """Return a short HTML label for the currently selected folder."""
        obj = self.obj_for_value(value)
        return ' <strong>%s</strong>' % truncate_words(obj, 14)

    def obj_for_value(self, value):
        """Resolve `value` (a raw id) to a model instance, or None on failure."""
        try:
            key = self.rel.get_related_field().name
            obj = self.rel.to._default_manager.get(**{key: value})
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any lookup failure still yields None.
            obj = None
        return obj

    class Media:
        js = (FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)
class AdminFolderFormField(forms.ModelChoiceField):
    """Model-choice form field that always renders with `AdminFolderWidget`."""
    widget = AdminFolderWidget

    def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
        self.rel = rel
        self.queryset = queryset
        self.to_field_name = to_field_name
        self.max_value = None
        self.min_value = None
        # Discard any caller-supplied widget; this field forces its own.
        kwargs.pop('widget', None)
        forms.Field.__init__(self, widget=self.widget(rel), *args, **kwargs)

    def widget_attrs(self, widget):
        """Propagate the required flag onto the widget; no extra HTML attrs."""
        widget.required = self.required
        return {}
from filer.models import Folder
class FilerFolderField(models.ForeignKey):
    """ForeignKey hard-wired to `Folder`, using the admin folder form field."""
    default_form_class = AdminFolderFormField
    default_model_class = Folder

    def __init__(self, **kwargs):
        # The target model is fixed; callers only supply field options.
        return super(FilerFolderField, self).__init__(Folder, **kwargs)

    def formfield(self, **kwargs):
        # Standard pattern: set up defaults while letting the caller
        # override them.
        defaults = {
            'form_class': self.default_form_class,
            'rel': self.rel,
        }
        defaults.update(kwargs)
        return super(FilerFolderField, self).formfield(**defaults)

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        from south.modelsinspector import introspector
        # Introspect as a plain ForeignKey, since we inherit from it.
        field_class = "django.db.models.fields.related.ForeignKey"
        args, kwargs = introspector(self)
        return (field_class, args, kwargs)
| {
"content_hash": "bd0c833a63fb696d59b022feb1fc79e1",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 186,
"avg_line_length": 42.774193548387096,
"alnum_prop": 0.6167043740573153,
"repo_name": "ionelmc/django-filer",
"id": "dc7c9cc40369b269c4499484ca3227f97ab65f3a",
"size": "5304",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "filer/fields/folder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "327644"
},
{
"name": "Python",
"bytes": "343093"
}
],
"symlink_target": ""
} |
import numpy as np
from galry import *
from galry.galryplot import GalryPlot
"""
%load_ext ipynbgalry
from IPython.display import display
from galry.galryplot import GalryPlot
a = GalryPlot()
display(a)
"""
# Python handler
def get_json(plot=None):
    """Serialize the displayed *plot* object for the Javascript side.

    Returns the JSON string produced by the plot's own serializer, tagged
    with the Javascript handler class that will load and render it.
    """
    handler_name = 'GalryPlotHandler'
    return plot.serialize(handler=handler_name)
# Guard flag so the formatter handler is registered at most once per session.
_loaded = False
def load_ipython_extension(ip):
    """Load the extension in IPython (idempotent via the `_loaded` guard)."""
    global _loaded
    if _loaded:
        return
    # Get the JSON display formatter.
    mime = 'application/json'
    formatter = ip.display_formatter.formatters[mime]
    # Register the handler.  Arguments: the full module name where the class
    # is defined, the class name, and the Python handler that turns an
    # instance of that class into a JSON string.
    formatter.for_type_by_name('galry.galryplot', 'GalryPlot', get_json)
    _loaded = True
| {
"content_hash": "bf9844eae42762f6b5c06ca0d1e306a7",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 28.44736842105263,
"alnum_prop": 0.6845513413506013,
"repo_name": "rossant/galry",
"id": "8d2042ad536ba633d6a0acd436325e9bbe25ddfc",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experimental/ipynb/extensions/ipynbgalry/load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "24569"
},
{
"name": "Python",
"bytes": "397431"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
} |
import json
import unittest
from ddt import ddt, data, unpack
from django.test import TestCase, Client
from django.test.utils import override_settings
from django.urls import reverse
from django.contrib.auth.models import User
from django.utils import timezone
from django.http.response import HttpResponseNotFound
from django.utils.safestring import mark_safe
from unittest.mock import patch, Mock
import injections
from ct.models import Course, Unit, Lesson, UnitLesson, CourseUnit, Role, Concept
from ct.templatetags.ct_extras import md2html
from ..models import EnrollUnitCode, Message
from ..serializers import (
InternalMessageSerializer,
InputSerializer,
MessageSerializer,
ChatProgressSerializer,
ChatHistorySerializer,
LessonSerializer,
)
from ..services import TestHandler
from ..models import Chat
from ..fsm_plugin.chat import get_specs
from ..fsm_plugin.additional import get_specs as get_specs_additional
from ..fsm_plugin.resource import END, get_specs as get_specs_resource
from ..fsm_plugin.faq import get_specs as get_specs_faq
from ..views import ChatInitialView, CourseletPreviewView, CheckChatInitialView
class CustomTestCase(TestCase):
    """
    Shared fixture for the chat view tests.

    Builds a minimal courselet: a user, a public course with one released
    course unit, a concept, an ordered lesson (unicode body + external URL),
    an unordered resource lesson, and the FSM graphs the chat flow requires.
    """

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user('test', 'test@test.com', 'test')
        # The chat FSM graphs must exist before any chat can be initialized.
        get_specs()[0].save_graph(self.user.username)
        get_specs_additional()[0].save_graph(self.user.username)
        get_specs_resource()[0].save_graph(self.user.username)
        get_specs_faq()[0].save_graph(self.user.username)
        self.unit = Unit(title='Test title', addedBy=self.user)
        self.unit.save()
        self.course = Course(title='Test title',
                             description='test description',
                             access='Public',
                             enrollCode='111',
                             lockout='222',
                             addedBy=self.user)
        self.course.save()
        # Attach the unit to the course and release it immediately.
        self.courseunit = CourseUnit(
            unit=self.unit, course=self.course,
            order=0, addedBy=self.user, releaseTime=timezone.now()
        )
        self.courseunit.save()
        self.concept = Concept.new_concept('bad', 'idea', self.unit, self.user)
        lesson = Lesson(title='title', text='きつね', addedBy=self.user, url='/test/url/')
        lesson.save()
        self.unitlesson = UnitLesson(
            unit=self.unit, order=0, lesson=lesson, addedBy=self.user, treeID=lesson.id
        )
        self.unitlesson.save()
        # A UnitLesson without `order` acts as a side-panel resource.
        resource_lesson = Lesson(
            title='title for resource', text='text for resource', addedBy=self.user
        )
        resource_lesson.save()
        self.resource_unitlesson = UnitLesson(
            unit=self.unit, lesson=resource_lesson, addedBy=self.user, treeID=resource_lesson.id
        )
        self.resource_unitlesson.save()
        # TODO remove this later
        self.unit_dummy = Unit(title='Test title', addedBy=self.user)
        self.unit_dummy.save()
        lesson_dummy = Lesson(title='Hope you\'ve overcame the misconception',
                              text='Hope you\'ve overcame the misconception',
                              addedBy=self.user, url='/test/url/')
        lesson_dummy.save()
        self.unitlesson_dummy = UnitLesson(
            unit=self.unit_dummy, lesson=lesson_dummy, addedBy=self.user, treeID=lesson_dummy.id
        )
        self.unitlesson_dummy.save()

    @staticmethod
    def compile_html(resource):
        """Render a resource lesson to HTML the same way the chat UI does."""
        # Prepend a reST "Read more" link when the lesson has an external URL.
        if resource.lesson.url:
            raw_html = '`Read more <{0}>`_ \n\n{1}'.format(
                resource.lesson.url,
                resource.lesson.text
            )
        else:
            raw_html = resource.lesson.text
        return md2html(raw_html)
@override_settings(SUSPEND_SIGNALS=True)
class MainChatViewTests(CustomTestCase):
    """
    Tests for main view.

    Should enroll user if not enrolled.
    Should render main_view.html template.
    """
    fixtures = ['chat/tests/fixtures/initial_data_enchanced.json']

    def get_course_unit(self):
        """Return the fixture-provided course unit used by most tests here."""
        return CourseUnit.objects.get(id=1)

    def test_main_view_enroll(self):
        """
        MainView should enroll Student that comes w/ enrollCode.
        """
        course_unit = self.get_course_unit()
        enroll_code = EnrollUnitCode.get_code(course_unit)
        self.client.login(username='test', password='test')
        # Initialize a chat session via the AJAX API to obtain a chat id.
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content.get('id')
        response = self.client.get(reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True)
        self.assertTemplateUsed(response, 'chat/main_view.html')
        # Visiting with an enroll code must create an ENROLLED role.
        self.assertTrue(
            Role.objects.filter(role=Role.ENROLLED, user=self.user, course=course_unit.course).exists()
        )

    def test_not_enroll_second_time(self):
        """
        Should not enroll second time if already enrolled.
        """
        enroll_code = EnrollUnitCode.get_code(self.get_course_unit())
        self.client.login(username='test', password='test')
        # Pre-create the enrollment that would otherwise be added by the view.
        role = Role(role=Role.ENROLLED, course=self.course, user=self.user)
        role.save()
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True)
        # Still exactly one ENROLLED role after visiting the chat.
        self.assertEqual(
            Role.objects.filter(role=Role.ENROLLED, user=self.user, course=self.course).count(),
            1
        )

    def test_only_logged_in(self):
        """
        Only logged in users can access chat ui.
        """
        enroll_code = EnrollUnitCode.get_code(self.get_course_unit())
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content.get('id')
        # Anonymous users get no chat id, but not a 404 either.
        self.assertIsNone(chat_id)
        self.assertFalse(response.status_code == 404)
        response = self.client.get(reverse('chat:chat_enroll', args=(enroll_code,)), follow=True)
        # Anonymous access is redirected to the login page.
        self.assertTemplateUsed(response, 'psa/new_custom_login.html')

    def test_404_on_non_existent_enroll_code(self):
        """
        Should return 404 if enrollCode is not exists.
        """
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': 'nonexistentenrollcode',
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertTrue(response.status_code == 404)
        # The API body must still be valid JSON even on 404.
        json.loads(response.content)
        response = self.client.get(
            reverse('chat:chat_enroll', args=('nonexistentenrollcode', )), follow=True
        )
        self.assertIsInstance(response, HttpResponseNotFound)

    def test_passed_correct_variables(self):
        """
        Check that view fill template w/ correct varibles.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        self.assertNotIsInstance(response, HttpResponseNotFound)
        response = self.client.get(reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True)
        # Expected template context values derived from the setUp unit.
        variables = (
            ('course', self.course),
            ('unit', self.unit),
            ('lesson_cnt', len(self.unit.get_exercises())),
            ('duration', len(self.unit.get_exercises()) * 3),
        )
        for pair in variables:
            try:
                val_check = response.context[pair[0]]
            except KeyError:
                val_check = None
            self.assertEqual(val_check, pair[1])
        self.assertIn('fsmstate', response.context)
        self.assertIn('lessons', response.context)
        self.assertIn('chat_id', response.context)
        self.assertIn('will_learn', response.context)
        self.assertIn('need_to_know', response.context)
        self.assertIn('chat', response.context)
        self.assertIn('chat_sessions', response.context)

    def test_chat_init_api(self):
        """Init API should return a chat id and a session for a valid code."""
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        # Two extra ORCT lessons give the chat a multi-step flow.
        lesson1 = Lesson(title='title1', text='きつね', kind='orct', addedBy=self.user, url='/test/url/')
        lesson1.save()
        lesson2 = Lesson(title='title2', text='きつね', kind='orct', addedBy=self.user, url='/test/url/')
        lesson2.save()
        self.unitlesson1 = UnitLesson(
            unit=self.unit, order=1, lesson=lesson1, addedBy=self.user, treeID=lesson1.id
        )
        self.unitlesson1.save()
        self.unitlesson2 = UnitLesson(
            unit=self.unit, order=2, lesson=lesson2, addedBy=self.user, treeID=lesson2.id
        )
        self.unitlesson2.save()
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        self.assertTrue(response.status_code == 200)
        self.assertTrue(bool(json_content['id']))
        self.assertTrue(bool(json_content['session']))

    # @patch('chat.views.ChatInitialView.next_handler.start_point', return_value=Mock())
    @patch('chat.api.InitNewChat.get_view')
    def test_next_handler_start_point_called_once(self, get_view):
        """
        Check that ChatInitialView.next_handler.start_point called once.
        """
        course_unit = self.get_course_unit()
        enroll_code = EnrollUnitCode.get_code(course_unit)
        # Replace the FSM handler so start_point invocations can be counted.
        start_point = Mock()
        view = ChatInitialView()
        view.next_handler = Mock(start_point=start_point)
        get_view.return_value = view
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        start_point.assert_called_once()
        chat_id = json_content.get('id')
        self.assertTrue(response.status_code == 200)
        start_point.assert_called_once()
        # Re-entering an existing chat must NOT start the FSM again.
        self.client.get(reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True)
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        start_point.assert_called_once()
@override_settings(SUSPEND_SIGNALS=True)
class MessagesViewTests(CustomTestCase):
"""
Test for MessagesView API.
"""
fixtures = ['chat/tests/fixtures/initial_data_enchanced.json']
def _push_continue(self, next_url, chat_id):
    """
    Click Continue button to roll forward to the next Message.

    Returns (url_of_next_input, json_payload_of_next_message).
    """
    put_response = self.client.put(
        next_url,
        data=json.dumps({"option": 1, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    follow_url = json.loads(put_response.content)['input']['url']
    get_response = self.client.get(follow_url, {'chat_id': chat_id}, follow=True)
    payload = json.loads(get_response.content)
    return payload['input']['url'], payload
def test_positive_case(self):
    """
    Check positive case for MessagesView response.
    """
    enroll_code = EnrollUnitCode.get_code(self.courseunit)
    self.client.login(username='test', password='test')
    response = self.client.get(
        reverse(
            'chat:init_chat_api',
            kwargs={
                'enroll_key': enroll_code,
                'chat_id': 0
            }
        ),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    json_content = json.loads(response.content)
    chat_id = json_content['id']
    self.assertNotIsInstance(response, HttpResponseNotFound)
    response = self.client.get(
        reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
    )
    response = self.client.get(reverse('chat:history'), {'chat_id': chat_id}, follow=True)
    json_content = json.loads(response.content)
    # Query the detail endpoint for the second message in the history.
    msg_id = json_content['addMessages'][1]['id']
    response = self.client.get(
        reverse('chat:messages-detail', args=(msg_id,)),
        {'chat_id': chat_id},
        follow=True
    )
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(json.loads(response.content)['addMessages']), 3)
    # There are two messages - congrats and look ant resources
    self.assertIn('Congratulations!', json_content['addMessages'][2]['html'])
    self.assertIn('Please look over the available resources in the side panel.', json_content['addMessages'][3]['html'])
def test_permission_denied(self):
    """
    Check for permissions check.

    User from request should be the same as message owner.
    """
    enroll_code = EnrollUnitCode.get_code(self.courseunit)
    self.client.login(username='test', password='test')
    response = self.client.get(
        reverse(
            'chat:init_chat_api',
            kwargs={
                'enroll_key': enroll_code,
                'chat_id': 0
            }
        ),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    json_content = json.loads(response.content)
    chat_id = json_content['id']
    self.assertNotIsInstance(response, HttpResponseNotFound)
    response = self.client.get(
        reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
    )
    response = self.client.get(reverse('chat:history'), {'chat_id': chat_id}, follow=True)
    json_content = json.loads(response.content)
    msg_id = json_content['addMessages'][1]['id']
    # Switch to a different user and try to read the first user's message.
    self.user = User.objects.create_user('middle_man', 'test@test.com', 'test')
    self.client.login(username='middle_man', password='test')
    response = self.client.get(
        reverse('chat:messages-detail', args=(msg_id,)),
        {'chat_id': chat_id},
        follow=True
    )
    # Foreign user must be rejected with 403.
    self.assertEqual(response.status_code, 403)
@unittest.skip("TODO - review the put logic")
def test_inappropriate_message_put(self):
    """
    Check for inappropriate PUT request.
    """
    enroll_code = EnrollUnitCode.get_code(self.courseunit)
    self.client.login(username='test', password='test')
    response = self.client.get(
        reverse(
            'chat:init_chat_api',
            kwargs={
                'enroll_key': enroll_code,
                'chat_id': 0
            }
        ),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    json_content = json.loads(response.content)
    chat_id = json_content['id']
    self.assertNotIsInstance(response, HttpResponseNotFound)
    response = self.client.get(
        reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
    )
    response = self.client.get(reverse('chat:history'), {'chat_id': chat_id}, follow=True)
    json_content = json.loads(response.content)
    msg_id = json_content['addMessages'][1]['id']
    # PUT a `text` field onto a message that does not accept free text.
    msg_data = json_content['addMessages'][1]
    msg_data['text'] = 'test text'
    msg_data['chat_id'] = chat_id
    response = self.client.put(
        reverse('chat:messages-detail', args=(msg_id,)),
        data=json.dumps(msg_data),
        content_type='application/json',
        follow=True
    )
    self.assertEqual(response.status_code, 200)
    json_content = json.loads(response.content)
    # The bogus text must not be echoed back in the response.
    self.assertNotIn('text', json_content['addMessages'][0])
def test_valid_message_put(self):
    """
    Test a valid case when Student puts `text` to add `Response`.
    """
    course_unit = Course.objects.all()[0].get_course_units()[0]
    enroll_code = EnrollUnitCode.get_code(course_unit)
    self.client.login(username='test', password='test')
    response = self.client.get(
        reverse(
            'chat:init_chat_api',
            kwargs={
                'enroll_key': enroll_code,
                'chat_id': 0
            }
        ),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    json_content = json.loads(response.content)
    chat_id = json_content['id']
    self.assertNotIsInstance(response, HttpResponseNotFound)
    response = self.client.get(
        reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
    )
    response = self.client.get(
        reverse('chat:history'), {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # Roll forward past the intro message to reach the question.
    next_url, _ = self._push_continue(next_url, chat_id)
    answer = 'My Answer'
    response = self.client.put(
        next_url,
        data=json.dumps({"text": answer, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    self.assertEqual(response.status_code, 200)
    json_content = json.loads(response.content)
    self.assertIn('html', json_content['addMessages'][0])
    # The reply must match the markdown-rendered transition prompt exactly.
    self.assertEqual(
        json_content['addMessages'][0]['html'], mark_safe(md2html('Now you can move to the next lesson')))
    next_url = json_content['input']['url']
    # Click `move to the next Thread` button
    next_url, json_content = self._push_continue(next_url, chat_id)
    self.assertEqual(json_content['addMessages'][0]['type'], 'breakpoint')
def test_typical_chat_flow(self):
    """
    Check for typical chat flow.

    Drives a full chat session end-to-end through three ORCT
    (question/answer) threads: answer -> confidence -> self-eval ->
    error models -> FAQ prompt, asserting the server's messages and
    input metadata at each step.
    """
    course_unit = Course.objects.all()[0].get_course_units()[0]
    enroll_code = EnrollUnitCode.get_code(course_unit)
    self.client.login(username='test', password='test')
    response = self.client.get(
        reverse(
            'chat:init_chat_api',
            kwargs={
                'enroll_key': enroll_code,
                'chat_id': 0
            }
        ),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    json_content = json.loads(response.content)
    chat_id = json_content['id']
    # NOTE(review): init_chat_api is called a second time with the same
    # arguments; presumably this checks re-initialisation is harmless —
    # confirm whether the duplication is intentional.
    response = self.client.get(
        reverse(
            'chat:init_chat_api',
            kwargs={
                'enroll_key': enroll_code,
                'chat_id': 0
            }
        ),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    json_content = json.loads(response.content)
    chat_id = json_content['id']
    self.assertNotIsInstance(response, HttpResponseNotFound)
    response = self.client.get(
        reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
    )
    # get history
    response = self.client.get(
        reverse('chat:history'), {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    next_url, _ = self._push_continue(next_url, chat_id)
    # Click `move to the next Thread` button
    next_url, json_content = self._push_continue(next_url, chat_id)
    # post answer
    answer = 'My Answer'
    response = self.client.put(
        next_url,
        data=json.dumps({"text": answer, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # get next message (confidence)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertIsNotNone(json_content['input']['options'])
    self.assertEqual(len(json_content['addMessages']), 2)
    # confidence answer: pick the third option and remember its label so
    # we can check the server echoes it back as our message.
    conf = json_content['input']['options'][2]['value']
    conf_text = json_content['input']['options'][2]['text']
    response = self.client.put(
        next_url,
        data=json.dumps({"option": conf, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertEqual(json_content['addMessages'][0]['html'], conf_text)
    # self eval answer
    self_eval = json_content['input']['options'][2]['value']
    self_eval_text = json_content['input']['options'][2]['text']
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertIsNotNone(json_content['input']['options'])
    self.assertEqual(len(json_content['addMessages']), 3)
    response = self.client.put(
        next_url,
        data=json.dumps({"option": self_eval, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertEqual(json_content['addMessages'][0]['html'], self_eval_text)
    # Click `move to the next Thread` button
    next_url, json_content = self._push_continue(next_url, chat_id)
    # get next question (2)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertEqual(len(json_content['addMessages']), 1)
    self.assertEqual(json_content['addMessages'][0]['html'], 'Answer please')
    # post answer (2)
    response = self.client.put(
        next_url,
        data=json.dumps({"text": answer, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # get next message (confidence) (2)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # confidence answer
    conf = json_content['input']['options'][2]['value']
    conf_text = json_content['input']['options'][2]['text']
    response = self.client.put(
        next_url,
        data=json.dumps({"option": conf, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # get next message - self eval (2)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertEqual(json_content['addMessages'][0]['html'], conf_text)
    self_eval = json_content['input']['options'][0]['value']
    # self eval answer (2)
    response = self.client.put(
        next_url,
        data=json.dumps({"option": self_eval, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # get next message - error models; abort-option "80" must NOT be
    # offered until the lesson enables add_unit_aborts below.
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    self.assertNotIn(
        'data-selectable-value="80"', json_content['addMessages'][-1]['html']
    )
    # Lesson from fixtures
    lesson = Lesson.objects.get(id=78)
    lesson.add_unit_aborts = True
    lesson.save()
    # get the same message - error models
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    self.assertIn(
        'data-selectable-value="80"', json_content['addMessages'][-1]['html']
    )
    next_url = json_content['input']['url']
    msg_id = json_content['input']['includeSelectedValuesFromMessages'][0]
    # TODO select error model 80 after changing the flow
    # {"selected": {msg_id: {"errorModel": ["80"]}}
    # post error model answer (empty selection)
    response = self.client.put(
        next_url,
        data=json.dumps({"selected": {}, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # Roll FAQs: decline to ask a question for this thread.
    response = self.client.put(
        next_url,
        data=json.dumps({"option": 'no', "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # Click `move to the next Thread` button
    next_url, json_content = self._push_continue(next_url, chat_id)
    # get next message - question (3)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    next_url, json_content = self._push_continue(next_url, chat_id)
    # Move to the next Thread
    next_url, json_content = self._push_continue(next_url, chat_id)
    self.assertEqual(json_content['input']['type'], 'text')
    # Response should contain only DIVIDER and Question (ORCT) itself
    self.assertEqual(len(json_content['addMessages']), 2)
    # post answer (3)
    response = self.client.put(
        next_url,
        data=json.dumps({"text": answer, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    # get next message - confidence (3)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # post confidence answer (3)
    conf = json_content['input']['options'][2]['value']
    conf_text = json_content['input']['options'][2]['text']
    response = self.client.put(
        next_url,
        data=json.dumps({"option": conf, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    self.assertEqual(json_content['addMessages'][0]['html'], conf_text)
    next_url = json_content['input']['url']
    # get next message - self eval (3)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self_eval = json_content['input']['options'][1]['value']
    # post self eval answer (3)
    response = self.client.put(
        next_url,
        data=json.dumps({"option": self_eval, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # get next message - error models (3)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    msg_id = json_content['input']['includeSelectedValuesFromMessages'][0]
    # post error model (3): this time an actual error model ("104") is
    # selected for the answer message.
    response = self.client.put(
        next_url,
        data=json.dumps({"selected": {msg_id: {"errorModel": ["104"]}}, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # NOTE(review): the steps below appear to walk the error-model
    # follow-up: pushing option 1 yields the "Re: Em1" explanation,
    # then a status choice is submitted — confirm against the chat FSM.
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.put(
        next_url,
        data=json.dumps({"option": 1, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertEqual(
        json_content['addMessages'][0]['html'],
        '<dl>\n<dt><strong>Re: Em1</strong></dt>\n<dd><p>Em1 description</p>\n</dd>\n</dl>\n'
    )
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # Answer the status question with its first option.
    status_value = json_content['input']['options'][0]['value']
    response = self.client.put(
        next_url,
        data=json.dumps({"option": status_value, "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    # Rolling forward FAQ flow: this time answer 'yes' to asking a FAQ.
    response = self.client.put(
        next_url,
        data=json.dumps({"option": 'yes', "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    self.assertEqual(len(json_content['addMessages']), 2)
    self.assertEqual(json_content['addMessages'][0]['html'], 'Yes')
    # post FAQ title (3)
    response = self.client.put(
        next_url,
        data=json.dumps({"text": 'FAQ title', "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    # get next message - FAQ description prompt (3)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    response = self.client.put(
        next_url,
        data=json.dumps({"text": 'FAQ description', "chat_id": chat_id}),
        content_type='application/json',
        follow=True
    )
    # get next message after the FAQ is recorded (3)
    response = self.client.get(
        next_url, {'chat_id': chat_id}, follow=True
    )
    json_content = json.loads(response.content)
    next_url = json_content['input']['url']
    next_url, json_content = self._push_continue(next_url, chat_id)
    # Get next Thread
    self.assertEqual(json_content['addMessages'][0]['type'], 'breakpoint')
def test_preview_forbidden(self):
    """
    Check that the preview page refuses access for an unpublished courselet.
    """
    unit = Course.objects.all()[0].get_course_units()[0]
    self.client.login(username='test', password='test')
    enroll = EnrollUnitCode.get_code_for_user_chat(
        course_unit=unit,
        is_live=False,
        user=User.objects.get(username='test'),
        is_preview=True
    )
    preview_url = reverse(
        'chat:preview_courselet', kwargs={'enroll_key': enroll.enrollCode}
    )
    response = self.client.get(preview_url)
    # The view must render the "not published / no permissions" notice
    # (spelling below matches the template text verbatim).
    self.assertIn(
        b'This Courselet is not published yet or you have no permisions to open it.',
        response.content
    )
class HistoryAPIViewTests(CustomTestCase):
    """
    Tests /history API.

    Each test initialises a chat session via `chat:init_chat_api`,
    enrolls through `chat:chat_enroll`, then exercises `chat:history`.
    """
    def _push_continue(self, next_url, chat_id):
        """
        Click Continue button to roll forward to the next Message.

        PUTs option 1 to ``next_url``, follows the returned input URL with
        a GET, and returns ``(next_input_url, parsed_json_response)``.

        NOTE(review): not referenced by the tests in this class — possibly
        copy-pasted scaffolding; confirm before removing.
        """
        response = self.client.put(
            next_url,
            data=json.dumps({"option": 1, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        return json_content['input']['url'], json_content
    def test_positive_response(self):
        """
        Test positive case for /history call.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        # Enrolling returns the page whose context carries the real chat_id.
        chat_id = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        ).context['chat_id']
        response = self.client.get(reverse('chat:history'), {'chat_id': chat_id}, follow=True)
        self.assertEqual(response.status_code, 200)
    def test_permission_denied(self):
        """
        Check that chat history can be viewed by chat author only.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        chat_id = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        ).context['chat_id']
        # Switch to a different user: history for test's chat must be 403.
        self.user = User.objects.create_user('middle_man', 'test@test.com', 'test')
        self.client.login(username='middle_man', password='test')
        response = self.client.get(reverse('chat:history'), {'chat_id': chat_id}, follow=True)
        self.assertEqual(response.status_code, 403)
    def test_content(self):
        """
        Check that history content fits API documentation.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        lesson = self.unitlesson.lesson
        # Non-ASCII lesson text exercises unicode handling end-to-end.
        lesson.text = '🦊'
        lesson.save()
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        chat_id = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        ).context['chat_id']
        response = self.client.get(reverse('chat:history'), {'chat_id': chat_id}, follow=True)
        json_content = json.loads(response.content)
        self.assertIsInstance(json_content['input'], dict)
        self.assertIsInstance(json_content['addMessages'], list)
        # Last transition is omitted so we have to have 4 messages
        # Last two messages must be about core sequence completion
        self.assertEqual(len(json_content['addMessages']), 4)
        self.assertEqual(json_content['addMessages'][0]['name'], self.unitlesson.addedBy.username)
        self.assertEqual(json_content['addMessages'][0]['html'], self.unitlesson.lesson.title)
        self.assertEqual(json_content['addMessages'][1]['type'], 'message')
        self.assertEqual(json_content['addMessages'][2]['type'], 'message')
        self.assertEqual(json_content['addMessages'][2]['html'], 'Congratulations! You have completed the core lessons for this courselet.')
        self.assertEqual(json_content['addMessages'][3]['html'], 'Please look over the available resources in the side panel.')
        self.assertEqual(
            json_content['addMessages'][1]['html'],
            self.compile_html(self.unitlesson)
        )
@override_settings(SUSPEND_SIGNALS=True)
class NumbersTest(CustomTestCase):
    """Tests to check numbers functionality.

    Uses the `initial_numbers.json` fixture, which provides a course whose
    questions take numeric answers (input subType 'numbers').
    """
    fixtures = ['chat/tests/fixtures/initial_numbers.json']
    def _push_continue(self, next_url, chat_id):
        """
        Click Continue button to roll forward to the next Message.

        PUTs option 1 to ``next_url``, follows the returned input URL with
        a GET, and returns ``(next_input_url, parsed_json_response)``.
        """
        response = self.client.put(
            next_url,
            data=json.dumps({"option": 1, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        return json_content['input']['url'], json_content
    def test_typical_chat_flow(self):
        """
        Check for typical chat flow.

        Same shape as the generic chat flow test, but for numeric ORCT
        questions: a non-numeric answer must be rejected with an error,
        a numeric one proceeds through confidence / self-eval / error
        models / FAQ to the end of the courselet.
        """
        course_unit = Course.objects.get(title='numbers course').get_course_units()[0]
        enroll_code = EnrollUnitCode.get_code(course_unit)
        self.client.login(username='alex', password='123')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        # NOTE(review): duplicate init call, mirroring the generic flow
        # test — confirm whether re-initialisation is deliberate.
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        self.assertNotIsInstance(response, HttpResponseNotFound)
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        # get history
        response = self.client.get(
            reverse('chat:history'), {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        self.assertEqual(json_content['input']['subType'], 'numbers')
        next_url = json_content['input']['url']
        # post answer: a non-numeric value must be rejected first.
        not_correct_answer = 'SOmeText'
        answer = '1'
        response = self.client.put(
            next_url,
            data=json.dumps({"text": not_correct_answer, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        self.assertEqual({'error': 'Not correct value!'}, json_content)
        response = self.client.put(
            next_url,
            data=json.dumps({"text": answer, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # get next message (confidence)
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        self.assertIsNotNone(json_content['input']['options'])
        self.assertEqual(len(json_content['addMessages']), 2)
        # confidence answer
        conf = json_content['input']['options'][2]['value']
        conf_text = json_content['input']['options'][2]['text']
        response = self.client.put(
            next_url,
            data=json.dumps({"option": conf, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        self.assertEqual(json_content['addMessages'][0]['html'], conf_text)
        # self eval answer
        self_eval = json_content['input']['options'][2]['value']
        self_eval_text = json_content['input']['options'][2]['text']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        self.assertIsNotNone(json_content['input']['options'])
        self.assertEqual(len(json_content['addMessages']), 3)
        response = self.client.put(
            next_url,
            data=json.dumps({"option": self_eval, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        self.assertEqual(json_content['addMessages'][0]['html'], self_eval_text)
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        self.assertEqual(json_content['addMessages'][0]['html'], self_eval_text)
        # Numeric grading feedback follows the self-eval echo.
        grading_msg = 'Your answer is partially correct!'
        self.assertEqual(json_content['addMessages'][1]['html'], grading_msg)
        # Click `move to the next Thread` button
        next_url, json_content = self._push_continue(next_url, chat_id)
        self.assertEqual(json_content['input']['subType'], 'numbers')
        self.assertEqual(len(json_content['addMessages']), 2)
        # post answer (2)
        response = self.client.put(
            next_url,
            data=json.dumps({"text": answer, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # get next message (confidence) (2)
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # confidence answer
        conf = json_content['input']['options'][2]['value']
        conf_text = json_content['input']['options'][2]['text']
        response = self.client.put(
            next_url,
            data=json.dumps({"option": conf, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # get next message - self eval (2)
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        self.assertEqual(json_content['addMessages'][0]['html'], conf_text)
        self_eval = json_content['input']['options'][0]['value']
        # self eval answer (2)
        response = self.client.put(
            next_url,
            data=json.dumps({"option": self_eval, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # get next message - error models
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        self.assertNotIn(
            'data-selectable-value=""', json_content['addMessages'][-1]['html']
        )
        # Lesson from fixtures
        lesson = Lesson.objects.get(id=78)
        lesson.add_unit_aborts = True
        lesson.save()
        # get the same message - error models; with aborts enabled the
        # abort option "128" becomes selectable.
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        self.assertIn(
            'data-selectable-value="128"', json_content['addMessages'][-1]['html']
        )
        next_url = json_content['input']['url']
        # msg_id = json_content['input']['includeSelectedValuesFromMessages'][0]
        # TODO select error model 80 after changing the flow
        # {"selected": {msg_id: {"errorModel": ["80"]}}
        # post error model answer
        response = self.client.put(
            next_url,
            data=json.dumps({"selected": {}, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # Roll FAQs
        response = self.client.put(
            next_url,
            data=json.dumps({"option": 'no', "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # Click `move to the next Thread` button; no further input URL
        # means the courselet has been completed.
        next_url, json_content = self._push_continue(next_url, chat_id)
        self.assertEqual(next_url, None)
class ProgressAPIViewTests(CustomTestCase):
    """
    Tests for /progress API.
    """
    def _push_continue(self, next_url, chat_id):
        """
        Click Continue button to roll forward to the next Message.

        PUTs option 1 to ``next_url``, follows the returned input URL with
        a GET, and returns ``(next_input_url, parsed_json_response)``.

        NOTE(review): not referenced by the tests in this class — possibly
        copy-pasted scaffolding; confirm before removing.
        """
        response = self.client.put(
            next_url,
            data=json.dumps({"option": 1, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        return json_content['input']['url'], json_content
    def test_positive_response(self):
        """
        Test positive case for /progress call.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(reverse('chat:progress'), {'chat_id': chat_id}, follow=True)
        self.assertEqual(response.status_code, 200)
    def test_permission_denied(self):
        """
        Check that chat progress can be viewed by chat author only.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        # Switch to a different user: progress for test's chat must be 403.
        self.user = User.objects.create_user('middle_man', 'test@test.com', 'test')
        self.client.login(username='middle_man', password='test')
        response = self.client.get(reverse('chat:progress'), {'chat_id': chat_id}, follow=True)
        self.assertEqual(response.status_code, 403)
    def test_content(self):
        """
        Check that history content fits API documentation.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(reverse('chat:progress'), {'chat_id': chat_id}, follow=True)
        json_content = json.loads(response.content)
        self.assertIsInstance(json_content['progress'], int)
        self.assertIsInstance(json_content['breakpoints'], list)
        self.assertEqual(len(json_content['breakpoints']), 1)
        self.assertEqual(json_content['progress'], 1)
        self.assertEqual(json_content['breakpoints'][0]['html'], self.unitlesson.lesson.title)
        self.assertEqual(json_content['breakpoints'][0]['isDone'], True)
        self.assertEqual(json_content['breakpoints'][0]['isUnlocked'], True)
class ResourcesViewTests(CustomTestCase):
    """
    Tests for /resources API call.
    """
    def _push_continue(self, next_url, chat_id):
        """
        Click Continue button to roll forward to the next Message.

        PUTs option 1 to ``next_url``, follows the returned input URL with
        a GET, and returns ``(next_input_url, parsed_json_response)``.

        NOTE(review): not referenced by the tests in this class — possibly
        copy-pasted scaffolding; confirm before removing.
        """
        response = self.client.put(
            next_url,
            data=json.dumps({"option": 1, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        return json_content['input']['url'], json_content
    def test_positive_case(self):
        """
        Test positive case for /resources call.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(reverse('chat:resources-list'), {'chat_id': chat_id}, follow=True)
        self.assertEqual(response.status_code, 200)
    def test_permission_denied(self):
        """
        Check that chat resources can be viewed by chat author only.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        # Switch to a different user: resources for test's chat must be 403.
        self.user = User.objects.create_user('middle_man', 'test@test.com', 'test')
        self.client.login(username='middle_man', password='test')
        response = self.client.get(reverse('chat:resources-list'), {'chat_id': chat_id}, follow=True)
        self.assertEqual(response.status_code, 403)
    def test_content(self):
        """
        Check that resources content fits ResourcesAPI documentation.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(reverse('chat:resources-list'), {'chat_id': chat_id}, follow=True)
        json_content = json.loads(response.content)
        self.assertIsInstance(json_content['breakpoints'], list)
        self.assertEqual(len(json_content['breakpoints']), 2)
        # TODO Need to investigate why concepts also presented as Resources
        self.assertEqual(
            json_content['breakpoints'][1]['html'], self.resource_unitlesson.lesson.title
        )
        self.assertEqual(json_content['breakpoints'][1]['isDone'], False)
        self.assertEqual(json_content['breakpoints'][1]['isStarted'], False)
        self.assertEqual(json_content['breakpoints'][1]['isAvailable'], True)
        # A second fetch must report the same availability (idempotent GET).
        response = self.client.get(reverse('chat:resources-list'), {'chat_id': chat_id}, follow=True)
        json_content = json.loads(response.content)
        self.assertEqual(json_content['breakpoints'][1]['isAvailable'], True)
    def test_get_resources_message_by_id(self):
        """
        Test get resources message by id from /resources response.
        Checks that returned content fits resources API documentation.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        chat_id = json_content['id']
        response = self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        response = self.client.get(reverse('chat:resources-list'), {'chat_id': chat_id}, follow=True)
        json_content = json.loads(response.content)
        # Each breakpoint's 'ul' is the UnitLesson id used as detail pk.
        resource_response = self.client.get(
            reverse('chat:resources-detail', args=(json_content['breakpoints'][0]['ul'],)),
            {'chat_id': chat_id}
        )
        self.assertEqual(resource_response.status_code, 200)
        resource_response = self.client.get(
            reverse('chat:resources-detail', args=(json_content['breakpoints'][1]['ul'],)),
            {'chat_id': chat_id}
        )
        self.assertEqual(resource_response.status_code, 200)
        json_content = json.loads(resource_response.content)
        self.assertIsInstance(json_content['input'], dict)
        self.assertIsInstance(json_content['addMessages'], list)
        self.assertEqual(len(json_content['addMessages']), 3)
        self.assertIn('nextMessagesUrl', json_content)
        self.assertIsNone(json_content['nextMessagesUrl'])
        self.assertIn('id', json_content)
        self.assertEqual(json_content['addMessages'][0]['name'], self.resource_unitlesson.addedBy.username)
        self.assertEqual(json_content['addMessages'][0]['type'], 'breakpoint')
        self.assertEqual(json_content['addMessages'][0]['html'], self.resource_unitlesson.lesson.title)
        self.assertEqual(json_content['addMessages'][1]['type'], 'message')
        self.assertEqual(
            json_content['addMessages'][1]['html'],
            self.compile_html(self.resource_unitlesson)
        )
        self.assertEqual(json_content['addMessages'][2]['type'], 'message')
        self.assertEqual(json_content['addMessages'][2]['html'], END.ultimate_help)
        self.assertIn('url', json_content['input'])
        self.assertIn('includeSelectedValuesFromMessages', json_content['input'])
        self.assertIn('html', json_content['input'])
        self.assertIn('type', json_content['input'])
        self.assertIn('options', json_content['input'])
class InternalMessageSerializerTests(CustomTestCase):
    """
    Tests for InternalMessageSerializer.
    """

    def test_serializer_data(self):
        """
        Verify that InternalMessageSerializer output exposes every documented field.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        # No chat session exists yet, so initialise one via the AJAX endpoint.
        init_response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={'enroll_key': enroll_code, 'chat_id': 0}
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        chat_id = json.loads(init_response.content)['id']
        self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        # Pull the chat history and serialize its first message.
        history_response = self.client.get(
            reverse('chat:history'), {'chat_id': chat_id}, follow=True
        )
        first_msg_id = json.loads(history_response.content)['addMessages'][0]['id']
        message = Message.objects.get(id=first_msg_id)
        serialized = InternalMessageSerializer().to_representation(message)
        for expected_key in ('id', 'type', 'name', 'userMessage', 'avatar', 'html'):
            self.assertIn(expected_key, serialized)
class InputSerializerTests(CustomTestCase):
    """
    Tests for InputSerializer.
    """

    def test_serializer_data(self):
        """
        Verify that InputSerializer output exposes every documented field.
        """
        payload = {
            'type': 'custom',
            'url': None,
            'options': ['option1', 'option2'],
            'includeSelectedValuesFromMessages': [],
            'html': 'some html',
            'doWait': False
        }
        serialized = InputSerializer().to_representation(payload)
        expected_keys = (
            'type', 'url', 'options', 'includeSelectedValuesFromMessages', 'html', 'doWait'
        )
        for expected_key in expected_keys:
            self.assertIn(expected_key, serialized)
class MesasageSerializerTests(CustomTestCase):
    """
    Tests for MessageSerializer.

    NOTE(review): the class name misspells "Message"; kept as-is to avoid
    renaming a publicly discoverable test case.
    """

    def setUp(self):
        # MessageSerializer requires a ``next_handler`` dependency; build an
        # injected serializer class once per test via the injections container.
        container = injections.Container()
        container['next_handler'] = TestHandler()
        self.MessageSerializerForTest = container.inject(MessageSerializer)
        super(MesasageSerializerTests, self).setUp()

    def test_serializer_data(self):
        """
        Verify that MessageSerializer output exposes every documented field.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        # Initialise a fresh chat session via the AJAX endpoint.
        init_response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={'enroll_key': enroll_code, 'chat_id': 0}
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        chat_id = json.loads(init_response.content)['id']
        self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        # Serialize the first message found in the chat history.
        history_response = self.client.get(
            reverse('chat:history'), {'chat_id': chat_id}, follow=True
        )
        first_msg_id = json.loads(history_response.content)['addMessages'][0]['id']
        message = Message.objects.get(id=first_msg_id)
        serialized = self.MessageSerializerForTest().to_representation(message)
        for expected_key in ('id', 'input', 'addMessages', 'nextMessagesUrl'):
            self.assertIn(expected_key, serialized)
class ChatProgressSerializerTests(CustomTestCase):
    """
    Tests for ChatProgressSerializer.
    """

    def test_serializer_data(self):
        """
        Verify that ChatProgressSerializer output exposes every documented field.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        # No chat session exists yet, so initialise one via the AJAX endpoint.
        init_response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={'enroll_key': enroll_code, 'chat_id': 0}
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        chat_id = json.loads(init_response.content)['id']
        self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        serialized = ChatProgressSerializer().to_representation(Chat.objects.get(id=chat_id))
        for expected_key in ('progress', 'breakpoints'):
            self.assertIn(expected_key, serialized)
class ChatHistorySerializerTests(CustomTestCase):
    """
    Tests for ChatHistorySerializer.
    """

    def test_serializer_data(self):
        """
        Verify that ChatHistorySerializer output exposes every documented field.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        # No chat session exists yet, so initialise one via the AJAX endpoint.
        init_response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={'enroll_key': enroll_code, 'chat_id': 0}
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        chat_id = json.loads(init_response.content)['id']
        self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        serialized = ChatHistorySerializer().to_representation(Chat.objects.get(id=chat_id))
        for expected_key in ('input', 'addMessages'):
            self.assertIn(expected_key, serialized)
class LessonSerializerTests(CustomTestCase):
    """
    Tests for LessonSerializer.
    """

    def test_serializer_data(self):
        """
        Verify that LessonSerializer output exposes every documented field and
        mirrors the source message's id and lesson title.
        """
        enroll_code = EnrollUnitCode.get_code(self.courseunit)
        self.client.login(username='test', password='test')
        # No chat session exists yet, so initialise one via the AJAX endpoint.
        init_response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={'enroll_key': enroll_code, 'chat_id': 0}
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        chat_id = json.loads(init_response.content)['id']
        self.client.get(
            reverse('chat:chat_enroll', args=(enroll_code, chat_id)), follow=True
        )
        history_response = self.client.get(
            reverse('chat:history'), {'chat_id': chat_id}, follow=True
        )
        # The second history message carries the lesson content.
        lesson_msg_id = json.loads(history_response.content)['addMessages'][1]['id']
        lesson_msg = Message.objects.get(id=lesson_msg_id)
        serialized = LessonSerializer().to_representation(lesson_msg.content)
        for expected_key in ('id', 'html', 'isUnlocked', 'isDone'):
            self.assertIn(expected_key, serialized)
        self.assertEqual(serialized['id'], lesson_msg_id)
        self.assertEqual(serialized['html'], lesson_msg.content.lesson.title)
@ddt
class TestChatGetBackUrls(CustomTestCase):
    """
    Test that back_url on chat pages are correct.

    Logic should be:
    - if it is usual course view - back_url should go to LMS
    - if it is course preview - back_url should go to CTMS
    - if it is add lesson by chat - back_url should go to CTMS
    - if it is course tester - back url should go to LMS
    """

    @unpack
    @data(
        # Each tuple: (view class, expected link caption, callable producing
        # the expected URL).  The URL is built lazily via a lambda taking
        # ``self`` because course/unit ids only exist after setUp has run.
        (ChatInitialView, "Course",
         lambda self: reverse(
             'lms:course_view',
             kwargs={'course_id': self.course.id})
         ),
        (CourseletPreviewView, "Return",
         lambda self: reverse(
             'ctms:courslet_view',
             kwargs={
                 'course_pk': self.course.id,
                 'pk': self.unit.pk
             })
         ),
        (CheckChatInitialView, "Return",
         lambda self: reverse(
             'lms:tester_course_view',
             kwargs={'course_id': self.course.id})
         )
    )
    def test_back_url(self, cls, valid_name, url_callable):
        """Check that ``cls.get_back_url`` returns the expected caption/URL pair."""
        kwargs = {'courseUnit': self.courseunit}
        name, url = cls.get_back_url(**kwargs)
        valid_url = url_callable(self)
        self.assertEqual(name, valid_name)
        self.assertEqual(url, valid_url)
class MultipleChoiceTests(CustomTestCase):
    """
    Tests for the Multiple Choice question in the chat.

    Checks:
    - ASCII error
    - Formative flow
    - Concept inventory flow
    - True multiple selection flow [there is no correct choice]
    """

    def setUp(self):
        """Create a multiple-choice ORCT lesson, its canonical answer and a
        follow-up question, then open and enroll a chat session."""
        super(MultipleChoiceTests, self).setUp()
        self.enroll_code = EnrollUnitCode.get_code(self.courseunit)
        # Turn the base lesson into a multiple-choice ORCT question.  The
        # question text is intentionally non-ASCII (Russian) to exercise
        # encoding handling; ``(*)`` marks the correct choice.
        lesson = self.unitlesson.lesson
        lesson.text = 'вопрос?\r\n[choices]\r\n() один\r\nобъяснение\r\n(*) два\r\n'+\
            'потому что потому\r\n() три\r\nобъяснение\r\n() четыре\r\nобъяснение'
        lesson.sub_kind = "choices"
        lesson.kind = "orct"
        lesson.addedBy = self.user
        lesson.save()
        # Canonical answer lesson attached to the question above.
        lesson2 = Lesson(title='title2', text='два', kind='answer', addedBy=self.user)
        lesson2.save()
        self.unitlesson2 = UnitLesson(
            unit=self.unit, kind=UnitLesson.ANSWERS, lesson=lesson2, addedBy=self.user, treeID=lesson2.id,
            parent=self.unitlesson
        )
        self.unitlesson2.save()
        # A second plain ORCT question so the chat has a next thread to reach.
        lesson3 = Lesson(title='title3', text='1', kind='orct', addedBy=self.user)
        lesson3.save()
        self.unitlesson3 = UnitLesson(
            unit=self.unit, order=1, lesson=lesson3, addedBy=self.user, treeID=lesson3.id
        )
        self.unitlesson3.save()
        # Initialise a chat session via the AJAX endpoint and enroll into it.
        self.client.login(username='test', password='test')
        response = self.client.get(
            reverse(
                'chat:init_chat_api',
                kwargs={
                    'enroll_key': self.enroll_code,
                    'chat_id': 0
                }
            ),
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        json_content = json.loads(response.content)
        self.chat_id = json_content['id']
        self.assertNotIsInstance(response, HttpResponseNotFound)
        response = self.client.get(
            reverse('chat:chat_enroll', args=(self.enroll_code, self.chat_id)), follow=True
        )

    def _push_continue(self, next_url, chat_id):
        """
        Click Continue button to roll forward to the next Message.

        NOTE: returns (next_url, json_content) — the opposite order to the
        other helpers in this class.
        """
        response = self.client.put(
            next_url,
            data=json.dumps({"option": 1, "chat_id": chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        return json_content['input']['url'], json_content

    def get_history(self):
        """Fetch the chat history; return (json_content, next_url) for the
        pending multiple-choice input."""
        response = self.client.get(
            reverse('chat:history'), {'chat_id': self.chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        # The pending input must be the multiple-choice widget.
        self.assertEqual(json_content['input']['subType'], 'choices')
        next_url = json_content['input']['url']
        return json_content, next_url

    def post_answer(self, json_content, next_url, choices):
        """Submit ``choices`` (list of selected option indices, possibly empty)
        as the answer and advance to the confidence prompt.

        Returns (json_content, next_url) for the confidence step.
        """
        message_id = json_content['input']['includeSelectedValuesFromMessages']
        response = self.client.put(
            next_url,
            data=json.dumps({"options": 1, "selected": {message_id[0]: {"choices": choices}}, "chat_id": self.chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        response = self.client.get(
            next_url, {'chat_id': self.chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # The follow-up input must be the confidence selector with options.
        self.assertIsNotNone(json_content['input']['options'])
        self.assertEqual(len(json_content['addMessages']), 2)
        return json_content, next_url

    def confidence_answer(self, json_content, next_url):
        """Pick the third confidence option and check it is echoed back.

        Returns (json_content, next_url) for the evaluation messages.
        """
        conf = json_content['input']['options'][2]['value']
        conf_text = json_content['input']['options'][2]['text']
        response = self.client.put(
            next_url,
            data=json.dumps({"option": conf, "chat_id": self.chat_id}),
            content_type='application/json',
            follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        # The chat must echo the chosen confidence level back to the user.
        self.assertEqual(json_content['addMessages'][0]['html'], conf_text)
        response = self.client.get(
            next_url, {'chat_id': self.chat_id}, follow=True
        )
        json_content = json.loads(response.content)
        next_url = json_content['input']['url']
        return json_content, next_url

    def get_and_check_next_question(self, json_content, next_url):
        """Assert the next thread's first message is reachable (HTTP 200)."""
        response = self.client.get(
            next_url, {'chat_id': self.chat_id}, follow=True
        )
        self.assertEqual(response.status_code, 200)

    def test_ascii_valid(self):
        """Correct selection on a non-ASCII question yields the success message."""
        lesson = self.unitlesson.lesson
        lesson.text = 'вопрос?\r\n[choices]\r\n() один\r\nобъяснение\r\n(*) два\r\nпотому что потому\r\n()'+\
            ' три\r\nобъяснение\r\n() четыре\r\nобъяснение'
        lesson.save()
        json_content, next_url = self.get_history()
        # Choice index 1 ("два") is the correct one.
        json_content, next_url = self.post_answer(json_content, next_url, [1])
        json_content, next_url = self.confidence_answer(json_content, next_url)
        answer_msg = "You got it right, the correct answer is: два"
        explanation_msg = "потому что потому"
        self.assertIn(answer_msg, json_content['addMessages'][1]['html'])
        self.assertIn(explanation_msg, json_content['addMessages'][1]['html'])
        # We have to click `Move to the next Thread` button to see a breakpoint
        self.assertNotIn('breakpoint', json_content['addMessages'][2]['type'])
        # Click `move to the next Thread` button
        next_url, json_content = self._push_continue(next_url, self.chat_id)
        self.get_and_check_next_question(json_content, next_url)

    def test_ascii_invalid(self):
        """Wrong selection shows the correct answer, the user's pick and FAQs."""
        lesson = self.unitlesson.lesson
        lesson.text = 'вопрос?\r\n[choices]\r\n() один\r\nобъяснение\r\n(*) два\r\nпотому что потому\r\n()'+\
            ' три\r\nобъяснение\r\n() четыре\r\nобъяснение'
        lesson.save()
        json_content, next_url = self.get_history()
        # Choice index 0 ("один") is incorrect.
        json_content, next_url = self.post_answer(json_content, next_url, [0])
        json_content, next_url = self.confidence_answer(json_content, next_url)
        answer_msg = "The correct answer is: два"
        explanation_msg = "потому что потому"
        self.assertIn(answer_msg, json_content['addMessages'][1]['html'])
        self.assertIn(explanation_msg, json_content['addMessages'][1]['html'])
        answer_msg = "You selected: один"
        self.assertIn(answer_msg, json_content['addMessages'][2]['html'])
        # FAQ message
        self.assertIn('message', json_content['addMessages'][3]['type'])
        # Roll FAQs
        next_url, json_content = self._push_continue(next_url, self.chat_id)
        # Click `move to the next Thread` button
        next_url, json_content = self._push_continue(next_url, self.chat_id)
        # TODO move one step further to real question
        self.get_and_check_next_question(json_content, next_url)

    def test_ascii_void_valid(self):
        """Empty selection is correct when the question has no marked choice."""
        lesson = self.unitlesson.lesson
        # No choice carries the ``(*)`` marker, so selecting nothing is right.
        lesson.text = 'вопрос?\r\n[choices]\r\n() один\r\nобъяснение\r\n() два\r\nпотому что потому\r\n()'+\
            ' три\r\nобъяснение\r\n() четыре\r\nобъяснение'
        lesson.save()
        json_content, next_url = self.get_history()
        json_content, next_url = self.post_answer(json_content, next_url, [])
        json_content, next_url = self.confidence_answer(json_content, next_url)
        answer_msg = "You got it right, the correct answer is: " + self.unitlesson2.lesson.title
        explanation_msg = self.unitlesson2.lesson.text
        self.assertIn(answer_msg, json_content['addMessages'][1]['html'])
        self.assertIn(explanation_msg, json_content['addMessages'][1]['html'])
        self.assertNotIn('breakpoint', json_content['addMessages'][2]['type'])
        # Click `move to the next Thread` button
        next_url, json_content = self._push_continue(next_url, self.chat_id)
        self.get_and_check_next_question(json_content, next_url)

    def test_ascii_void_invalid(self):
        """Empty selection is wrong when a correct choice exists; FAQ follows."""
        lesson = self.unitlesson.lesson
        lesson.text = 'вопрос?\r\n[choices]\r\n() один\r\nобъяснение\r\n(*) два\r\nпотому что потому\r\n()'+\
            ' три\r\nобъяснение\r\n() четыре\r\nобъяснение'
        lesson.save()
        json_content, next_url = self.get_history()
        json_content, next_url = self.post_answer(json_content, next_url, [])
        json_content, next_url = self.confidence_answer(json_content, next_url)
        answer_msg = "The correct answer is: два"
        explanation_msg = "потому что потому"
        self.assertIn(answer_msg, json_content['addMessages'][1]['html'])
        self.assertIn(explanation_msg, json_content['addMessages'][1]['html'])
        self.assertIn('Is there anything else you\'re wondering about, where you\'d like clarification or something you\'re unsure about this point?', json_content['addMessages'][3]['html'])
        self.assertIn('message', json_content['addMessages'][3]['type'])
        answer_msg = "You selected: Nothing"
        self.assertIn(answer_msg, json_content['addMessages'][2]['html'])
        # Roll FAQs
        next_url, json_content = self._push_continue(next_url, self.chat_id)
        # Click `move to the next Thread` button
        next_url, json_content = self._push_continue(next_url, self.chat_id)
        # TODO move one step further to real question
        self.get_and_check_next_question(json_content, next_url)
| {
"content_hash": "75cf69286e962db3253b816c725729e9",
"timestamp": "",
"source": "github",
"line_count": 2203,
"max_line_length": 190,
"avg_line_length": 36.021788470267815,
"alnum_prop": 0.5690685014365643,
"repo_name": "cjlee112/socraticqs2",
"id": "32f4130022e2e3c50e5e55d5a859fe6095a298a1",
"size": "79789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/chat/tests/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138226"
},
{
"name": "Dockerfile",
"bytes": "3865"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "467395"
},
{
"name": "JavaScript",
"bytes": "234788"
},
{
"name": "Makefile",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "1785754"
},
{
"name": "Shell",
"bytes": "2889"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
# Build script for the Cython-accelerated explicit method.
# Usage: python setup.py build_ext --inplace
ext_modules = [Extension("explicit_cython2", ["explicit_cython2.pyx"])]
setup(
    name = 'Explicit method using Cython',
    # Route `build_ext` through Cython so .pyx sources are compiled to C.
    cmdclass = {'build_ext': build_ext},
    # NumPy headers are needed for ndarray/memoryview access in the .pyx file.
    include_dirs = [np.get_include()],
    ext_modules = ext_modules
)
# Alternative OpenMP-enabled build kept for reference (a bare string literal,
# never executed as a build step).
'''
ext_module = Extension(
    "explicit_cython2",
    ["explicit_cython2.pyx"],
    extra_compile_args=['-fopenmp'],
    extra_link_args=['-fopenmp'],
)
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [ext_module],
)
'''
"content_hash": "3ccc2bfd116573c858441005135ce4a2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 22.37037037037037,
"alnum_prop": 0.6754966887417219,
"repo_name": "pewen/transferencia_calor",
"id": "4eed74da5b8b5f5471cbef55a180af1e328e1f9d",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Notebooks/cython_method/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3588"
},
{
"name": "HTML",
"bytes": "120885"
},
{
"name": "Python",
"bytes": "13188"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
from nose import SkipTest
from billiard.compat import _winapi
from .utils import Case
# Names that billiard's ``_winapi`` compatibility module must expose on
# Windows.  Split into plain integer constants and callables so the tests
# below can check each group appropriately.
WIN32_CONSTANTS = {
    'ERROR_ALREADY_EXISTS',
    'ERROR_PIPE_BUSY',
    'ERROR_PIPE_CONNECTED',
    'ERROR_SEM_TIMEOUT',
    'ERROR_MORE_DATA',
    'ERROR_BROKEN_PIPE',
    'ERROR_IO_PENDING',
    'ERROR_NETNAME_DELETED',
    'GENERIC_READ',
    'GENERIC_WRITE',
    'DUPLICATE_SAME_ACCESS',
    'DUPLICATE_CLOSE_SOURCE',
    'INFINITE',
    'NMPWAIT_WAIT_FOREVER',
    'OPEN_EXISTING',
    'PIPE_ACCESS_DUPLEX',
    'PIPE_ACCESS_INBOUND',
    'PIPE_READMODE_MESSAGE',
    'PIPE_TYPE_MESSAGE',
    'PIPE_UNLIMITED_INSTANCES',
    'PIPE_WAIT',
    'PROCESS_ALL_ACCESS',
    'PROCESS_DUP_HANDLE',
    'WAIT_OBJECT_0',
    'WAIT_ABANDONED_0',
    'WAIT_TIMEOUT',
    'FILE_FLAG_FIRST_PIPE_INSTANCE',
    'FILE_FLAG_OVERLAPPED',
}
# Win32 API functions that must be callable through ``_winapi``.
WIN32_FUNCTIONS = {
    'CloseHandle',
    'GetLastError',
    'OpenProcess',
    'ExitProcess',
    'ConnectNamedPipe',
    'CreateFile',
    'WriteFile',
    'ReadFile',
    'CreateNamedPipe',
    'SetNamedPipeHandleState',
    'WaitNamedPipe',
    'PeekNamedPipe',
    'WaitForMultipleObjects',
    'WaitForSingleObject',
    'GetCurrentProcess',
    'GetExitCodeProcess',
    'TerminateProcess',
    'DuplicateHandle',
    'CreatePipe',
}
class test_win32_module(Case):
    """Smoke-test that the ``_winapi`` shim exposes the expected Win32 names."""

    def setUp(self):
        # The checked symbols only exist on Windows builds.
        if sys.platform != 'win32':
            raise SkipTest('win32 only tests')

    def test_constants(self):
        """Every documented constant (plus NULL) must be present and non-None."""
        for const_name in WIN32_CONSTANTS:
            self.assertIsNotNone(getattr(_winapi, const_name))
        getattr(_winapi, 'NULL')

    def test_functions(self):
        """Every documented function (plus Overlapped) must be truthy."""
        getattr(_winapi, 'Overlapped')
        for func_name in WIN32_FUNCTIONS:
            self.assertTrue(getattr(_winapi, func_name))
| {
"content_hash": "395001d8eb4706021024acb0c6c3b376",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 57,
"avg_line_length": 22.40506329113924,
"alnum_prop": 0.6293785310734463,
"repo_name": "jakirkham/billiard",
"id": "aad9bc6b35e750a4286953d170d8ab8c9f19a993",
"size": "1770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "billiard/tests/test_win32.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "C",
"bytes": "60738"
},
{
"name": "Makefile",
"bytes": "1508"
},
{
"name": "PowerShell",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "405247"
}
],
"symlink_target": ""
} |
from south.db import db
from django.db import models
from ietf.ietfworkflows.models import *
class Migration:
    """South schema migration: adds the Stream and StreamedID tables for the
    ietfworkflows app."""

    def forwards(self, orm):
        """Create the ietfworkflows_streamedid and ietfworkflows_stream tables."""
        # Adding model 'StreamedID'
        db.create_table('ietfworkflows_streamedid', (
            ('id', orm['ietfworkflows.streamedid:id']),
            ('draft', orm['ietfworkflows.streamedid:draft']),
            ('stream', orm['ietfworkflows.streamedid:stream']),
        ))
        db.send_create_signal('ietfworkflows', ['StreamedID'])
        # Adding model 'Stream'
        db.create_table('ietfworkflows_stream', (
            ('id', orm['ietfworkflows.stream:id']),
            ('name', orm['ietfworkflows.stream:name']),
            ('with_groups', orm['ietfworkflows.stream:with_groups']),
            ('group_model', orm['ietfworkflows.stream:group_model']),
            ('group_chair_model', orm['ietfworkflows.stream:group_chair_model']),
            ('workflow', orm['ietfworkflows.stream:workflow']),
        ))
        db.send_create_signal('ietfworkflows', ['Stream'])

    def backwards(self, orm):
        """Drop the tables created by forwards()."""
        # Deleting model 'StreamedID'
        db.delete_table('ietfworkflows_streamedid')
        # Deleting model 'Stream'
        db.delete_table('ietfworkflows_stream')

    # Frozen ORM model definitions auto-generated by South; used to build the
    # ``orm`` object passed to forwards()/backwards().  Do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'idtracker.acronym': {
            'Meta': {'db_table': "'acronym'"},
            'acronym': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'acronym_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name_key': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'idtracker.idintendedstatus': {
            'Meta': {'db_table': "'id_intended_status'"},
            'intended_status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
            'intended_status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'idtracker.idstatus': {
            'Meta': {'db_table': "'id_status'"},
            'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
            'status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'idtracker.internetdraft': {
            'Meta': {'db_table': "'internet_drafts'"},
            'abstract': ('django.db.models.fields.TextField', [], {}),
            'b_approve_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'b_discussion_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'b_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'dunn_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'expired_tombstone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'extension_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'file_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.Acronym']", 'db_column': "'group_acronym_id'"}),
            'id_document_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id_document_tag': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intended_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDIntendedStatus']"}),
            'last_modified_date': ('django.db.models.fields.DateField', [], {}),
            'lc_changes': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
            'lc_expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'lc_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'local_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'replaced_by': ('django.db.models.fields.related.ForeignKey', ["orm['idtracker.InternetDraft']"], {'related_name': "'replaces_set'", 'null': 'True', 'db_column': "'replaced_by'", 'blank': 'True'}),
            'review_by_rfc_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'revision': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'revision_date': ('django.db.models.fields.DateField', [], {}),
            'rfc_number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDStatus']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'id_document_name'"}),
            'txt_page_count': ('django.db.models.fields.IntegerField', [], {}),
            'wgreturn_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'idtracker.personororginfo': {
            'Meta': {'db_table': "'person_or_org_info'"},
            'address_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'first_name_key': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'last_name_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'middle_initial': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'middle_initial_key': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'modified_by': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'person_or_org_tag': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'record_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
        },
        'ietfworkflows.annotationtag': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['permissions.Permission']", 'null': 'True', 'blank': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'annotation_tags'", 'to': "orm['workflows.Workflow']"})
        },
        'ietfworkflows.annotationtagobjectrelation': {
            'annotation_tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ietfworkflows.AnnotationTag']"}),
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'annotation_tags'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'ietfworkflows.objectannotationtaghistoryentry': {
            'change_date': ('django.db.models.fields.DateTimeField', [], {}),
            'comment': ('django.db.models.fields.TextField', [], {}),
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'annotation_tags_history'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']"}),
            'setted': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'unsetted': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'ietfworkflows.objectworkflowhistoryentry': {
            'comment': ('django.db.models.fields.TextField', [], {}),
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'workflow_history'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'from_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']"}),
            'to_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'transition_date': ('django.db.models.fields.DateTimeField', [], {})
        },
        'ietfworkflows.stateobjectrelationmetadata': {
            'estimated_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'from_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.StateObjectRelation']"})
        },
        'ietfworkflows.stream': {
            'group_chair_model': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'group_model': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'with_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ietfworkflows.WGWorkflow']"})
        },
        'ietfworkflows.streamedid': {
            'draft': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['idtracker.InternetDraft']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ietfworkflows.Stream']"})
        },
        'ietfworkflows.wgworkflow': {
            'selected_states': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['workflows.State']", 'null': 'True', 'blank': 'True'}),
            'selected_tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['ietfworkflows.AnnotationTag']", 'null': 'True', 'blank': 'True'}),
            'workflow_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['workflows.Workflow']", 'unique': 'True', 'primary_key': 'True'})
        },
        'permissions.permission': {
            'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'workflows.state': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'transitions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['workflows.Transition']", 'null': 'True', 'blank': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'states'", 'to': "orm['workflows.Workflow']"})
        },
        'workflows.stateobjectrelation': {
            'Meta': {'unique_together': "(('content_type', 'content_id', 'state'),)"},
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'state_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.State']"})
        },
        'workflows.transition': {
            'condition': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destination_state'", 'null': 'True', 'to': "orm['workflows.State']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['permissions.Permission']", 'null': 'True', 'blank': 'True'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transitions'", 'to': "orm['workflows.Workflow']"})
        },
        'workflows.workflow': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_state': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'workflow_state'", 'null': 'True', 'to': "orm['workflows.State']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['permissions.Permission']", 'symmetrical': 'False'})
        }
    }

    # Apps whose frozen models above are complete for this migration.
    complete_apps = ['ietfworkflows']
| {
"content_hash": "e17c5b7dc8230f8979475be25281cb4e",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 209,
"avg_line_length": 79.4375,
"alnum_prop": 0.5581916116927919,
"repo_name": "mcr/ietfdb",
"id": "1e3de388c4f8d5047e6fbfadf11d5e8920a26781",
"size": "16524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ietf/ietfworkflows/migrations/0005_add_streams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "239198"
},
{
"name": "JavaScript",
"bytes": "450755"
},
{
"name": "Perl",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "10286676"
},
{
"name": "Ruby",
"bytes": "3468"
},
{
"name": "Shell",
"bytes": "39950"
},
{
"name": "TeX",
"bytes": "23944"
}
],
"symlink_target": ""
} |
from django.conf import settings
import logging
from config_uplus import *
from xpay_context import Context
logger = logging.getLogger(__name__)
class XPayClient(object):
    """Client for the LG U+ (XPay) payment gateway.

    Wraps a ``Context`` transaction object: parameters are staged with
    ``set()``, sent with ``TX()``, and the JSON response is exposed through
    the ``response_*`` accessors. On failure, rollback and/or error-report
    subtransactions may be issued depending on the module-level
    ``auto_rollback`` / ``report_error`` configuration flags.
    """

    def __init__(self, is_test=True):
        """Initialize the xpay client.

        Args:
            is_test: (bool) When True, transactions go to the test URL;
                otherwise to the production (dacom) URL.
        """
        self._is_test = is_test
        self._response_code = None
        self._response_message = None
        # Merchant id from the uplus configuration module.
        self._mid = LGUPLUS_MID

    def init_tx(self):
        # Start a fresh transaction context for this merchant id.
        self._context = Context(self._mid)

    def set(self, key, value):
        # Stage a request parameter on the current transaction context.
        self._context.set(key, value)

    def TX(self):
        """Execute the prepared transaction against the gateway.

        Returns:
            bool: True if the HTTP call succeeded with a 2xx status and the
            body parsed as JSON; False otherwise.
        """
        is_success = False
        is_rollback = False
        is_reporting = False
        rollback_on_error = auto_rollback
        report_on_error = report_error
        # (reports)
        report_status = None
        report_message = None
        rollback_reason = None
        context_success = self._context.TX(self.URL)
        if not context_success:
            # Transport-level failure: arm a rollback.
            is_rollback = True
        elif (self._context.status < 200 or
                self._context.status >= 300):
            # Non-2xx HTTP status: record a synthetic 3xxxx response code.
            self._response_code = 30000 + self._context.status
            self._response_message = u"HTTP response code = {response_code}".format(response_code=self._context.status)
            # Bug fix: the message is a string, so the directive must be %s;
            # the original %d raised a TypeError while formatting the record.
            logger.error(u"[%s] TX failed: response code = %s, response message = %s",
                         self.tx_id, self._response_code, self._response_message)
            report_status = u"HTTP response {response_code}".format(response_code=self._context.status)
            report_message = self._context.body
            if report_on_error:
                # NOTE(review): rollback is armed under report_on_error here;
                # confirm it should not be gated on rollback_on_error instead.
                is_rollback = True
                rollback_reason = u"HTTP {response_code}".format(response_code=self._context.status)
                is_reporting = True
        elif self._context.is_json:
            # Success path: surface the gateway's own response code/message.
            is_success = True
            self._response_code = self._context.response_code
            self._response_message = self._context.response_message
        elif rollback_on_error:
            # 2xx but unparsable body.
            is_rollback = True
            rollback_reason = u"JSON decode fail"
        if rollback_on_error and is_rollback:
            # Best-effort rollback subtransaction; its result is ignored.
            new_context = Context(self._mid)
            new_context.set("LGD_TXNAME", "Rollback")
            new_context.set("LGD_RB_TXID", self.tx_id)
            new_context.set("LGD_RB_REASON", rollback_reason)
            new_context.TX(self.URL)
        if report_on_error and is_reporting:
            # Best-effort error report to the auxiliary endpoint.
            new_context = Context(self._mid)
            new_context.set("LGD_TXNAME", "Report")
            new_context.set("LGD_STATUS", report_status)
            new_context.set("LGD_MSG", report_message)
            new_context.TX(aux_url)
        return is_success

    def rollback(self, rollback_reason):
        """Explicitly roll back the current transaction.

        Args:
            rollback_reason: (string) Reason recorded with the rollback.

        Returns:
            bool: True if the rollback subtransaction succeeded.
        """
        try:
            new_context = Context(self._mid)
            new_context.set("LGD_TXNAME", "Rollback")
            new_context.set("LGD_RB_TXID", self.tx_id)
            new_context.set("LGD_RB_REASON", rollback_reason)
            is_success = new_context.TX(self.URL)
        except Exception:
            # A failed rollback must not propagate; it is logged below.
            is_success = False
        if not is_success:
            logger.error("[%s] Rollback failed!", self.tx_id)
        return is_success

    @property
    def tx_id(self):
        # Transaction id assigned by the underlying context.
        return self._context.tx_id

    @property
    def URL(self):
        # Gateway endpoint, selected by test/production mode.
        if self._is_test:
            url = test_url
        else:
            url = dacom_url
        return url

    @property
    def response_code(self):
        return self._response_code

    @property
    def response_message(self):
        return self._response_message

    @property
    def response_names(self):
        return self._context.response_names

    @property
    def response_name_count(self):
        return self._context.response_name_count

    @property
    def response_array_count(self):
        return self._context.response_array_count

    def response_name(self, idx):
        return self._context.response_name(idx)

    def response(self, name, idx=0):
        return self._context.response(name, idx)

    def response_with_default(self, name, default):
        """Return the named response value, or ``default`` if lookup fails."""
        try:
            response = self.response(name)
        except Exception:
            response = default
        return response
#if __name__ == '__main__':
# xpay_client = XPayClient(is_test=True)
# xpay_client.init_tx()
# xpay_client.set("LGD_TXNAME", "Ping")
# xpay_client.set("LGD_DUMMY", "=+A\uAC00\uB098\uB2E4")
# xpay_client.set("LGD_RESULTCNT", "3");
# xpay_client.TX()
| {
"content_hash": "3e6f22e17127dcb0fc5943cb54ea1a1f",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 119,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.5482037751167039,
"repo_name": "MyMusicTaste/django-oscar-uplus",
"id": "c71c09de60b3bf93b1fee185b714890144df4a99",
"size": "4952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uplus/xpay/xpay_client.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "65990"
}
],
"symlink_target": ""
} |
"""
ERP+
"""
__author__ = 'CVTek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'gap_opiniao.GAPOpiniao'
import auth, base_models
from orm import *
from form import *
# Prefer the site-specific override of GAPSenha when installed; fall back to
# the stock implementation otherwise.
try:
    from my_gap_senha import GAPSenha
except ImportError:  # was a bare except; only a missing module should trigger the fallback
    from gap_senha import GAPSenha
class GAPOpiniao(Model, View):
    """Customer opinion/feedback record for the queue-management (GAP) module.

    Declares the ORM fields, view metadata, workflow and access rules, plus
    helpers to query opinions and compute a service rating.
    """
    def __init__(self, **kargs):
        Model.__init__(self, **kargs)
        self.__name__ = 'gap_opiniao'
        self.__title__ = 'Opinião'
        self.__model_name__ = __model_name__
        self.__list_edit_mode__ = 'edit'
        self.__get_options__ = ['nome']
        self.__order_by__ = 'gap_opiniao.nome'
        # Single-state workflow: records are created directly as 'Confirmado'.
        self.__workflow__ = (
            'estado', {'Confirmado':[]}
        )
        # Role-based access rules for this model.
        self.__auth__ = {
            'read':['All'],
            'write':['Atendedor'],
            'create':['Gestor de Loja'],
            'delete':['Gestor de Atendimento'],
            'full_access':['Gestor de Atendimento']
        }
        # Records in state 'Confirmado' are not editable.
        self.__no_edit__ = [
            ('estado', ['Confirmado'])
        ]
        self.nome = string_field(view_order = 1, name = 'Nome', args='readonly', size = 80, search=False, onlist=False)
        self.contacto = string_field(view_order = 2, args='readonly', name = 'Contacto', onlist=False, size = 40)
        self.data = date_field(view_order=3, name ='Data', args='readonly', default=datetime.date.today())
        self.hora = time_field(view_order=4, name ='Hora', args='readonly', default=time.strftime('%H:%M:%S'))
        self.observacao = text_field(view_order=5, name='Observação', size=100, args="rows=30", onlist=False, search=False)
        self.classificacao = string_field(view_order = 6, args='readonly', name = 'Classificação', size = 40)
        self.senha = string_field(view_order = 7, args='readonly', name = 'Senha', size = 50)
        self.servico = string_field(view_order = 8, name = 'Serviço', args='readonly',size = 50)
        self.loja = string_field(view_order = 9, name = 'Loja', size = 50, args='readonly')
        self.estado = info_field(view_order = 10, name='Estado', default='Confirmado', args='readonly', hidden=True, nolabel=True, onlist=False)
    # Fetches all available opinions (the 'nome' options).
    def get_self(self):
        return self.get_options()
    def get_opts(self, get_str):
        """
        This get_opts, present in every model, feeds this model's choice and
        combo fields; it does not call this model's get_options functions
        when invoked from another model!
        """
        # NOTE(review): eval() on get_str executes arbitrary code -- ensure
        # callers never pass untrusted input here.
        return eval(get_str)
    # Fetches all opinions for a given date.
    def get_opiniao_data(self, data=None):
        # This function fetches opinions by date.
        def get_results():
            options = []
            opts = self.get(order_by='nome')
            for option in opts:
                if option['data'] == data:
                    options.append((str(option['id']), option['nome'] + ' - ' + option['observacao']))
            return options
        # NOTE(review): the cache key does not include `data`, so cached
        # results for one date may be served for another -- confirm intended.
        return erp_cache.get(key=self.__model_name__ + '_opiniao_data', createfunc=get_results)
    # Fetches all opinions for a given name.
    def get_opiniao_nome(self, nome=None):
        # This function fetches opinions by name.
        def get_results():
            options = []
            opts = self.get(order_by='nome')
            for option in opts:
                if option['nome'] == nome:
                    options.append((str(option['id']), option['nome'] + ' - ' + option['observacao']))
            return options
        # NOTE(review): same cache-key issue as get_opiniao_data -- the key
        # ignores `nome`.
        return erp_cache.get(key=self.__model_name__ + '_opiniao_nome', createfunc=get_results)
    # Inserts a new row into the gap_opiniao table.
    def addOpiniao(self,user=None,nome=None,contacto=None,comentario=None,classificacao=None, loja=None, nome_atendedor=None):
        try:
            from gap_timeAtendimento import GAPTimeAtendimento
            # Grab the last client served by this attendant, who in theory is
            # the one who submitted the evaluation.
            result = GAPTimeAtendimento().getLastClient(nome_atendedor=nome_atendedor, loja=loja)
            result = str(result).split(";")
            senha = result[0]
            servico = result[1]
            data = datetime.date.today()
            hora = datetime.datetime.now().time().strftime('%H:%M:%S')
            content = {
                'user': user,
                'nome': nome,
                'contacto':contacto,
                'data':data,
                'hora':hora,
                'observacao':comentario,
                'classificacao':classificacao,
                'senha':senha,
                'servico':servico,
                'loja':loja,
                'estado':'Confirmado',
            }
            GAPOpiniao(**content).put()
            return True
        except:
            # NOTE(review): bare except hides any failure (including bugs);
            # callers only ever see False.
            return False
    # Gets the service rating within a date interval.
    def getRating(self, servico=None, loja=None, dataInicio=None, dataFim=None):
        try:
            dataInicio = str(dataInicio).split("-")
            dataFim = str(dataFim).split("-")
            if servico == None:
                self.where = "loja='{loja}'".format(loja=loja)
            else:
                self.where = "servico='{servico}' and loja='{loja}'".format(servico=servico, loja=loja)
            opts = self.get()
            for option in opts:
                data_opiniao = str(option['data']).split("-")
                # Return the first opinion whose date falls inside
                # [dataInicio, dataFim] (inclusive).
                if (datetime.date(int(data_opiniao[0]),int(data_opiniao[1]), int(data_opiniao[2]))>=datetime.date(int(dataInicio[0]),int(dataInicio[1]), int(dataInicio[2]))) and (datetime.date(int(data_opiniao[0]),int(data_opiniao[1]), int(data_opiniao[2]))<=datetime.date(int(dataFim[0]),int(dataFim[1]), int(dataFim[2]))):
                    return str(option['classificacao'])
            return "0.0"
        except:
            return "0.0" | {
"content_hash": "0fc1c79b17f6f9f97a6bc7719ad884a6",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 324,
"avg_line_length": 42.94814814814815,
"alnum_prop": 0.5660572611245257,
"repo_name": "IdeaSolutionsOnline/ERP4R",
"id": "795064cafbb90517119789c162454bb23529d561",
"size": "5862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/objs/gap_opiniao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "280709"
},
{
"name": "HTML",
"bytes": "631043"
},
{
"name": "JavaScript",
"bytes": "90675"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "1808337"
},
{
"name": "Smarty",
"bytes": "523490"
}
],
"symlink_target": ""
} |
"""
sy.exception
------------
:synopsis: Exceptions used by the library
.. moduleauthor: Paul Diaconescu <p@afajl.com>
"""
class Error(Exception):
    """Base exception for the sy library.

    Carries a human-readable message which is rendered by both
    ``unicode()`` and ``str()``.
    """

    def __init__(self, msg):
        # The message lives on the instance; the base class gets no args.
        self.msg = msg
        Exception.__init__(self)

    def __unicode__(self):
        return self.msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return '{0}({1})'.format(type(self).__name__, str(self))
| {
"content_hash": "ed558bf3d91f9f02ecdf758f7661681d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 63,
"avg_line_length": 17.92,
"alnum_prop": 0.5290178571428571,
"repo_name": "afajl/sy",
"id": "8a5b5fef3f94e96a231bf298aefb143e8bd67ed3",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sy/exception.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "100752"
}
],
"symlink_target": ""
} |
"""
Database models.
"""
from django.db import models
class FlagonParams(models.Model):
    """
    Parameters for a feature.
    """
    # Parameter name.
    key = models.CharField(max_length=255)
    # Raw value, stored as text and interpreted according to ``type``.
    value = models.TextField()
    # Declared type of ``value``; one of 'bool', 'int' or 'str'.
    type = models.CharField(
        max_length=30,
        choices=(('bool', 'bool'), ('int', 'int'), ('str', 'str')))

    def __repr__(self):
        # Render as key=type(value).
        # NOTE(review): unicode() is Python 2 only -- this breaks on Python 3.
        return unicode("%s=%s(%s)" % (self.key, self.type, self.value))

    __str__ = __repr__
class FlagonFeature(models.Model):
    """
    A feature.
    """
    # Feature name.
    name = models.CharField(max_length=255)
    # Whether the feature flag is currently enabled.
    active = models.BooleanField(default=False)
    # Optional strategy identifier controlling how the flag is evaluated.
    strategy = models.CharField(max_length=255, blank=True, null=True)
    # Optional parameters consumed by the strategy.
    params = models.ForeignKey(FlagonParams, null=True, blank=True)

    def __repr__(self):
        # NOTE(review): unicode() is Python 2 only -- this breaks on Python 3.
        return unicode(self.name)

    __str__ = __repr__
| {
"content_hash": "037869e7c52967e1272c9fcce638d348",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 23.8,
"alnum_prop": 0.5978391356542617,
"repo_name": "pombredanne/flagon",
"id": "ab4ba651e85039388b645316d89ec170cdbd02f8",
"size": "1950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/flagon/backends/db_django/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3787"
},
{
"name": "Python",
"bytes": "69165"
}
],
"symlink_target": ""
} |
"""
FormPost Middleware
Translates a browser form post into a regular Swift object PUT.
The format of the form is::
<form action="<swift-url>" method="POST"
enctype="multipart/form-data">
<input type="hidden" name="redirect" value="<redirect-url>" />
<input type="hidden" name="max_file_size" value="<bytes>" />
<input type="hidden" name="max_file_count" value="<count>" />
<input type="hidden" name="expires" value="<unix-timestamp>" />
<input type="hidden" name="signature" value="<hmac>" />
<input type="file" name="file1" /><br />
<input type="submit" />
</form>
The <swift-url> is the URL to the Swift destination, such as::
https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix
The name of each file uploaded will be appended to the <swift-url>
given. So, you can upload directly to the root of container with a
url like::
https://swift-cluster.example.com/v1/AUTH_account/container/
Optionally, you can include an object prefix to better separate
different users' uploads, such as::
https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix
Note the form method must be POST and the enctype must be set as
"multipart/form-data".
The redirect attribute is the URL to redirect the browser to after
the upload completes. The URL will have status and message query
parameters added to it, indicating the HTTP status code for the
upload (2xx is success) and a possible message for further
information if there was an error (such as "max_file_size exceeded").
The max_file_size attribute must be included and indicates the
largest single file upload that can be done, in bytes.
The max_file_count attribute must be included and indicates the
maximum number of files that can be uploaded with the form. Include
additional ``<input type="file" name="filexx" />`` attributes if
desired.
The expires attribute is the Unix timestamp before which the form
must be submitted before it is invalidated.
The signature attribute is the HMAC-SHA1 signature of the form. Here is
sample code for computing the signature::
import hmac
from hashlib import sha1
from time import time
path = '/v1/account/container/object_prefix'
redirect = 'https://myserver.com/some-page'
max_file_size = 104857600
max_file_count = 10
expires = int(time() + 600)
key = 'mykey'
hmac_body = '%s\\n%s\\n%s\\n%s\\n%s' % (path, redirect,
max_file_size, max_file_count, expires)
signature = hmac.new(key, hmac_body, sha1).hexdigest()
The key is the value of the X-Account-Meta-Temp-URL-Key header on the
account.
Be certain to use the full path, from the /v1/ onward.
The command line tool ``swift-form-signature`` may be used (mostly
just when testing) to compute expires and signature.
Also note that the file attributes must be after the other attributes
in order to be processed correctly. If attributes come after the
file, they won't be sent with the subrequest (there is no way to
parse all the attributes on the server-side without reading the whole
thing into memory -- to service many requests, some with large files,
there just isn't enough memory on the server, so attributes following
the file are simply ignored).
"""
__all__ = ['FormPost', 'filter_factory', 'READ_CHUNK_SIZE', 'MAX_VALUE_LENGTH']
import hmac
import re
import rfc822
from hashlib import sha1
from time import time
from urllib import quote
from swift.common.middleware.tempurl import get_tempurl_keys_from_metadata
from swift.common.utils import streq_const_time
from swift.common.wsgi import make_pre_authed_env
from swift.proxy.controllers.base import get_account_info
#: The size of data to read from the form at any given time.
READ_CHUNK_SIZE = 4096
#: The maximum size of any attribute's value. Any additional data will be
#: truncated.
MAX_VALUE_LENGTH = 4096
#: Regular expression to match form attributes.
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
class FormInvalid(Exception):
    """Raised when the POSTed form is malformed or fails validation."""
    pass
def _parse_attrs(header):
    """
    Given the value of a header like:

        Content-Disposition: form-data; name="somefile"; filename="test.html"

    Return data like

        ("form-data", {"name": "somefile", "filename": "test.html"})

    :param header: Value of a header (the part after the ': ').
    :returns: (value name, dict) of the attribute data parsed (see above).
    """
    parsed = {}
    remainder = ''
    if '; ' in header:
        header, remainder = header.split('; ', 1)
    # Consume one 'key=value' attribute at a time from the front.
    while True:
        match = ATTRIBUTES_RE.match(remainder)
        if not match:
            break
        remainder = remainder[match.end():]
        parsed[match.group(1)] = match.group(2).strip('"')
    return header, parsed
class _IterRequestsFileLikeObject(object):
    """File-like view over one MIME part of a multipart stream.

    Reads from ``wsgi_input`` but stops at the next ``boundary``; leftover
    bytes past the boundary stay in ``input_buffer`` for the next part.
    """
    def __init__(self, wsgi_input, boundary, input_buffer):
        # True once this part's data has been fully consumed.
        self.no_more_data_for_this_file = False
        # True once the terminating '--' boundary (or EOF) has been seen.
        self.no_more_files = False
        self.wsgi_input = wsgi_input
        self.boundary = boundary
        # Bytes already read from the stream but not yet returned.
        self.input_buffer = input_buffer
    def read(self, length=None):
        """Return up to ``length`` bytes of this part ('' at part end)."""
        if not length:
            length = READ_CHUNK_SIZE
        if self.no_more_data_for_this_file:
            return ''
        # read enough data to know whether we're going to run
        # into a boundary in next [length] bytes
        if len(self.input_buffer) < length + len(self.boundary) + 2:
            to_read = length + len(self.boundary) + 2
            while to_read > 0:
                chunk = self.wsgi_input.read(to_read)
                to_read -= len(chunk)
                self.input_buffer += chunk
                if not chunk:
                    # EOF on the underlying stream.
                    self.no_more_files = True
                    break
        boundary_pos = self.input_buffer.find(self.boundary)
        # boundary does not exist in the next (length) bytes
        if boundary_pos == -1 or boundary_pos > length:
            ret = self.input_buffer[:length]
            self.input_buffer = self.input_buffer[length:]
        # if it does, just return data up to the boundary
        else:
            ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
            # A trailing '--' right after the boundary marks the final part.
            self.no_more_files = self.input_buffer.startswith('--')
            self.no_more_data_for_this_file = True
            # Drop the two chars following the boundary ('--' or '\r\n').
            self.input_buffer = self.input_buffer[2:]
        return ret
    def readline(self):
        """Return one '\\r\\n'-terminated line of this part ('' at end)."""
        if self.no_more_data_for_this_file:
            return ''
        boundary_pos = newline_pos = -1
        # Buffer input until a newline or a boundary shows up (or EOF).
        while newline_pos < 0 and boundary_pos < 0:
            chunk = self.wsgi_input.read(READ_CHUNK_SIZE)
            self.input_buffer += chunk
            newline_pos = self.input_buffer.find('\r\n')
            boundary_pos = self.input_buffer.find(self.boundary)
            if not chunk:
                self.no_more_files = True
                break
        # found a newline
        if newline_pos >= 0 and \
                (boundary_pos < 0 or newline_pos < boundary_pos):
            # Use self.read to ensure any logic there happens...
            ret = ''
            to_read = newline_pos + 2
            while to_read > 0:
                chunk = self.read(to_read)
                # Should never happen since we're reading from input_buffer,
                # but just for completeness...
                if not chunk:
                    break
                to_read -= len(chunk)
                ret += chunk
            return ret
        else:  # no newlines, just return up to next boundary
            return self.read(len(self.input_buffer))
def _iter_requests(wsgi_input, boundary):
    """
    Given a multi-part mime encoded input file object and boundary,
    yield file-like objects for each part.

    :param wsgi_input: The file-like object to read from.
    :param boundary: The mime boundary to separate new file-like
                     objects on.
    :returns: A generator of file-like objects for each part.
    """
    opening_boundary = '--' + boundary
    if wsgi_input.readline().strip() != opening_boundary:
        raise FormInvalid('invalid starting boundary')
    # Subsequent boundaries are preceded by a CRLF.
    part_boundary = '\r\n' + opening_boundary
    leftover = ''
    while True:
        part = _IterRequestsFileLikeObject(wsgi_input, part_boundary, leftover)
        yield part
        if part.no_more_files:
            return
        # Hand unconsumed bytes on to the next part's reader.
        leftover = part.input_buffer
class _CappedFileLikeObject(object):
"""
A file-like object wrapping another file-like object that raises
an EOFError if the amount of data read exceeds a given
max_file_size.
:param fp: The file-like object to wrap.
:param max_file_size: The maximum bytes to read before raising an
EOFError.
"""
def __init__(self, fp, max_file_size):
self.fp = fp
self.max_file_size = max_file_size
self.amount_read = 0
def read(self, size=None):
ret = self.fp.read(size)
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
raise EOFError('max_file_size exceeded')
return ret
def readline(self):
ret = self.fp.readline()
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
raise EOFError('max_file_size exceeded')
return ret
class FormPost(object):
    """
    FormPost Middleware

    See above for a full description.

    The proxy logs created for any subrequests made will have swift.source set
    to "FP".

    :param app: The next WSGI filter or app in the paste.deploy
                chain.
    :param conf: The configuration dict for the middleware.
    """

    def __init__(self, app, conf):
        #: The next WSGI application/filter in the paste.deploy pipeline.
        self.app = app
        #: The filter configuration dict.
        self.conf = conf

    def __call__(self, env, start_response):
        """
        Main hook into the WSGI paste.deploy filter/app pipeline.

        :param env: The WSGI environment dict.
        :param start_response: The WSGI start_response hook.
        :returns: Response as per WSGI.
        """
        # Only multipart/form-data POSTs are handled here; everything else
        # is passed straight through to the wrapped app.
        if env['REQUEST_METHOD'] == 'POST':
            try:
                content_type, attrs = \
                    _parse_attrs(env.get('CONTENT_TYPE') or '')
                if content_type == 'multipart/form-data' and \
                        'boundary' in attrs:
                    http_user_agent = "%s FormPost" % (
                        env.get('HTTP_USER_AGENT', ''))
                    env['HTTP_USER_AGENT'] = http_user_agent.strip()
                    status, headers, body = self._translate_form(
                        env, attrs['boundary'])
                    start_response(status, headers)
                    return body
            except (FormInvalid, EOFError) as err:
                body = 'FormPost: %s' % err
                start_response(
                    '400 Bad Request',
                    (('Content-Type', 'text/plain'),
                     ('Content-Length', str(len(body)))))
                return [body]
        return self.app(env, start_response)

    def _translate_form(self, env, boundary):
        """
        Translates the form data into subrequests and issues a
        response.

        :param env: The WSGI environment dict.
        :param boundary: The MIME type boundary to look for.
        :returns: status_line, headers_list, body
        """
        keys = self._get_keys(env)
        status = message = ''
        attributes = {}
        file_count = 0
        for fp in _iter_requests(env['wsgi.input'], boundary):
            hdrs = rfc822.Message(fp, 0)
            disp, attrs = \
                _parse_attrs(hdrs.getheader('Content-Disposition', ''))
            if disp == 'form-data' and attrs.get('filename'):
                # A file part: upload it via a subrequest.
                file_count += 1
                try:
                    if file_count > int(attributes.get('max_file_count') or 0):
                        status = '400 Bad Request'
                        message = 'max file count exceeded'
                        break
                except ValueError:
                    raise FormInvalid('max_file_count not an integer')
                attributes['filename'] = attrs['filename'] or 'filename'
                if 'content-type' not in attributes and 'content-type' in hdrs:
                    attributes['content-type'] = \
                        hdrs['Content-Type'] or 'application/octet-stream'
                status, message = self._perform_subrequest(env, attributes, fp,
                                                           keys)
                if status[:1] != '2':
                    break
            else:
                # A plain form attribute: read (truncated) value into the
                # attributes dict for later use.
                data = ''
                mxln = MAX_VALUE_LENGTH
                while mxln:
                    chunk = fp.read(mxln)
                    if not chunk:
                        break
                    mxln -= len(chunk)
                    data += chunk
                # Drain any remaining (truncated) attribute data.
                while fp.read(READ_CHUNK_SIZE):
                    pass
                if 'name' in attrs:
                    attributes[attrs['name'].lower()] = data.rstrip('\r\n--')
        if not status:
            status = '400 Bad Request'
            message = 'no files to process'
        redirect = attributes.get('redirect')
        if not redirect:
            body = status
            if message:
                body = status + '\r\nFormPost: ' + message.title()
            # Bug fix: WSGI header values must be strings; this previously
            # passed the raw int from len(body) (cf. the str() calls used
            # for Content-Length elsewhere in this class).
            headers = [('Content-Type', 'text/plain'),
                       ('Content-Length', str(len(body)))]
            return status, headers, body
        status = status.split(' ', 1)[0]
        # Append status/message query parameters to the redirect URL.
        if '?' in redirect:
            redirect += '&'
        else:
            redirect += '?'
        redirect += 'status=%s&message=%s' % (quote(status), quote(message))
        body = '<html><body><p><a href="%s">' \
            'Click to continue...</a></p></body></html>' % redirect
        headers = [('Location', redirect), ('Content-Length', str(len(body)))]
        return '303 See Other', headers, body

    def _perform_subrequest(self, orig_env, attributes, fp, keys):
        """
        Performs the subrequest and returns the response.

        :param orig_env: The WSGI environment dict; will only be used
                         to form a new env for the subrequest.
        :param attributes: dict of the attributes of the form so far.
        :param fp: The file-like object containing the request body.
        :param keys: The account keys to validate the signature with.
        :returns: (status_line, message)
        """
        if not keys:
            return '401 Unauthorized', 'invalid signature'
        try:
            max_file_size = int(attributes.get('max_file_size') or 0)
        except ValueError:
            raise FormInvalid('max_file_size not an integer')
        subenv = make_pre_authed_env(orig_env, 'PUT', agent=None,
                                     swift_source='FP')
        if 'QUERY_STRING' in subenv:
            del subenv['QUERY_STRING']
        subenv['HTTP_TRANSFER_ENCODING'] = 'chunked'
        # Cap the upload at max_file_size; exceeding it raises EOFError.
        subenv['wsgi.input'] = _CappedFileLikeObject(fp, max_file_size)
        # Append the filename to container (or object-prefix) paths.
        if subenv['PATH_INFO'][-1] != '/' and \
                subenv['PATH_INFO'].count('/') < 4:
            subenv['PATH_INFO'] += '/'
        subenv['PATH_INFO'] += attributes['filename'] or 'filename'
        if 'content-type' in attributes:
            subenv['CONTENT_TYPE'] = \
                attributes['content-type'] or 'application/octet-stream'
        elif 'CONTENT_TYPE' in subenv:
            del subenv['CONTENT_TYPE']
        try:
            if int(attributes.get('expires') or 0) < time():
                return '401 Unauthorized', 'form expired'
        except ValueError:
            raise FormInvalid('expired not an integer')
        hmac_body = '%s\n%s\n%s\n%s\n%s' % (
            orig_env['PATH_INFO'],
            attributes.get('redirect') or '',
            attributes.get('max_file_size') or '0',
            attributes.get('max_file_count') or '0',
            attributes.get('expires') or '0')
        has_valid_sig = False
        # Check every key; comparisons are constant-time to avoid leaking
        # signature prefixes via timing.
        for key in keys:
            sig = hmac.new(key, hmac_body, sha1).hexdigest()
            if streq_const_time(sig, (attributes.get('signature') or
                                      'invalid')):
                has_valid_sig = True
        if not has_valid_sig:
            return '401 Unauthorized', 'invalid signature'
        substatus = [None]

        def _start_response(status, headers, exc_info=None):
            substatus[0] = status

        # Drive the subrequest just far enough to capture its status.
        i = iter(self.app(subenv, _start_response))
        try:
            i.next()
        except StopIteration:
            pass
        return substatus[0], ''

    def _get_keys(self, env):
        """
        Fetch the tempurl keys for the account. Also validate that the request
        path indicates a valid container; if not, no keys will be returned.

        :param env: The WSGI environment for the request.
        :returns: list of tempurl keys
        """
        parts = env['PATH_INFO'].split('/', 4)
        if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
                not parts[3]:
            return []
        account_info = get_account_info(env, self.app, swift_source='FP')
        return get_tempurl_keys_from_metadata(account_info['meta'])
def filter_factory(global_conf, **local_conf):
    """Returns the WSGI filter for use with paste.deploy."""
    # Local settings override the global paste.deploy configuration.
    conf = dict(global_conf, **local_conf)

    def _form_post_filter(app):
        return FormPost(app, conf)
    return _form_post_filter
| {
"content_hash": "9a12036d66d572e5d0f04280807f7c2a",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 79,
"avg_line_length": 37.190677966101696,
"alnum_prop": 0.5804944741939159,
"repo_name": "JioCloud/swift",
"id": "a76fcde4116c9543459d1a7ac5cc7462ef640b43",
"size": "18144",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swift/common/middleware/formpost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3849859"
},
{
"name": "Shell",
"bytes": "2933"
}
],
"symlink_target": ""
} |
import pickle as pkl
from mxnet.ndarray import NDArray
import mxnet as mx
from mxnet.test_utils import *
from common import random_seed
from mxnet.base import mx_real_t
from numpy.testing import assert_allclose
import numpy.random as rnd
import numpy as np
import scipy.sparse as spsp
from common import assertRaises, xfail_when_nonstandard_decimal_separator
from mxnet.ndarray.sparse import RowSparseNDArray, CSRNDArray
import pytest
mx.npx.reset_np()
def sparse_nd_ones(shape, stype):
    """Return an all-ones NDArray of `shape`, converted to storage `stype`."""
    dense_ones = mx.nd.ones(shape)
    return dense_ones.tostype(stype)
def test_sparse_nd_elemwise_add():
    """elemwise_add on dense/row_sparse operands matches the numpy result."""
    def check_binary(shapes, stypes, op, ref_op):
        # Build one operand per requested storage type.
        operands = []
        for cur_shape, stype in zip(shapes, stypes):
            if stype == 'row_sparse':
                arr, _ = rand_sparse_ndarray(cur_shape, stype)
            elif stype == 'default':
                arr = mx.nd.array(random_arrays(cur_shape), dtype=np.float32)
            else:
                assert False
            operands.append(arr)
        lhs, rhs = operands
        out = op(lhs, rhs)
        assert_almost_equal(out.asnumpy(), ref_op(lhs.asnumpy(), rhs.asnumpy()))

    ref_op = lambda a, b: a + b
    op = mx.nd.elemwise_add
    for _ in range(3):
        shapes = [rand_shape_2d()] * 2
        check_binary(shapes, ['default'] * 2, op, ref_op)
        check_binary(shapes, ['row_sparse', 'row_sparse'], op, ref_op)
def test_sparse_nd_copy():
    """copyto preserves values across devices and across storage types."""
    def verify_copy(src_stype, dst_stype, shape):
        src = rand_ndarray(shape, src_stype)
        # Device-to-device copy.
        on_dev = src.copyto(default_device())
        # Copy into an existing array of the target storage type.
        dst = src.copyto(rand_ndarray(shape, dst_stype))
        assert np.sum(np.abs(src.asnumpy() != on_dev.asnumpy())) == 0.0
        assert np.sum(np.abs(src.asnumpy() != dst.asnumpy())) == 0.0

    shape_2d = rand_shape_2d()
    shape_3d = rand_shape_3d()
    for stype in ('row_sparse', 'csr'):
        verify_copy(stype, 'default', shape_2d)
        verify_copy('default', stype, shape_2d)
    # 3-D arrays only support row_sparse <-> default conversions.
    verify_copy('row_sparse', 'row_sparse', shape_3d)
    verify_copy('row_sparse', 'default', shape_3d)
    verify_copy('default', 'row_sparse', shape_3d)
def test_sparse_nd_basic():
    """A random row_sparse NDArray has one aux array with int64 indices."""
    shape = rand_shape_2d()
    arr, _ = rand_sparse_ndarray(shape, 'row_sparse')
    assert arr._num_aux == 1
    assert arr.indices.dtype == np.int64
    assert arr.stype == 'row_sparse'
def test_sparse_nd_setitem():
    """Slice-assignment on sparse NDArrays from NDArray/numpy/scalar sources."""
    def check_sparse_nd_setitem(stype, shape, dst):
        x = mx.nd.zeros(shape=shape, stype=stype)
        x[:] = dst
        dst_nd = mx.nd.array(dst) if isinstance(dst, (np.ndarray, np.generic)) else dst
        # Bug fix: the original conditional expression bound to the whole
        # comparison, so the scalar branch asserted np.all(dst) -- trivially
        # true for any nonzero scalar -- instead of comparing against the
        # assigned array.
        expected = dst_nd.asnumpy() if isinstance(dst_nd, NDArray) else dst_nd
        assert np.all(x.asnumpy() == expected)

    shape = rand_shape_2d()
    for stype in ['row_sparse', 'csr']:
        # ndarray assignment
        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
        # numpy assignment
        check_sparse_nd_setitem(stype, shape, np.ones(shape))
    # scalar assigned to row_sparse NDArray
    check_sparse_nd_setitem('row_sparse', shape, 2)
def test_sparse_nd_slice():
    """Row slicing, integer indexing and the slice op on CSR NDArrays."""
    shape = (rnd.randint(2, 10), rnd.randint(2, 10))
    arr, _ = rand_sparse_ndarray(shape, 'csr')
    dense = arr.asnumpy()
    start = rnd.randint(0, shape[0] - 1)
    end = rnd.randint(start + 1, shape[0])
    # Basic row slices, including a negative start index.
    assert same(arr[start:end].asnumpy(), dense[start:end])
    assert same(arr[start - shape[0]:end].asnumpy(), dense[start:end])
    assert same(arr[start:].asnumpy(), dense[start:])
    assert same(arr[:end].asnumpy(), dense[:end])
    ind = rnd.randint(-shape[0], shape[0] - 1)
    # Integer indexing keeps a leading axis of size 1.
    assert same(arr[ind].asnumpy(), dense[ind][np.newaxis, :])

    start_col = rnd.randint(0, shape[1] - 1)
    end_col = rnd.randint(start_col + 1, shape[1])
    sliced = mx.nd.slice(arr, begin=(start, start_col), end=(end, end_col))
    sliced_ref = mx.nd.slice(mx.nd.array(dense), begin=(start, start_col), end=(end, end_col))
    assert same(sliced_ref.asnumpy(), sliced.asnumpy())

    # Same checks against an all-zero CSR input.
    arr = mx.nd.sparse.zeros('csr', shape)
    dense = arr.asnumpy()
    assert same(arr[start:end].asnumpy(), dense[start:end])
    sliced = mx.nd.slice(arr, begin=(start, start_col), end=(end, end_col))
    sliced_ref = mx.nd.slice(mx.nd.array(dense), begin=(start, start_col), end=(end, end_col))
    assert same(sliced_ref.asnumpy(), sliced.asnumpy())

    def check_slice_nd_csr_fallback(shape):
        arr, _ = rand_sparse_ndarray(shape, 'csr')
        dense = arr.asnumpy()
        lo = rnd.randint(0, shape[0] - 1)
        hi = rnd.randint(lo + 1, shape[0])
        # A non-trivial step must fall back to the dense slice op.
        res = mx.nd.sparse.slice(arr, begin=(lo,), end=(hi + 1,), step=(2,))
        res_ref = mx.nd.slice(mx.nd.array(dense), begin=(lo,), end=(hi + 1,), step=(2,))
        assert same(res_ref.asnumpy(), res.asnumpy())

    check_slice_nd_csr_fallback((rnd.randint(2, 10), rnd.randint(1, 10)))
def test_sparse_nd_concat():
    """Concatenating CSR NDArrays along axis 0 matches numpy concatenation."""
    def check_concat(arrays):
        ret = np.concatenate([arr.asnumpy() for arr in arrays], axis=0)
        # Bug fix: the result of same() was previously discarded, so this
        # check could never fail; it must be asserted.
        assert same(mx.nd.concat(*arrays, dim=0).asnumpy(), ret)
    nds = []
    zero_nds = []
    # All inputs share a column count so axis-0 concat is well-defined.
    ncols = rnd.randint(2, 10)
    for _ in range(3):
        shape = (rnd.randint(2, 10), ncols)
        A, _ = rand_sparse_ndarray(shape, 'csr')
        nds.append(A)
        zero_nds.append(mx.nd.zeros(shape).tostype('csr'))
    check_concat(nds)
    check_concat(zero_nds)
def test_sparse_nd_equal():
    """__eq__ between sparse arrays and broadcast scalars."""
    for stype in ('row_sparse', 'csr'):
        shape = rand_shape_2d()
        zeros = mx.nd.zeros(shape=shape, stype=stype)
        ones = sparse_nd_ones(shape, stype)
        res = zeros == ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        res = 0 == ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        # Comparing against 0 densifies the result.
        assert res.stype == 'default'
        res = 1 == ones
        assert (res.asnumpy() == np.ones(shape)).all()
        # Comparing against a nonzero scalar keeps the sparse storage type.
        assert res.stype == stype
def test_sparse_nd_not_equal():
    """__ne__ between sparse arrays and broadcast scalars."""
    for stype in ('row_sparse', 'csr'):
        shape = rand_shape_2d()
        zeros = mx.nd.zeros(shape=shape, stype=stype)
        ones = sparse_nd_ones(shape, stype)
        res = zeros != ones
        assert (res.asnumpy() == np.ones(shape)).all()
        res = 0 != ones
        assert (res.asnumpy() == np.ones(shape)).all()
        # 0 != x keeps the sparse storage type.
        assert res.stype == stype
        res = 1 != ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        # 1 != x densifies the result.
        assert res.stype == 'default'
def test_sparse_nd_greater():
    """Elementwise > between sparse arrays and scalars; checks result stype."""
    for stype in ('row_sparse', 'csr'):
        shape = rand_shape_2d()
        zeros = mx.nd.zeros(shape=shape, stype=stype)
        ones = sparse_nd_ones(shape, stype)
        res = zeros > ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        res = ones > 0
        assert (res.asnumpy() == np.ones(shape)).all()
        assert res.stype == stype
        res = 0 > ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        assert res.stype == stype
        res = ones > 1
        assert (res.asnumpy() == np.zeros(shape)).all()
        assert res.stype == stype
def test_sparse_nd_greater_equal():
    """Elementwise >= between sparse arrays and scalars; checks result stype."""
    for stype in ('row_sparse', 'csr'):
        shape = rand_shape_2d()
        zeros = mx.nd.zeros(shape=shape, stype=stype)
        ones = sparse_nd_ones(shape, stype)
        res = zeros >= ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        res = ones >= 0
        assert (res.asnumpy() == np.ones(shape)).all()
        # >= 0 holds for the implicit zeros too, so the result is dense
        assert res.stype == 'default'
        res = 0 >= ones
        assert (res.asnumpy() == np.zeros(shape)).all()
        assert res.stype == 'default'
        res = ones >= 1
        assert (res.asnumpy() == np.ones(shape)).all()
        assert res.stype == stype
def test_sparse_nd_lesser():
    """Elementwise < between sparse arrays and scalars; checks result stype."""
    for stype in ('row_sparse', 'csr'):
        shape = rand_shape_2d()
        zeros = mx.nd.zeros(shape=shape, stype=stype)
        ones = sparse_nd_ones(shape, stype)
        res = ones < zeros
        assert (res.asnumpy() == np.zeros(shape)).all()
        res = 0 < ones
        assert (res.asnumpy() == np.ones(shape)).all()
        assert res.stype == stype
        res = ones < 0
        assert (res.asnumpy() == np.zeros(shape)).all()
        assert res.stype == stype
        res = ones < 1
        assert (res.asnumpy() == np.zeros(shape)).all()
        # < 1 holds for the implicit zeros, so the result is dense
        assert res.stype == 'default'
def test_sparse_nd_lesser_equal():
    """Elementwise <= between sparse arrays and scalars; checks result stype."""
    for stype in ('row_sparse', 'csr'):
        shape = rand_shape_2d()
        zeros = mx.nd.zeros(shape=shape, stype=stype)
        ones = sparse_nd_ones(shape, stype)
        res = ones <= zeros
        assert (res.asnumpy() == np.zeros(shape)).all()
        res = 0 <= ones
        assert (res.asnumpy() == np.ones(shape)).all()
        # <= comparisons that hold at zero densify the result
        assert res.stype == 'default'
        res = ones <= 0
        assert (res.asnumpy() == np.zeros(shape)).all()
        assert res.stype == 'default'
        res = 1 <= ones
        assert (res.asnumpy() == np.ones(shape)).all()
        assert res.stype == stype
def test_sparse_nd_binary():
    """Binary arithmetic/comparison ops on sparse ndarrays vs numpy.

    Randomly collapses dimensions to 1 so both broadcast and elementwise
    code paths get exercised.
    """
    N = 3  # random trials per (op, stype) pair
    def check_binary(fn, stype):
        for _ in range(N):
            ndim = 2
            oshape = np.random.randint(1, 6, size=(ndim,))
            bdim = 2
            lshape = list(oshape)
            # one for broadcast op, another for elemwise op
            rshape = list(oshape[ndim-bdim:])
            for i in range(bdim):
                sep = np.random.uniform(0, 1)
                # with prob ~1/3 collapse the lhs dim, ~1/3 the rhs dim,
                # else leave both intact (pure elementwise)
                if sep < 0.33:
                    lshape[ndim-i-1] = 1
                elif sep < 0.66:
                    rshape[bdim-i-1] = 1
            lhs = np.random.uniform(0, 1, size=lshape)
            rhs = np.random.uniform(0, 1, size=rshape)
            lhs_nd = mx.nd.array(lhs).tostype(stype)
            rhs_nd = mx.nd.array(rhs).tostype(stype)
            # numpy result is the reference; also check the op with
            # identical operands
            assert_allclose(fn(lhs, rhs), fn(lhs_nd, rhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
            assert_allclose(fn(lhs, lhs), fn(lhs_nd, lhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
    stypes = ['row_sparse', 'csr']
    for stype in stypes:
        check_binary(lambda x, y: x + y, stype)
        check_binary(lambda x, y: x - y, stype)
        check_binary(lambda x, y: x * y, stype)
        check_binary(lambda x, y: x / y, stype)
        check_binary(lambda x, y: x ** y, stype)
        check_binary(lambda x, y: x > y, stype)
        check_binary(lambda x, y: x < y, stype)
        check_binary(lambda x, y: x >= y, stype)
        check_binary(lambda x, y: x <= y, stype)
        check_binary(lambda x, y: x == y, stype)
@xfail_when_nonstandard_decimal_separator
def test_sparse_nd_binary_scalar_op():
    """Scalar-on-the-left and scalar-on-the-right ops on sparse ndarrays.

    When ``out_stype`` is given, also verifies the input's storage type
    (the op is expected to preserve sparsity for ops like x/2, x+0, x-0).
    """
    N = 3  # random trials per op
    def check(fn, stype, out_stype=None):
        for _ in range(N):
            ndim = 2
            shape = np.random.randint(1, 6, size=(ndim,))
            npy = np.random.normal(0, 1, size=shape)
            nd = mx.nd.array(npy).tostype(stype)
            if out_stype is not None:
                assert(nd.stype == out_stype)
            # numpy result on the raw data is the reference
            assert_allclose(fn(npy), fn(nd).asnumpy(), rtol=1e-4, atol=1e-4)
    stypes = ['row_sparse', 'csr']
    for stype in stypes:
        check(lambda x: 1 + x, stype)
        check(lambda x: 1 - x, stype)
        check(lambda x: 1 * x, stype)
        check(lambda x: 1 / x, stype)
        check(lambda x: 2 ** x, stype)
        check(lambda x: 1 > x, stype)
        check(lambda x: 0.5 > x, stype)
        check(lambda x: 0.5 < x, stype)
        check(lambda x: 0.5 >= x, stype)
        check(lambda x: 0.5 <= x, stype)
        check(lambda x: 0.5 == x, stype)
        # these ops map zeros to zeros, so sparsity must be preserved
        check(lambda x: x / 2, stype, out_stype=stype)
        check(lambda x: x + 0, stype, out_stype=stype)
        check(lambda x: x - 0, stype, out_stype=stype)
def test_sparse_nd_binary_iop():
    """In-place += and *= on sparse ndarrays vs numpy (same-shape operands)."""
    N = 3  # random trials per (op, stype) pair
    def check_binary(fn, stype):
        for _ in range(N):
            ndim = 2
            oshape = np.random.randint(1, 6, size=(ndim,))
            # lhs and rhs share the same shape: no broadcasting for in-place ops
            lshape = list(oshape)
            rshape = list(oshape)
            lhs = np.random.uniform(0, 1, size=lshape)
            rhs = np.random.uniform(0, 1, size=rshape)
            lhs_nd = mx.nd.array(lhs).tostype(stype)
            rhs_nd = mx.nd.array(rhs).tostype(stype)
            assert_allclose(fn(lhs, rhs),
                            fn(lhs_nd, rhs_nd).asnumpy(),
                            rtol=1e-4, atol=1e-4)
    def inplace_add(x, y):
        x += y
        return x
    def inplace_mul(x, y):
        x *= y
        return x
    stypes = ['csr', 'row_sparse']
    fns = [inplace_add, inplace_mul]
    for stype in stypes:
        for fn in fns:
            check_binary(fn, stype)
def test_sparse_nd_negate():
    """Unary negation on sparse ndarrays matches numpy and is out-of-place."""
    def check_sparse_nd_negate(shape, stype):
        # Use the shape that was passed in.  The previous version called
        # rand_shape_2d() again here, leaving the parameter dead.
        npy = np.random.uniform(-10, 10, shape)
        arr = mx.nd.array(npy).tostype(stype)
        assert_almost_equal(npy, arr.asnumpy())
        assert_almost_equal(-npy, (-arr).asnumpy())
        # a final check to make sure the negation (-) is not implemented
        # as inplace operation, so the contents of arr does not change after
        # we compute (-arr)
        assert_almost_equal(npy, arr.asnumpy())
    shape = rand_shape_2d()
    stypes = ['csr', 'row_sparse']
    for stype in stypes:
        check_sparse_nd_negate(shape, stype)
def test_sparse_nd_broadcast():
    """broadcast_to / broadcast_like on sparse ndarrays vs numpy broadcasting."""
    sample_num = 1000
    # TODO(haibin) test with more than 2 dimensions
    def test_broadcast_to(stype):
        for _ in range(sample_num):
            ndim = 2
            target_shape = np.random.randint(1, 11, size=ndim)
            shape = target_shape.copy()
            # randomly set some axes to length 1 so they can be broadcast
            axis_flags = np.random.randint(0, 2, size=ndim)
            for (axis, flag) in enumerate(axis_flags):
                if flag:
                    shape[axis] = 1
            dat = np.random.rand(*shape) - 0.5
            numpy_ret = dat
            ndarray = mx.nd.array(dat).tostype(stype)
            ndarray_ret = ndarray.broadcast_to(shape=target_shape)
            if type(ndarray_ret) is mx.ndarray.NDArray:
                ndarray_ret = ndarray_ret.asnumpy()
            assert (ndarray_ret.shape == target_shape).all()
            # numpy broadcasts dat implicitly during the subtraction
            err = np.square(ndarray_ret - numpy_ret).mean()
            assert err < 1E-8
    def test_broadcast_like(stype):
        for _ in range(sample_num):
            ndim = 2
            target_shape = np.random.randint(1, 11, size=ndim)
            # broadcast_like takes its shape from another ndarray
            target = mx.nd.ones(shape=tuple(target_shape))
            shape = target_shape.copy()
            axis_flags = np.random.randint(0, 2, size=ndim)
            for (axis, flag) in enumerate(axis_flags):
                if flag:
                    shape[axis] = 1
            dat = np.random.rand(*shape) - 0.5
            numpy_ret = dat
            ndarray = mx.nd.array(dat).tostype(stype)
            ndarray_ret = ndarray.broadcast_like(target)
            if type(ndarray_ret) is mx.ndarray.NDArray:
                ndarray_ret = ndarray_ret.asnumpy()
            assert (ndarray_ret.shape == target_shape).all()
            err = np.square(ndarray_ret - numpy_ret).mean()
            assert err < 1E-8
    stypes = ['csr', 'row_sparse']
    for stype in stypes:
        test_broadcast_to(stype)
        test_broadcast_like(stype)
def test_sparse_nd_transpose():
    """Transposing a sparse ndarray must agree with numpy's .T."""
    dense = np.random.uniform(-10, 10, rand_shape_2d())
    for stype in ('csr', 'row_sparse'):
        sparse_arr = mx.nd.array(dense).tostype(stype)
        assert_almost_equal(dense.T, sparse_arr.T.asnumpy())
def test_sparse_nd_storage_fallback():
    """Ops without sparse implementations must fall back to dense storage."""
    def check_output_fallback(shape):
        # dense op writing into a csr output forces fallback + storage cast
        ones = mx.nd.ones(shape)
        out = mx.nd.zeros(shape=shape, stype='csr')
        mx.nd.broadcast_add(ones, ones * 2, out=out)
        assert(np.sum(out.asnumpy() - 3) == 0)
    def check_input_fallback(shape):
        # mixed csr / row_sparse inputs to an op with no sparse kernel
        ones = mx.nd.ones(shape)
        out = mx.nd.broadcast_add(ones.tostype('csr'), ones.tostype('row_sparse'))
        assert(np.sum(out.asnumpy() - 2) == 0)
    def check_fallback_with_temp_resource(shape):
        # op that needs temporary workspace while falling back
        ones = mx.nd.ones(shape)
        out = mx.nd.sum(ones)
        assert(out.asscalar() == np.prod(shape))
    shape = rand_shape_2d()
    check_output_fallback(shape)
    check_input_fallback(shape)
    check_fallback_with_temp_resource(shape)
def test_sparse_nd_random():
    """test sparse random operator on cpu

    With the same seed, a random op writing to a row_sparse output must
    produce the same values as when writing to a dense output.
    """
    # gpu random operator doesn't use fixed seed.
    # Fix: `is 'gpu'` compared string *identity* (a CPython SyntaxWarning
    # and interning-dependent); use `==` for a reliable equality check.
    if default_device().device_type == 'gpu':
        return
    shape = (100, 100)
    fns = [mx.nd.random.uniform, mx.nd.random.normal, mx.nd.random.gamma]
    for fn in fns:
        rsp_out = mx.nd.zeros(shape=shape, stype='row_sparse')
        dns_out = mx.nd.zeros(shape=shape, stype='default')
        with random_seed(0):
            fn(shape=shape, out=dns_out)
        with random_seed(0):
            fn(shape=shape, out=rsp_out)
        assert_almost_equal(dns_out.asnumpy(), rsp_out.asnumpy())
def test_sparse_nd_astype():
    """astype must convert the dtype of sparse ndarrays."""
    for stype in ('row_sparse', 'csr'):
        src = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='float32')
        converted = src.astype('int32')
        assert converted.dtype == np.int32, converted.dtype
def test_sparse_nd_astype_copy():
    """astype(copy=...) semantics: copy only when required or requested."""
    stypes = ['row_sparse', 'csr']
    for stype in stypes:
        x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='int32')
        y = x.astype('float32')
        assert (y.dtype == np.float32)
        # Test that a new ndarray has been allocated
        assert (id(x) != id(y))
        # copy=False still must allocate because the dtype changes
        y = x.astype('float32', copy=False)
        assert (y.dtype == np.float32)
        # Test that a new ndarray has been allocated
        assert (id(x) != id(y))
        y = x.astype('int32')
        assert (y.dtype == np.int32)
        # Test that a new ndarray has been allocated
        # even though they have same dtype
        assert (id(x) != id(y))
        # Test that a new ndarray has not been allocated
        y = x.astype('int32', copy=False)
        assert (id(x) == id(y))
        # Test the string version 'int32'
        # has the same behaviour as the np.int32
        y = x.astype(np.int32, copy=False)
        assert (id(x) == id(y))
def test_sparse_nd_pickle():
    """Pickling round-trip must preserve class and contents of sparse arrays."""
    dim0 = 40
    dim1 = 40
    stypes = ['row_sparse', 'csr']
    densities = [0, 0.5]  # include the all-zero case
    stype_dict = {'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
    shape = rand_shape_2d(dim0, dim1)
    for stype in stypes:
        for density in densities:
            a, _ = rand_sparse_ndarray(shape, stype, density)
            assert isinstance(a, stype_dict[stype])
            data = pkl.dumps(a)
            b = pkl.loads(data)
            # class and values both survive the round trip
            assert isinstance(b, stype_dict[stype])
            assert same(a.asnumpy(), b.asnumpy())
@pytest.mark.parametrize('save_fn', [mx.nd.save, mx.npx.savez])
def test_sparse_nd_save_load(save_fn):
    """Save/load round-trip for lists and dicts of mixed-stype ndarrays.

    Runs with both the legacy mx.nd.save and the numpy-style mx.npx.savez;
    savez takes positional args (lists) / keyword args (dicts) rather than
    a single container.
    """
    stypes = ['default', 'row_sparse', 'csr']
    stype_dict = {'default': NDArray, 'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
    num_data = 20
    densities = [0, 0.5]
    fname = 'tmp_list.npz'
    data_list1 = []
    for _ in range(num_data):
        # random stype / shape / density per entry
        stype = stypes[np.random.randint(0, len(stypes))]
        shape = rand_shape_2d(dim0=40, dim1=40)
        density = densities[np.random.randint(0, len(densities))]
        data_list1.append(rand_ndarray(shape, stype, density))
        assert isinstance(data_list1[-1], stype_dict[stype])
    if save_fn is mx.nd.save:
        save_fn(fname, data_list1)
    else:
        save_fn(fname, *data_list1)
    data_list2 = mx.nd.load(fname)
    if save_fn is mx.npx.savez:
        # savez stores positional args under 'arr_0', 'arr_1', ...
        data_list2 = [data_list2['arr_' + str(i)] for i in range(num_data)]
    assert len(data_list1) == len(data_list2)
    for x, y in zip(data_list1, data_list2):
        assert same(x.asnumpy(), y.asnumpy())
    # dict round trip: keys must be preserved verbatim
    data_map1 = {'ndarray xx %s' % i: x for i, x in enumerate(data_list1)}
    if save_fn is mx.nd.save:
        save_fn(fname, data_map1)
    else:
        save_fn(fname, **data_map1)
    data_map2 = mx.nd.load(fname)
    assert len(data_map1) == len(data_map2)
    for k, x in data_map1.items():
        y = data_map2[k]
        assert same(x.asnumpy(), y.asnumpy())
    os.remove(fname)
@pytest.mark.parametrize('save_fn', [mx.nd.save, mx.npx.savez])
def test_sparse_ndarray_load_csr_npz_scipy(tmp_path, save_fn):
    """CSR .npz interop with scipy: load a scipy-saved file, re-save, reload."""
    csr_sp = spsp.rand(50, 100, density=0.5, format="csr")
    spsp.save_npz(tmp_path / "csr.npz", csr_sp)
    # scipy's save_npz stores the matrix under the empty-string key
    csr_mx = mx.nd.load(str(tmp_path / "csr.npz"))['']
    assert np.sum(csr_mx.data.asnumpy() != csr_sp.data) == 0
    assert np.sum(csr_mx.indices.asnumpy() != csr_sp.indices) == 0
    assert np.sum(csr_mx.indptr.asnumpy() != csr_sp.indptr) == 0
    csr_mx = save_fn(str(tmp_path / "csr_mx.npz"), csr_mx)
    csr_mx_loaded = mx.nd.load(str(tmp_path / "csr_mx.npz"))
    # mx.nd.save yields a list; savez keys positional args as 'arr_0'
    csr_mx_loaded = csr_mx_loaded[0] if save_fn is mx.nd.save else csr_mx_loaded['arr_0']
    assert np.sum(csr_mx_loaded.data.asnumpy() != csr_sp.data) == 0
    assert np.sum(csr_mx_loaded.indices.asnumpy() != csr_sp.indices) == 0
    assert np.sum(csr_mx_loaded.indptr.asnumpy() != csr_sp.indptr) == 0
def test_sparse_nd_unsupported():
    """Unsupported internals must raise on row_sparse arrays.

    The previous version did ``fn(nd); assert(False)`` inside a bare
    ``except: pass`` — the bare except also swallowed the AssertionError,
    so the test could never fail even if ``fn`` silently succeeded.
    pytest.raises expresses the intent and actually enforces it.
    """
    nd = mx.nd.zeros((2,2), stype='row_sparse')
    fn_slice = lambda x: x._slice(None, None)
    fn_at = lambda x: x._at(None)
    fn_reshape = lambda x: x.reshape(None)
    fns = [fn_slice, fn_at, fn_reshape]
    for fn in fns:
        with pytest.raises(Exception):
            fn(nd)
def test_create_csr():
    """csr_matrix construction from (data, indices, indptr), COO, and scipy."""
    def check_create_csr_from_nd(shape, density, dtype):
        matrix = rand_ndarray(shape, 'csr', density)
        # create data array with provided dtype and ctx
        data = mx.nd.array(matrix.data.asnumpy(), dtype=dtype)
        indptr = matrix.indptr
        indices = matrix.indices
        csr_created = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=shape)
        assert csr_created.stype == 'csr'
        assert same(csr_created.data.asnumpy(), data.asnumpy())
        assert same(csr_created.indptr.asnumpy(), indptr.asnumpy())
        assert same(csr_created.indices.asnumpy(), indices.asnumpy())
        # verify csr matrix dtype and ctx is consistent from the ones provided
        assert csr_created.dtype == dtype, (csr_created, dtype)
        assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
        assert csr_created.context == mx.context.current_context(), (csr_created.context, mx.context.current_context())
        # round-trip through a dense copy
        csr_copy = mx.nd.array(csr_created)
        assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
    def check_create_csr_from_coo(shape, density, dtype):
        # build from COO triplets (data, (row, col)) via scipy
        matrix = rand_ndarray(shape, 'csr', density)
        sp_csr = matrix.asscipy()
        sp_coo = sp_csr.tocoo()
        csr_created = mx.nd.sparse.csr_matrix((sp_coo.data, (sp_coo.row, sp_coo.col)), shape=shape, dtype=dtype)
        assert csr_created.stype == 'csr'
        assert same(csr_created.data.asnumpy(), sp_csr.data)
        assert same(csr_created.indptr.asnumpy(), sp_csr.indptr)
        assert same(csr_created.indices.asnumpy(), sp_csr.indices)
        csr_copy = mx.nd.array(csr_created)
        assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
        # verify csr matrix dtype and ctx is consistent
        assert csr_created.dtype == dtype, (csr_created.dtype, dtype)
        assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
        assert csr_created.context == mx.context.current_context(), (csr_created.context, mx.context.current_context())
    def check_create_csr_from_scipy(shape, density, f):
        def assert_csr_almost_equal(nd, sp):
            # compare all three underlying arrays, both ways
            assert_almost_equal(nd.data.asnumpy(), sp.data)
            assert_almost_equal(nd.indptr.asnumpy(), sp.indptr)
            assert_almost_equal(nd.indices.asnumpy(), sp.indices)
            sp_csr = nd.asscipy()
            assert_almost_equal(sp_csr.data, sp.data)
            assert_almost_equal(sp_csr.indptr, sp.indptr)
            assert_almost_equal(sp_csr.indices, sp.indices)
            assert(sp.dtype == sp_csr.dtype), (sp.dtype, sp_csr.dtype)
        # random canonical csr
        csr_sp = spsp.rand(shape[0], shape[1], density, format="csr")
        csr_nd = f(csr_sp)
        assert_csr_almost_equal(csr_nd, csr_sp)
        # non-canonical csr which contains duplicates and unsorted indices
        indptr = np.array([0, 2, 3, 7])
        indices = np.array([0, 2, 2, 0, 1, 2, 1])
        data = np.array([1, 2, 3, 4, 5, 6, 1])
        non_canonical_csr = spsp.csr_matrix((data, indices, indptr), shape=(3, 3), dtype=csr_nd.dtype)
        canonical_csr_nd = f(non_canonical_csr, dtype=csr_nd.dtype)
        # construction must canonicalize: sum duplicates, sort indices
        canonical_csr_sp = non_canonical_csr.copy()
        canonical_csr_sp.sum_duplicates()
        canonical_csr_sp.sort_indices()
        assert_csr_almost_equal(canonical_csr_nd, canonical_csr_sp)
    dim0 = 20
    dim1 = 20
    densities = [0, 0.5]
    dtype = np.float64
    for density in densities:
        shape = rand_shape_2d(dim0, dim1)
        check_create_csr_from_nd(shape, density, dtype)
        check_create_csr_from_coo(shape, density, dtype)
        check_create_csr_from_scipy(shape, density, mx.nd.sparse.array)
        check_create_csr_from_scipy(shape, density, mx.nd.array)
def test_create_row_sparse():
    """row_sparse_array construction from (data, indices) and shape checks."""
    dim0 = 50
    dim1 = 50
    densities = [0, 0.5, 1]
    for density in densities:
        shape = rand_shape_2d(dim0, dim1)
        matrix = rand_ndarray(shape, 'row_sparse', density)
        data = matrix.data
        indices = matrix.indices
        rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape)
        assert rsp_created.stype == 'row_sparse'
        assert same(rsp_created.data.asnumpy(), data.asnumpy())
        assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
        # dense round trip preserves values
        rsp_copy = mx.nd.array(rsp_created)
        assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
        # add this test since we added np.int32 and np.int64 to integer_types
        if len(shape) == 2:
            for np_int_type in (np.int32, np.int64):
                shape = list(shape)
                shape = [np_int_type(x) for x in shape]
                arg1 = tuple(shape)
                mx.nd.sparse.row_sparse_array(arg1, tuple(shape))
                # mismatched shapes must raise
                shape[0] += 1
                assert_exception(mx.nd.sparse.row_sparse_array, ValueError, arg1, tuple(shape))
def test_create_sparse_nd_infer_shape():
    """Shape inference when csr/rsp constructors are called without a shape."""
    def check_create_csr_infer_shape(shape, density, dtype):
        try:
            matrix = rand_ndarray(shape, 'csr', density=density)
            data = matrix.data
            indptr = matrix.indptr
            indices = matrix.indices
            # no shape argument: must be inferred from indptr/indices
            nd = mx.nd.sparse.csr_matrix((data, indices, indptr), dtype=dtype)
            num_rows, num_cols = nd.shape
            # rows come from len(indptr)-1; cols must cover every index
            assert(num_rows == len(indptr) - 1)
            assert(indices.shape[0] > 0), indices
            assert(np.sum((num_cols <= indices).asnumpy()) == 0)
            assert(nd.dtype == dtype), (nd.dtype, dtype)
        # cannot infer on invalid shape
        except ValueError:
            pass
    def check_create_rsp_infer_shape(shape, density, dtype):
        try:
            array = rand_ndarray(shape, 'row_sparse', density=density)
            data = array.data
            indices = array.indices
            nd = mx.nd.sparse.row_sparse_array((data, indices), dtype=dtype)
            inferred_shape = nd.shape
            # trailing dims come from data; dim 0 must cover every row index
            assert(inferred_shape[1:] == data.shape[1:])
            assert(indices.ndim > 0)
            assert(nd.dtype == dtype)
            if indices.shape[0] > 0:
                assert(np.sum((inferred_shape[0] <= indices).asnumpy()) == 0)
        # cannot infer on invalid shape
        except ValueError:
            pass
    dtype = np.int32
    shape = rand_shape_2d()
    shape_3d = rand_shape_3d()
    densities = [0, 0.5, 1]
    for density in densities:
        check_create_csr_infer_shape(shape, density, dtype)
        check_create_rsp_infer_shape(shape, density, dtype)
        check_create_rsp_infer_shape(shape_3d, density, dtype)
def test_create_sparse_nd_from_dense():
    """Sparse constructors accept dense NDArray / numpy array / nested list."""
    def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
        arr = f(dense_arr, dtype=dtype, ctx=ctx)
        assert(same(arr.asnumpy(), np.ones(shape)))
        assert(arr.dtype == dtype)
        assert(arr.context == ctx)
        # verify the default dtype inferred from dense arr
        arr2 = f(dense_arr)
        assert(arr2.dtype == default_dtype)
        assert(arr2.context == mx.context.current_context())
    shape = rand_shape_2d()
    dtype = np.int32
    src_dtype = np.float64
    ctx = mx.cpu(1)
    dense_arrs = [mx.nd.ones(shape, dtype=src_dtype), np.ones(shape, dtype=src_dtype), \
                  np.ones(shape, dtype=src_dtype).tolist()]
    for f in [mx.nd.sparse.csr_matrix, mx.nd.sparse.row_sparse_array]:
        for dense_arr in dense_arrs:
            # python lists carry no dtype, so they default to float32
            default_dtype = dense_arr.dtype if isinstance(dense_arr, (NDArray, np.ndarray)) \
                            else np.float32
            check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx)
def test_create_sparse_nd_from_sparse():
    """Sparse constructors accept existing sparse NDArrays and scipy CSR."""
    def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx):
        arr = f(sp_arr, dtype=dtype, ctx=ctx)
        assert(same(arr.asnumpy(), np.ones(shape)))
        assert(arr.dtype == dtype)
        assert(arr.context == ctx)
        # verify the default dtype inferred from dense arr
        arr2 = f(sp_arr)
        assert(arr2.dtype == src_dtype)
        assert(arr2.context == mx.context.current_context())
    shape = rand_shape_2d()
    src_dtype = np.float64
    dtype = np.int32
    ctx = mx.cpu(1)
    ones = mx.nd.ones(shape, dtype=src_dtype)
    csr_arrs = [ones.tostype('csr')]
    rsp_arrs = [ones.tostype('row_sparse')]
    # scipy csr is accepted by the csr constructor only
    csr_sp = spsp.csr_matrix(np.ones(shape, dtype=src_dtype))
    csr_arrs.append(csr_sp)
    f_csr = mx.nd.sparse.csr_matrix
    f_rsp = mx.nd.sparse.row_sparse_array
    for sp_arr in csr_arrs:
        check_create_from_sp(shape, f_csr, sp_arr, dtype, src_dtype, ctx)
    for sp_arr in rsp_arrs:
        check_create_from_sp(shape, f_rsp, sp_arr, dtype, src_dtype, ctx)
def test_create_sparse_nd_empty():
    """Empty sparse arrays: shape-only construction yields all zeros."""
    def check_empty(shape, stype):
        arr = mx.nd.empty(shape, stype=stype)
        assert(arr.stype == stype)
        assert same(arr.asnumpy(), np.zeros(shape))
    def check_csr_empty(shape, dtype, ctx):
        arr = mx.nd.sparse.csr_matrix(shape, dtype=dtype, ctx=ctx)
        assert(arr.stype == 'csr')
        assert(arr.dtype == dtype)
        assert(arr.context == ctx)
        assert same(arr.asnumpy(), np.zeros(shape))
        # check the default value for dtype and ctx
        arr = mx.nd.sparse.csr_matrix(shape)
        assert(arr.dtype == np.float32)
        assert(arr.context == mx.context.current_context())
    def check_rsp_empty(shape, dtype, ctx):
        arr = mx.nd.sparse.row_sparse_array(shape, dtype=dtype, ctx=ctx)
        assert(arr.stype == 'row_sparse')
        assert(arr.dtype == dtype)
        assert(arr.context == ctx)
        assert same(arr.asnumpy(), np.zeros(shape))
        # check the default value for dtype and ctx
        arr = mx.nd.sparse.row_sparse_array(shape)
        assert(arr.dtype == np.float32)
        assert(arr.context == mx.context.current_context())
    stypes = ['csr', 'row_sparse']
    shape = rand_shape_2d()
    shape_3d = rand_shape_3d()  # row_sparse supports >2 dims; csr does not
    dtype = np.int32
    ctx = mx.cpu(1)
    for stype in stypes:
        check_empty(shape, stype)
    check_csr_empty(shape, dtype, ctx)
    check_rsp_empty(shape, dtype, ctx)
    check_rsp_empty(shape_3d, dtype, ctx)
def test_synthetic_dataset_generator():
    """rand_sparse_ndarray with distribution='powerlaw': density and row growth."""
    def test_powerlaw_generator(csr_arr, final_row=1):
        """Test power law distribution
        Total Elements: 32000, Number of zeros: 3200
        Every row has 2 * non zero elements of the previous row.
        Also since (2047 < 3200 < 4095) this will be true till 10th row"""
        indices = csr_arr.indices.asnumpy()
        indptr = csr_arr.indptr.asnumpy()
        for row in range(1, final_row + 1):
            nextrow = row + 1
            # last column index in a row + 1 == nnz of that row
            # (powerlaw rows are filled from column 0)
            current_row_nnz = indices[indptr[row] - 1] + 1
            next_row_nnz = indices[indptr[nextrow] - 1] + 1
            assert next_row_nnz == 2 * current_row_nnz
    # Test if density is preserved
    csr_arr_cols, _ = rand_sparse_ndarray(shape=(32, 10000), stype="csr",
                                          density=0.01, distribution="powerlaw")
    csr_arr_small, _ = rand_sparse_ndarray(shape=(5, 5), stype="csr",
                                           density=0.5, distribution="powerlaw")
    csr_arr_big, _ = rand_sparse_ndarray(shape=(32, 1000000), stype="csr",
                                         density=0.4, distribution="powerlaw")
    csr_arr_square, _ = rand_sparse_ndarray(shape=(1600, 1600), stype="csr",
                                            density=0.5, distribution="powerlaw")
    assert len(csr_arr_cols.data) == 3200
    test_powerlaw_generator(csr_arr_cols, final_row=9)
    test_powerlaw_generator(csr_arr_small, final_row=1)
    test_powerlaw_generator(csr_arr_big, final_row=4)
    test_powerlaw_generator(csr_arr_square, final_row=6)
def test_sparse_nd_fluent():
    """Fluent method calls (data.func(...)) must match mx.ndarray.func(data, ...)."""
    def check_fluent_regular(stype, func, kwargs, shape=(5, 17), equal_nan=False):
        with mx.name.NameManager():
            data = mx.nd.random_uniform(shape=shape, ctx=default_device()).tostype(stype)
            # call the same op both as a module function and as a method
            regular = getattr(mx.ndarray, func)(data, **kwargs)
            fluent = getattr(data, func)(**kwargs)
            if isinstance(regular, list):
                for r, f in zip(regular, fluent):
                    assert almost_equal(r.asnumpy(), f.asnumpy(), equal_nan=equal_nan)
            else:
                assert almost_equal(regular.asnumpy(), fluent.asnumpy(), equal_nan=equal_nan)
    all_funcs = ['zeros_like', 'square', 'round', 'rint', 'fix', 'floor', 'ceil', 'trunc',
                 'abs', 'sign', 'sin', 'degrees', 'radians', 'expm1']
    for func in all_funcs:
        check_fluent_regular('csr', func, {})
        check_fluent_regular('row_sparse', func, {})
    # these ops can produce NaN on random inputs (e.g. sqrt of a negative)
    all_funcs = ['arcsin', 'arctan', 'tan', 'sinh', 'tanh',
                 'arcsinh', 'arctanh', 'log1p', 'sqrt', 'relu']
    for func in all_funcs:
        check_fluent_regular('csr', func, {}, equal_nan=True)
        check_fluent_regular('row_sparse', func, {}, equal_nan=True)
    check_fluent_regular('csr', 'slice', {'begin': (2, 5), 'end': (4, 7)}, shape=(5, 17))
    check_fluent_regular('row_sparse', 'clip', {'a_min': -0.25, 'a_max': 0.75})
    check_fluent_regular('csr', 'clip', {'a_min': -0.25, 'a_max': 0.75})
    for func in ['sum', 'mean', 'norm']:
        check_fluent_regular('csr', func, {'axis': 0})
def test_sparse_nd_exception():
    """Invalid sparse-operator arguments must raise an exception."""
    dense = mx.nd.ones((2,2))
    assertRaises(mx.base.MXNetError, mx.nd.sparse.retain, dense, invalid_arg="garbage_value")
    # constructors called with a shape that contradicts the source data
    bad_ctor_args = [
        (mx.nd.sparse.csr_matrix, dense),
        (mx.nd.sparse.csr_matrix, (2,2)),
        (mx.nd.sparse.row_sparse_array, (2,2)),
    ]
    for ctor, src in bad_ctor_args:
        assertRaises(ValueError, ctor, src, shape=(3,2))
    assertRaises(ValueError, mx.nd.sparse.zeros, "invalid_stype", (2,2))
def test_sparse_nd_check_format():
    """ test check_format for sparse ndarray

    Valid arrays pass; each malformed case below must raise MXNetError.
    """
    shape = rand_shape_2d()
    stypes = ["csr", "row_sparse"]
    for stype in stypes:
        # well-formed random and all-zero arrays must pass
        arr, _ = rand_sparse_ndarray(shape, stype)
        arr.check_format()
        arr = mx.nd.sparse.zeros(stype, shape)
        arr.check_format()
    # CSR format index pointer array should be less than the number of rows
    shape = (3, 4)
    data_list = [7, 8, 9]
    indices_list = [0, 2, 1]
    indptr_list = [0, 5, 2, 3]
    a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
    # CSR format indices should be in ascending order per row
    indices_list = [2, 1, 1]
    indptr_list = [0, 2, 2, 3]
    a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
    # CSR format indptr should end with value equal with size of indices
    indices_list = [1, 2, 1]
    indptr_list = [0, 2, 2, 4]
    a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
    # CSR format indices should not be negative
    indices_list = [0, 2, 1]
    indptr_list = [0, -2, 2, 3]
    a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
    # CSR format should be 2 Dimensional.
    a = mx.nd.array([1, 2, 3])
    assertRaises(ValueError, a.tostype, 'csr')
    a = mx.nd.array([[[1, 2, 3]]])
    assertRaises(ValueError, a.tostype, 'csr')
    # Row Sparse format indices should be less than the number of rows
    shape = (3, 2)
    data_list = [[1, 2], [3, 4]]
    indices_list = [1, 4]
    a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
    # Row Sparse format indices should be in ascending order
    indices_list = [1, 0]
    a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
    # Row Sparse format indices should not be negative
    indices_list = [1, -2]
    a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
    assertRaises(mx.base.MXNetError, a.check_format)
def test_sparse_nd_norm():
    """norm() on sparse arrays must equal norm() on their dense counterparts."""
    def check_sparse_nd_norm(stype, shape, density, **kwargs):
        data, _ = rand_sparse_ndarray(shape, stype, density)
        norm = data.norm(**kwargs)
        # dense result is the reference
        expected_norm = data.tostype('default').norm(**kwargs)
        assert_almost_equal(norm.asnumpy(), expected_norm.asnumpy())
    shape = (5, 5)
    stypes = ['row_sparse', 'csr']
    densities = [0, 0.5, 1]
    for stype in stypes:
        for density in densities:
            check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=False, ord=2)
            # test fallback
            check_sparse_nd_norm(stype, shape, density, axis=0, keepdims=False, ord=2)
            check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=True, ord=2)
def test_sparse_fc():
    """sparse.FullyConnected with row_sparse weight vs dense FullyConnected."""
    def check_sparse_fc(batch_size, dim_in, dim_out, stype):
        data = rand_ndarray((batch_size, dim_in), stype, density=0.5)
        weight = rand_ndarray((dim_out, dim_in), 'row_sparse', density=1)
        bias = rand_ndarray((dim_out, 1), 'row_sparse', density=1)
        out = mx.nd.sparse.FullyConnected(data, weight, num_hidden=dim_out, bias=bias)
        # dense reference computed with densified inputs
        data_dns = data.tostype('default')
        weight_dns = weight.tostype('default')
        out_dns = mx.nd.FullyConnected(data_dns, weight_dns, num_hidden=dim_out, bias=bias)
        assert_almost_equal(out.asnumpy(), out_dns.asnumpy())
    # test FC with row_sparse weight w/ density=1, dense data
    check_sparse_fc(5, 10, 8, 'default')
    # test FC with row_sparse weight w/ density=1, csr data (fallback)
    check_sparse_fc(5, 10, 8, 'csr')
def test_sparse_take():
    """take() on CSR data vs numpy.take; covers 'clip' and 'wrap' index modes."""
    def check_sparse_take(density, mode):
        data_shape = rand_shape_2d()
        idx_shape = (np.random.randint(low=1, high=10),)
        data = rand_ndarray(data_shape, 'csr', density=density).astype('int32')
        # indices intentionally include out-of-range values to exercise
        # the clip/wrap handling
        idx = mx.nd.array(np.random.randint(low=-5, high=15, size=idx_shape))
        data_np = data.asnumpy()
        idx_np = idx.asnumpy().astype('int32')
        expected_result = np.take(data_np, idx_np, mode=mode, axis=0)
        result = mx.nd.take(data, idx, mode=mode)
        assert_almost_equal(result.asnumpy(), expected_result)
        # result must be a valid CSR array: indptr starts at 0
        assert result.indptr[0].asscalar() == 0
    densities = [0, 0.5, 1]
    modes = ['clip', 'wrap']
    for d in densities:
        for m in modes:
            check_sparse_take(d, m)
def test_sparse_getnnz():
    """contrib.getnnz on CSR data must agree with scipy's getnnz.

    Skipped on GPU.
    """
    # Fix: `is 'gpu'` compared string *identity* (a CPython SyntaxWarning
    # and interning-dependent); use `==` for a reliable equality check.
    if default_device().device_type == 'gpu':
        return
    def check_sparse_getnnz(density, axis):
        shape = rand_shape_2d()
        data = rand_ndarray(shape, 'csr', density=density)
        data_sp = data.asscipy()
        result = mx.nd.contrib.getnnz(data, axis=axis)
        expected_result = data_sp.getnnz(axis=axis)
        assert_almost_equal(result.asnumpy(), expected_result)
    densities = [0, 0.5, 1]
    axis = [1, None]  # per-row counts and the total count
    for d in densities:
        for a in axis:
            check_sparse_getnnz(d, a)
| {
"content_hash": "f0d0d98f9a5166525e59ff29b9c8e674",
"timestamp": "",
"source": "github",
"line_count": 1024,
"max_line_length": 119,
"avg_line_length": 39.572265625,
"alnum_prop": 0.587508020334633,
"repo_name": "tlby/mxnet",
"id": "6a338ec3b1b9adc3a60663b74dd17aafbdeba676",
"size": "41308",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/python/unittest/test_sparse_ndarray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "178935"
},
{
"name": "C++",
"bytes": "6790570"
},
{
"name": "CMake",
"bytes": "90505"
},
{
"name": "Clojure",
"bytes": "511901"
},
{
"name": "Cuda",
"bytes": "884341"
},
{
"name": "Dockerfile",
"bytes": "75935"
},
{
"name": "Groovy",
"bytes": "97521"
},
{
"name": "HTML",
"bytes": "40277"
},
{
"name": "Java",
"bytes": "188428"
},
{
"name": "Julia",
"bytes": "436277"
},
{
"name": "Jupyter Notebook",
"bytes": "3633690"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "185515"
},
{
"name": "Perl",
"bytes": "1551488"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "11318"
},
{
"name": "Python",
"bytes": "6532381"
},
{
"name": "R",
"bytes": "357766"
},
{
"name": "Scala",
"bytes": "1258332"
},
{
"name": "Shell",
"bytes": "406483"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
get_element_by_class,
get_element_by_id,
unified_strdate,
)
class FreesoundIE(InfoExtractor):
    """Extractor for freesound.org sound pages."""
    _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/[^/]+/sounds/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
        'md5': '12280ceb42c81f19a515c745eae07650',
        'info_dict': {
            'id': '194503',
            'ext': 'mp3',
            'title': 'gulls in the city.wav',
            'description': 'the sounds of seagulls in the city',
            'duration': 130.233,
            'uploader': 'miklovan',
            'upload_date': '20130715',
            'tags': list,
        }
    }

    def _real_extract(self, url):
        """Scrape metadata and audio formats from a Freesound page."""
        audio_id = self._match_id(url)
        webpage = self._download_webpage(url, audio_id)
        # core fields come from OpenGraph meta tags
        audio_url = self._og_search_property('audio', webpage, 'song url')
        title = self._og_search_property('audio:title', webpage, 'song title')
        description = self._html_search_regex(
            r'(?s)id=["\']sound_description["\'][^>]*>(.+?)</div>',
            webpage, 'description', fatal=False)
        # page shows duration in milliseconds; scale to seconds
        duration = float_or_none(
            get_element_by_class('duration', webpage), scale=1000)
        upload_date = unified_strdate(get_element_by_id('sound_date', webpage))
        uploader = self._og_search_property(
            'audio:artist', webpage, 'uploader', fatal=False)
        channels = self._html_search_regex(
            r'Channels</dt><dd>(.+?)</dd>', webpage,
            'channels info', fatal=False)
        tags_str = get_element_by_class('tags', webpage)
        tags = re.findall(r'<a[^>]+>([^<]+)', tags_str) if tags_str else None
        audio_urls = [audio_url]
        # when the page links the low-quality mp3, derive the HQ variant too
        LQ_FORMAT = '-lq.mp3'
        if LQ_FORMAT in audio_url:
            audio_urls.append(audio_url.replace(LQ_FORMAT, '-hq.mp3'))
        # later list entries get a higher quality rank
        formats = [{
            'url': format_url,
            'format_note': channels,
            'quality': quality,
        } for quality, format_url in enumerate(audio_urls)]
        self._sort_formats(formats)
        return {
            'id': audio_id,
            'title': title,
            'description': description,
            'duration': duration,
            'uploader': uploader,
            'upload_date': upload_date,
            'tags': tags,
            'formats': formats,
        }
| {
"content_hash": "6282bda2c2cbd83e400a6b5ca945ce8d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 87,
"avg_line_length": 31.59493670886076,
"alnum_prop": 0.5384615384615384,
"repo_name": "aboutsajjad/Bridge",
"id": "138b6bc58cf9aa282c8afc3b6498ba84884197fc",
"size": "2496",
"binary": false,
"copies": "64",
"ref": "refs/heads/master",
"path": "app_packages/youtube_dl/extractor/freesound.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2532435"
},
{
"name": "C++",
"bytes": "338713"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Objective-C",
"bytes": "3570"
},
{
"name": "Python",
"bytes": "6743963"
},
{
"name": "Ruby",
"bytes": "508"
},
{
"name": "Swift",
"bytes": "33266"
}
],
"symlink_target": ""
} |
'''
Author: Bu Kun
E-mail: bukun@osgeo.cn
CopyRight: http://www.yunsuan.org
'''
import time
import tornado.escape
import config
import peewee
import datetime
# from torlite.model.core_tab import CabWiki2Catalog
from torlite.core import tools
from torlite.model.core_tab import CabWiki
class MWiki():
    """Data-access model for wiki articles stored in the ``CabWiki`` table.

    Thin wrapper around peewee queries. Form input arrives as Tornado's
    ``post_data`` mapping, where each field value is a one-element list
    (hence the pervasive ``[0]`` indexing).
    """

    def __init__(self):
        # Best-effort table creation: ignore the error raised when the
        # table already exists (narrowed from a bare except).
        try:
            CabWiki.create_table()
        except Exception:
            pass

    def _render_html(self, post_data):
        # src_type '1' selects reStructuredText; anything else is Markdown.
        # Shared by update() and insert_data() (this logic was duplicated).
        if post_data['src_type'][0] == '1':
            return tools.rst2html(post_data['cnt_md'][0])
        return tools.markdown2html(post_data['cnt_md'][0])

    def update(self, uid, post_data):
        """Update the article identified by *uid* from submitted form data."""
        print(post_data['src_type'][0])
        entry = CabWiki.update(
            title=post_data['title'][0],
            date=datetime.datetime.now(),
            cnt_html=self._render_html(post_data),
            user_name=post_data['user_name'],
            # raw markup is escaped before storage
            cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'][0]),
            time_update=time.time(),
            src_type=post_data['src_type'][0]
        ).where(CabWiki.uid == uid)
        entry.execute()

    def insert_data(self, post_data):
        """Create a new article; return its uid, or False when the title
        is already taken."""
        title = post_data['title'][0]
        # NOTE: get_by_wiki() also bumps the view counter when the record
        # exists -- behaviour preserved from the original duplicate check.
        if self.get_by_wiki(title) is not None:
            return False
        entry = CabWiki.create(
            title=title,
            date=datetime.datetime.now(),
            cnt_html=self._render_html(post_data),
            uid=tools.get_uu8d(),
            time_create=time.time(),
            user_name=post_data['user_name'],
            cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'][0]),
            time_update=time.time(),
            view_count=1,
            src_type=post_data['src_type'][0]
        )
        return entry.uid

    def query_old(self):
        """Ten records ordered by the raw 'time_update' expression string."""
        return CabWiki.select().order_by('time_update').limit(10)

    def query_random(self, num=6):
        """*num* random articles; the SQL random function depends on the
        backend: dbtype 1/3 -> RANDOM(), dbtype 2 -> RAND() (MySQL).

        Returns None for any other dbtype (unchanged original behaviour).
        """
        if config.dbtype == 1 or config.dbtype == 3:
            return CabWiki.select().order_by(peewee.fn.Random()).limit(num)
        elif config.dbtype == 2:
            return CabWiki.select().order_by(peewee.fn.Rand()).limit(num)

    def get_by_id(self, in_uid):
        """Article with uid *in_uid*, or None when it does not exist."""
        if CabWiki.select().where(CabWiki.uid == in_uid).count() == 0:
            return None
        return CabWiki.get(CabWiki.uid == in_uid)

    def get_by_title(self, in_title):
        """Article titled *in_title*, or None when it does not exist."""
        try:
            return CabWiki.get(CabWiki.title == in_title)
        except Exception:
            # narrowed from a bare except; peewee raises DoesNotExist here
            return None

    def get_num_by_cat(self, cat_str):
        """Count articles whose id_cats field contains ',<cat_str>,'."""
        return CabWiki.select().where(
            CabWiki.id_cats.contains(',{0},'.format(cat_str))).count()

    def query_recent(self, num=8):
        """The *num* most recently updated articles."""
        return CabWiki.select().order_by(CabWiki.time_update.desc()).limit(num)

    def query_dated(self, num=8):
        """The *num* least recently updated articles."""
        return CabWiki.select().order_by(CabWiki.time_update).limit(num)

    def query_most(self, num=8):
        """The *num* most viewed articles."""
        return CabWiki.select().order_by(CabWiki.view_count.desc()).limit(num)

    def query_recent_most(self, num=8, recent=30):
        """The *num* most viewed articles updated in the last *recent* days.

        Bug fix: *recent* was previously ignored -- the window was
        hard-coded to 30 days regardless of the argument.
        """
        time_that = int(time.time()) - recent * 24 * 3600
        return CabWiki.select().where(
            CabWiki.time_update > time_that).order_by(
            CabWiki.view_count.desc()).limit(num)

    def query_cat_by_pager(self, cat_str, cureent):
        """One page of the articles in category *cat_str*, newest first.

        ``cureent`` (sic -- name kept for keyword-argument compatibility)
        is the 1-based page number; page size comes from config.page_num.
        """
        return CabWiki.select().where(
            CabWiki.id_cats.contains(str(cat_str))).order_by(
            CabWiki.time_update.desc()).paginate(cureent, config.page_num)

    def update_view_count(self, citiao):
        """Increment the view counter of the article titled *citiao*."""
        CabWiki.update(view_count=CabWiki.view_count + 1).where(
            CabWiki.title == citiao).execute()

    def update_view_count_by_uid(self, uid):
        """Increment the view counter of the article with uid *uid*."""
        CabWiki.update(view_count=CabWiki.view_count + 1).where(
            CabWiki.uid == uid).execute()

    def get_by_wiki(self, citiao):
        """Article titled *citiao* (bumping its view count), else None."""
        if CabWiki.select().where(CabWiki.title == citiao).count() == 0:
            return None
        self.update_view_count(citiao)
        return CabWiki.get(CabWiki.title == citiao)

    def get_next_record(self, in_uid):
        """The next-older article than *in_uid* by update time, or None."""
        current_rec = self.get_by_id(in_uid)
        query = CabWiki.select().where(
            CabWiki.time_update < current_rec.time_update).order_by(
            CabWiki.time_update.desc())
        if query.count() == 0:
            return None
        return query.get()

    def get_previous_record(self, in_uid):
        """The next-newer article than *in_uid* by update time, or None."""
        current_rec = self.get_by_id(in_uid)
        query = CabWiki.select().where(
            CabWiki.time_update > current_rec.time_update).order_by(
            CabWiki.time_update)
        if query.count() == 0:
            return None
        return query.get()

    def query_by_spec(self, spec_id):
        """All articles with id_spec == *spec_id*, newest first."""
        return CabWiki.select().where(
            CabWiki.id_spec == spec_id).order_by(CabWiki.time_update.desc())

    def get_by_keyword(self, par2):
        """Up to 20 articles whose title contains *par2*, newest first."""
        return CabWiki.select().where(
            CabWiki.title.contains(par2)).order_by(
            CabWiki.time_update.desc()).limit(20)
"content_hash": "423ee49e160770908085cea332526bd8",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 117,
"avg_line_length": 31.904191616766468,
"alnum_prop": 0.5762012012012012,
"repo_name": "Geoion/TorCMS",
"id": "cfd7e6d454a7088756a6b92ef4b019e83dfb2a85",
"size": "5351",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "torlite/model/mwiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "172628"
},
{
"name": "HTML",
"bytes": "95401"
},
{
"name": "JavaScript",
"bytes": "112477"
},
{
"name": "PLpgSQL",
"bytes": "297339"
},
{
"name": "Python",
"bytes": "123980"
},
{
"name": "Shell",
"bytes": "505"
}
],
"symlink_target": ""
} |
"""blackjack project. test"""
import blackjack
card = blackjack.Card()
assert card.value == 1
deck = blackjack.Deck()
assert len(deck.cards) == 52
cstack = blackjack.CardStack(2)
assert len(cstack.stack) == 104
cstack.draw()
assert len(cstack.stack) == 103
hand = blackjack.Hand()
hand.update_cards(card)
assert hand.values[0] == 1
assert hand.values[1] == 11
num_players = int(raw_input('enter the number of player '))
num_players = int(num_players)
game = blackjack.MainGame(num_players)
assert len(game.cardstack.stack) == 52 * 8
game.play()
| {
"content_hash": "a677b41301004b9bdd2c94ea348450c0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 59,
"avg_line_length": 22.04,
"alnum_prop": 0.7150635208711433,
"repo_name": "ayine17/Final-project",
"id": "d7500b5a28bd23a381e6e70fccea3e003af6301f",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finalproject/testfinal.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "31781"
}
],
"symlink_target": ""
} |
from __future__ import print_function

import errno
import gzip
import json
import os
import re
import shutil
import socket
import subprocess
from contextlib import closing
from datetime import datetime, timedelta
from distutils.sysconfig import get_python_lib
from urllib import urlopen

from progressbar import ProgressBar, ETA, Percentage, RotatingMarker, Bar
# The database files live inside the site-packages directory.
lib_path = get_python_lib()
#filename = 'uniprot_trembl.dat'
filename = 'uniprot_sprot.dat'
# Local path of the (possibly filtered) UniProt flat file.
db_path = '{:s}/uniprot_data/{:s}'.format(lib_path, filename)
# Remote location of the current official UniProt release.
server = {'path': '/pub/databases/uniprot/current_release/knowledgebase/complete/',
    'url': 'ftp.uniprot.org'}
proto = ['ftp://']  # protocols to try, in order of preference
select_proto = None  # set by get_available() to the first protocol that worked
# JSON file tracking the installed release and completed milestone steps.
config_file = '{:s}/uniprot_data/version.json'.format(lib_path)
def save_local_config(config):
    """Persist *config* as JSON to the module-level ``config_file`` path."""
    with open(config_file, "w") as out_handle:
        json.dump(config, out_handle)
def get_local_config():
    """Load the local version/progress config, seeding a default on first run."""
    if not os.path.exists(config_file):
        # First run: ensure the containing folder exists, then write defaults.
        parent = '/'.join(config_file.split('/')[:-1])
        if not os.path.exists(parent):
            os.mkdir(parent)
        save_local_config({'version': '2010_01',
                           'status': []})
    with open(config_file, 'r') as handle:
        return json.load(handle)
def milestone(function):
    """Decorator: run *function* only while its name is not yet recorded in
    the local config's ``status`` list.

    The wrapped function must return a falsy value to signal success; on
    success the step name is appended to ``status`` and persisted, so the
    step is skipped on later runs. The (truthy/falsy) result is returned
    to the caller either way.

    NOTE(review): the config snapshot is taken once, at decoration (import)
    time, and shared by all calls of this wrapper -- steps completed by
    other decorated functions in the same process are only visible through
    their own snapshots. Confirm this is intended before relying on it.
    """
    config = get_local_config()
    def wrapper(*args):
        name = function.__name__
        # pending == True means the step has not been recorded as done yet
        pending = name not in config['status']
        if pending:
            #try:
            pending = function(*args)
            if not pending:
                # falsy return => success: record and persist the milestone
                config['status'].append(name)
                save_local_config(config)
            #except Exception, e:
            #    return False
        return pending
    return wrapper
def silent_remove(filename):
    """Remove *filename*; a missing file is silently ignored.

    Fix: uses EAFP (try/remove/except) instead of an exists() pre-check,
    closing the race window between check and removal. Only the
    "file not found" error is swallowed; any other OSError still
    propagates, matching the original's behaviour for existing files.
    """
    try:
        os.remove(filename)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
@milestone
def clean_old_db(filename):
    """Milestone step: delete the previous database file at *filename*.

    NOTE(review): returning True reports "still pending" to @milestone, so
    this step is never recorded as complete and re-runs on every update
    cycle -- apparently intentional (a stale file must always be removed
    before a fresh download), but confirm before changing.
    """
    silent_remove(filename)
    return True
def get_max_version(protocol):
    """Return the newest release identifier advertised by the server.

    For FTP the release string is parsed out of ``reldate.txt``. The HTTP
    branch is legacy: uniprot does not actually serve HTTP (see TODO).
    """
    url = '{0}{1[url]}{1[path]}reldate.txt'.format(protocol, server)
    with closing(urlopen(url)) as conn:
        if "ftp" in protocol:
            versions = re.findall(r'Release ([0-9\_]*) consists', conn.read())
        elif "http" in protocol:
            # TODO: uniprot doesn't support the http procotol.
            versions = [float(re.sub('^.+href="Pfam([0-9\.]+).+$', r'\1', line))
                        for line in conn.readlines()
                        if 'href="Pfam' in line]
    return max(versions)
def get_available(config):
    """Probe each supported protocol for the newest remote release.

    Falls back to the locally recorded version when no server is
    reachable. Side effect: stores the first working protocol in the
    module-level ``select_proto``.
    """
    global select_proto
    latest = None
    print("->\tUniprot: Get availables versions from {:s}".format(server['url']))
    for candidate in proto:
        try:
            latest = get_max_version(candidate)
        except socket.error:
            print("{:s} failed.".format(candidate))
        else:
            select_proto = candidate
            break
    if not latest:
        # If the server is offline assume there is no new version.
        print("Discarded new version search.")
        latest = config['version']
    return latest
def get_versions():
    """Return the (local, remote) release identifiers as a tuple."""
    cfg = get_local_config()
    remote = get_available(cfg)
    return cfg['version'], remote
@milestone
def download(origin, destiny):
    """Milestone step: fetch *origin* to *destiny* with wget (resumable, -c).

    Returns True when wget exits with status 0, False otherwise.

    Fix: the command is passed to subprocess.call as an argument list
    instead of formatting one string and splitting on spaces, which broke
    for URLs or paths containing spaces.
    """
    return not subprocess.call(["wget", "-c", origin, "-O", destiny])
@milestone
def download_gziped(remote):
    """Milestone step: download the gzipped database for release *remote*.

    Returns the local ``.gz`` path on success, or False on failure.
    """
    origin = '{0}{1[url]}{1[path]}{3}.gz'.format(
        select_proto, server, remote, filename)
    destiny = '{:s}.gz'.format(db_path)
    # always clear any stale partial archive first
    clean_old_db(destiny)
    if download(origin, destiny):
        return destiny
    return False
@milestone
def export(destiny):
    """Milestone step: gunzip *destiny* into ``db_path``.

    Returns True when the extraction completed; any exception during the
    copy is swallowed and reported as failure (False), as before.
    """
    print("->\tUniprot: Extracting database")
    silent_remove(db_path)
    ready = False
    with open(db_path, 'wb') as f_out, gzip.open(destiny, 'rb') as f_in:
        try:
            shutil.copyfileobj(f_in, f_out)
        except Exception:
            pass
        else:
            ready = True
    return ready
@milestone
def resume_database():
    """Milestone step: filter the UniProt flat file down to the lines the
    application needs (record 'ID' lines and 'DR Pfam' cross-references),
    writing to a temp file and swapping it over db_path on success.

    Returns True when the filter command succeeded and db_path was replaced.

    NOTE(review): the awk program is wrapped in DOUBLE quotes, so the shell
    expands ``$1`` (normally empty) before awk ever sees it -- awk then
    prints the entire matching line, not its first field. Confirm whether
    whole-line output is the intended behaviour before changing the quoting.
    """
    print("->\tUniprot: Select the records ID and the Pfam DR (database "
        "references)")
    # Rough progress hint for the user; the awk pass takes a few minutes.
    dt = datetime.now() + timedelta(minutes=3)
    print(" \tThis should be ready for {}".format(dt.time()))
    temp_file = db_path + ".tmp"
    # Build "awk '...' <db_path> > <temp_file>" and run it through the shell.
    transform = '{} > {}'.format(db_path, temp_file)
    command = 'awk "/^(ID |DR Pfam)/{ print $1; }" '
    command += transform
    result = not os.system(command)
    print("->\tUniprot: Updating database")
    if result:
        # Replace the full database with the filtered version.
        shutil.move(temp_file, db_path)
    return result
def update():
    """Entry point: refresh the local UniProt database when a newer release
    is available, then record the new version and reset milestone state."""
    local, remote = get_versions()
    if local < remote:
        print("->\tUniprot: Update from {:s} to {:s}".format(local, remote))
        archive = download_gziped(remote)
        export(archive)
        resume_database()
        # Fresh status list so the next release cycle re-runs every step.
        save_local_config({'version': remote, 'status': []})
    else:
        print("->\tUniprot: The database is updated (version {})".format(local))
| {
"content_hash": "601abc7557adf902b32cdc56a5c03070",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 85,
"avg_line_length": 29.53012048192771,
"alnum_prop": 0.6034271725826194,
"repo_name": "ecolell/uniprotkbserver",
"id": "b8ed978b0288e2b8aa43a491cd776447153159aa",
"size": "4926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uniprotkbserver/autoupdate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2365"
},
{
"name": "Makefile",
"bytes": "3089"
},
{
"name": "Python",
"bytes": "14160"
}
],
"symlink_target": ""
} |
import math
# Example inputs for the demo run below.
days = 60
daily_rets = 0.001  # mean of daily returns
daily_rf = 0.0002  # mean of daily risk-free returns
std_daily = 0.001  # standard deviation of daily returns
# Annualisation factor: sqrt(252 trading days per year).
k = math.sqrt(252)
def sharpe_daily(daily_rets, daily_rf, std_daily, periods_per_year=252):
    """
    Annualised Sharpe ratio: square root of the sampling frequency
    (252 trading days by default) multiplied by the mean of daily returns
    minus the mean daily risk-free return, divided by the standard
    deviation of daily returns.

    :param daily_rets: mean of daily returns
    :param daily_rf: mean of daily risk-free returns
    :param std_daily: std deviation of daily returns
    :param periods_per_year: sampling periods per year used for
        annualisation (default 252; new optional parameter, so existing
        callers are unaffected)
    :return: Sharpe ratio
    """
    # Compute the factor locally instead of relying on the module-level
    # global ``k``, so the function is self-contained and generalises to
    # other sampling frequencies.
    return math.sqrt(periods_per_year) * (daily_rets - daily_rf) / std_daily
if __name__ == '__main__':
print "Sharpe ration:", sharpe_daily(daily_rets, daily_rf, std_daily)
| {
"content_hash": "8f36204d18c5e8fa33ccb571a724f8b7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 28.041666666666668,
"alnum_prop": 0.6775631500742942,
"repo_name": "dmytroKarataiev/MachineLearning",
"id": "6e0dbdd0db9141ee6b82b88d820d71e57af14d5d",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learning/sharpe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1786493"
},
{
"name": "Jupyter Notebook",
"bytes": "969870"
},
{
"name": "Python",
"bytes": "145291"
}
],
"symlink_target": ""
} |
from platform import python_version
from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description.

    Fix: the file is opened with an explicit UTF-8 encoding so installs do
    not depend on the platform default locale (the README may contain
    non-ASCII characters).
    """
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
# Package metadata consumed by pip / `python setup.py`.
setup(
    name='comment_parser',
    version='1.2.4',
    description='Parse comments from various source files.',
    # Trove classifiers: https://pypi.org/classifiers/
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Documentation',
        'License :: OSI Approved :: MIT License'
    ],
    url='http://github.com/jeanralphaviles/comment_parser',
    author='Jean-Ralph Aviles',
    author_email='jeanralph.aviles+pypi@gmail.com',
    license='MIT',
    # README.md is rendered as the PyPI project page
    long_description=readme(),
    long_description_content_type='text/markdown',
    packages=['comment_parser', 'comment_parser.parsers'],
    # python-magic is used at runtime for file-type detection
    install_requires=['python-magic>=0.4.24,<0.5.0'],
    test_suite='nose.collector',
    tests_require=['nose'],
    zip_safe=False,
    python_requires='>=3.7',
)
| {
"content_hash": "b727c1b5df654081142dd42a3dd57e18",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 30.4375,
"alnum_prop": 0.6560574948665298,
"repo_name": "jeanralphaviles/comment_parser",
"id": "cee137dedf328b4c12513099b93b4378d8baf754",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36467"
}
],
"symlink_target": ""
} |
"""py.test for modeleditor"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from itertools import product
import os
import warnings
import pytest
from six import StringIO
from six import string_types
from eppy import modeleditor
from eppy.bunch_subclass import Bunch
from eppy.iddcurrent import iddcurrent
from eppy.modeleditor import IDF
from eppy.pytest_helpers import almostequal
import eppy.snippet as snippet
# Shared fixtures: the IDD/IDF text snippets used throughout this module.
iddsnippet = iddcurrent.iddtxt
idfsnippet = snippet.idfsnippet

# The IDD is read only once per process; if another test module has already
# set it, that reading is reused (setiddname is skipped).
iddfhandle = StringIO(iddcurrent.iddtxt)
# Fix: identity comparison `is None` instead of `== None` (PEP 8).
if IDF.getiddname() is None:
    IDF.setiddname(iddfhandle)
def test_poptrailing():
    """py.test for poptrailing: trailing empty strings are stripped while
    interior empty strings are preserved (table-driven)."""
    tdata = (
        (
            [1, 2, 3, '', 56, '', '', '', ''],
            [1, 2, 3, '', 56]
        ), # lst, popped
        (
            [1, 2, 3, '', 56],
            [1, 2, 3, '', 56]
        ), # lst, popped
        (
            [1, 2, 3, 56],
            [1, 2, 3, 56]
        ), # lst, popped
    )
    for before, after in iter(tdata):
        assert modeleditor.poptrailing(before) == after
def test_extendlist():
    """py.test for extendlist: the list is padded in place with *value* up
    to index i (no-op when i is already inside the list)."""
    tdata = (
        ([1, 2, 3], 2, 0, [1, 2, 3]), # lst, i, value, nlst
        ([1, 2, 3], 3, 0, [1, 2, 3, 0]), # lst, i, value, nlst
        ([1, 2, 3], 5, 0, [1, 2, 3, 0, 0, 0]), # lst, i, value, nlst
        ([1, 2, 3], 7, 0, [1, 2, 3, 0, 0, 0, 0, 0]), # lst, i, value, nlst
    )
    for lst, i, value, nlst in tdata:
        # extendlist mutates lst in place; compare it to the expectation
        modeleditor.extendlist(lst, i, value=value)
        assert lst == nlst
def test_namebunch():
    """py.test for namebunch: the bunch's Name is set to aname, except when
    Name is None (then it stays None)."""
    thedata = (
        (
            Bunch(dict(Name="", a=5)),
            "yay", "yay"
        ), # abunch, aname, thename
        (
            Bunch(dict(Name=None, a=5)),
            "yay", None
        ), # abunch, aname, thename
    )
    for abunch, aname, thename in thedata:
        result = modeleditor.namebunch(abunch, aname)
        assert result.Name == thename
def test_getnamedargs():
    """getnamedargs merges positional dicts and keyword args into one dict,
    however the arguments are split between the two forms."""
    expected = {'a': 1, 'b': 2, 'c': 3}
    assert modeleditor.getnamedargs(a=1, b=2, c=3) == expected
    assert modeleditor.getnamedargs(dict(a=1, b=2, c=3)) == expected
    assert modeleditor.getnamedargs(dict(a=1, b=2), c=3) == expected
    assert modeleditor.getnamedargs(dict(a=1), c=3, b=2) == expected
def test_getrefnames():
    """py.test for getrefnames: each IDD object key maps to the list of
    reference names it can be referred to by (empty for BUILDING)."""
    tdata = (
        (
            'ZONE',
            [
                'ZoneNames', 'OutFaceEnvNames', 'ZoneAndZoneListNames',
                'AirflowNetworkNodeAndZoneNames'
            ]
        ), # objkey, therefs
        (
            'FluidProperties:Name'.upper(),
            ['FluidNames', 'FluidAndGlycolNames']
        ), # objkey, therefs
        ('Building'.upper(), []), # objkey, therefs
    )
    for objkey, therefs in tdata:
        # a blank IDF suffices: getrefnames needs only the IDD, not IDF data
        fhandle = StringIO("")
        idf = IDF(fhandle)
        result = modeleditor.getrefnames(idf, objkey)
        assert result == therefs
def test_getallobjlists():
    """py.test for getallobjlists: find every (objkey, refname, field
    indices) triple whose object-list contains the reference name."""
    tdata = (
        (
            'TransformerNames',
            [
                (
                    'ElectricLoadCenter:Distribution'.upper(),
                    'TransformerNames',
                    [10, ]
                ),
            ],
        ), # refname, objlists
    )
    for refname, objlists in tdata:
        # a blank IDF suffices: the lookup is driven by the IDD alone
        fhandle = StringIO("")
        idf = IDF(fhandle)
        result = modeleditor.getallobjlists(idf, refname)
        assert result == objlists
def test_rename():
    """py.test for rename: renaming a Material updates every Construction
    field that refers to it."""
    idftxt = """Material,
        G01a 19mm gypsum board, !- Name
        MediumSmooth, !- Roughness
        0.019, !- Thickness {m}
        0.16, !- Conductivity {W/m-K}
        800, !- Density {kg/m3}
        1090; !- Specific Heat {J/kg-K}
        Construction,
        Interior Wall, !- Name
        G01a 19mm gypsum board, !- Outside Layer
        F04 Wall air space resistance, !- Layer 2
        G01a 19mm gypsum board; !- Layer 3
        Construction,
        Other Wall, !- Name
        G01a 19mm gypsum board, !- Outside Layer
        G01a 19mm gypsum board, !- Layer 2
        G01a 19mm gypsum board; !- Layer 3
        """
    # NOTE(review): ridftxt is never used by the assertions below; it is
    # kept as human-readable documentation of the expected renamed model.
    ridftxt = """Material,
        peanut butter, !- Name
        MediumSmooth, !- Roughness
        0.019, !- Thickness {m}
        0.16, !- Conductivity {W/m-K}
        800, !- Density {kg/m3}
        1090; !- Specific Heat {J/kg-K}
        Construction,
        Interior Wall, !- Name
        peanut butter, !- Outside Layer
        F04 Wall air space resistance, !- Layer 2
        peanut butter; !- Layer 3
        Construction,
        Other Wall, !- Name
        peanut butter, !- Outside Layer
        peanut butter, !- Layer 2
        peanut butter; !- Layer 3
        """
    fhandle = StringIO(idftxt)
    idf = IDF(fhandle)
    result = modeleditor.rename(
        idf,
        'Material'.upper(),
        'G01a 19mm gypsum board', 'peanut butter')
    # the renamed object itself ...
    assert result.Name == 'peanut butter'
    # ... and the fields that referenced the old name
    assert idf.idfobjects['CONSTRUCTION'][0].Outside_Layer == 'peanut butter'
    assert idf.idfobjects['CONSTRUCTION'][0].Layer_3 == 'peanut butter'
def test_zonearea_zonevolume():
    """py.test for zonearea and zonevolume: areas, heights and volume of an
    L-shaped zone, re-checked after deleting its floors and then its
    roofs/ceilings (the helpers fall back to the remaining surfaces)."""
    idftxt = """Zone, 473222, 0.0, 0.0, 0.0, 0.0, , 1;
    BuildingSurface:Detailed, F7289B, Floor, Exterior Floor, 473222,
    Ground, ,
    NoSun, NoWind, , 4, 2.23, 2.56, 0.0, 2.23, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    2.56, 0.0; BuildingSurface:Detailed, F3659B, Wall, Exterior Wall,
    473222, Outdoors, , SunExposed, WindExposed, , 4, 2.23, 2.56, 1.49,
    2.23, 2.56, 0.0, 0.0, 2.56, 0.0, 0.0, 2.56, 1.49;
    BuildingSurface:Detailed, 46C6C9, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 2.23, 0.0, 1.49, 2.23,
    0.0, 0.0, 2.23, 1.02548139464, 0.0, 2.23, 1.02548139464, 1.49;
    BuildingSurface:Detailed, 4287DD, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 0.0, 2.56, 1.49, 0.0,
    2.56, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.49;
    BuildingSurface:Detailed, 570C2E, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 0.0, 0.0, 1.49, 0.0, 0.0,
    0.0, 2.23, 0.0, 0.0, 2.23, 0.0, 1.49; BuildingSurface:Detailed,
    BAEA99, Roof, Exterior Roof, 473222, Outdoors, , SunExposed,
    WindExposed, , 4, 0.0, 2.56, 1.49, 0.0, 0.0, 1.49, 2.23, 0.0, 1.49,
    2.23, 2.56, 1.49; BuildingSurface:Detailed, C879FE, Floor,
    Exterior Floor, 473222, Ground, , NoSun, NoWind, , 4, 3.22,
    2.52548139464, 0.0, 3.22, 1.02548139464, 0.0, 2.23,
    1.02548139464, 0.0, 2.23, 2.52548139464, 0.0;
    BuildingSurface:Detailed, 25B601, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 2.23,
    1.02548139464, 1.49, 2.23, 1.02548139464, 0.0, 2.23, 2.52548139464,
    0.0, 2.23, 2.52548139464, 1.49; BuildingSurface:Detailed, F5EADC,
    Wall, Exterior Wall, 473222, Outdoors, , SunExposed, WindExposed, ,
    4, 2.23, 1.02548139464, 1.49, 2.23, 1.02548139464, 0.0, 3.22,
    1.02548139464, 0.0, 3.22, 1.02548139464, 1.49;
    BuildingSurface:Detailed, D0AABE, Wall, Exterior Wall, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 3.22, 1.02548139464,
    1.49, 3.22, 1.02548139464, 0.0, 3.22, 2.52548139464, 0.0, 3.22,
    2.52548139464, 1.49; BuildingSurface:Detailed, B0EA02, Wall,
    Exterior Wall, 473222, Outdoors, , SunExposed, WindExposed, ,
    4, 3.22, 2.52548139464, 1.49, 3.22, 2.52548139464, 0.0, 2.23,
    2.52548139464, 0.0, 2.23, 2.52548139464, 1.49;
    BuildingSurface:Detailed, E6DF3B, Roof, Exterior Roof, 473222,
    Outdoors, , SunExposed, WindExposed, , 4, 2.23, 2.52548139464, 1.49,
    2.23, 1.02548139464, 1.49, 3.22, 1.02548139464, 1.49, 3.22,
    2.52548139464, 1.49; BuildingSurface:Detailed, 4F8681, Wall,
    Exterior Wall, 473222, Outdoors, , SunExposed, WindExposed, , 4,
    2.23, 2.52548139464, 1.49, 2.23, 2.52548139464, 0.0, 2.23, 2.56,
    0.0, 2.23, 2.56, 1.49; """
    idf = IDF(StringIO(idftxt))
    # intact zone: floor and roof/ceiling areas agree; height and volume
    # follow from the common 1.49 m floor-to-roof distance
    result = modeleditor.zonearea(idf, '473222')
    assert almostequal(result, 7.1938)
    result = modeleditor.zonearea_floor(idf, '473222')
    assert almostequal(result, 7.1938)
    result = modeleditor.zonearea_roofceiling(idf, '473222')
    assert almostequal(result, 7.1938)
    result = modeleditor.zone_floor2roofheight(idf, '473222')
    assert almostequal(result, 1.49)
    result = modeleditor.zoneheight(idf, '473222')
    assert almostequal(result, 1.49)
    result = modeleditor.zone_floor2roofheight(idf, '473222')
    assert almostequal(result, 1.49)
    result = modeleditor.zonevolume(idf, '473222')
    assert almostequal(result, 10.718762)
    # remove floor: area/height/volume must fall back to the roof surfaces
    zone = idf.getobject('ZONE', '473222')
    surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()]
    zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
    floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR']
    for floor in floors:
        idf.removeidfobject(floor)
    result = modeleditor.zonearea_floor(idf, '473222')
    assert almostequal(result, 0)
    result = modeleditor.zonearea_roofceiling(idf, '473222')
    assert almostequal(result, 7.1938)
    result = modeleditor.zonearea(idf, '473222')
    assert almostequal(result, 7.1938)
    result = modeleditor.zoneheight(idf, '473222')
    assert almostequal(result, 1.49)
    result = modeleditor.zonevolume(idf, '473222')
    assert almostequal(result, 10.718762)
    # reload idf and remove roof/ceiling: fall back to the floor surfaces
    idf = IDF(StringIO(idftxt))
    zone = idf.getobject('ZONE', '473222')
    surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()]
    zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name]
    roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF']
    ceilings = [s for s in zone_surfs if s.Surface_Type.upper() == 'CEILING']
    topsurfaces = roofs + ceilings
    for surf in topsurfaces:
        idf.removeidfobject(surf)
    result = modeleditor.zonearea_roofceiling(idf, '473222')
    assert almostequal(result, 0)
    result = modeleditor.zonearea(idf, '473222')
    assert almostequal(result, 7.1938)
    result = modeleditor.zoneheight(idf, '473222')
    assert almostequal(result, 1.49)
    result = modeleditor.zonevolume(idf, '473222')
    assert almostequal(result, 10.718762)
def test_new():
    """IDF.new() initialises empty idfobjects containers."""
    blank_idf = IDF()
    blank_idf.new()
    buildings = blank_idf.idfobjects['building'.upper()]
    # both internal lists of the Idf_MSequence start out empty
    assert buildings.list1 == []
    assert buildings.list2 == []
def test_newidfobject():
    """py.test for newidfobject, popidfobject, removeidfobject and
    copyidfobject, plus newidfobject's defaultvalues flag."""
    # make a blank idf
    # make a function for this and then continue.
    idf = IDF()
    idf.new()
    objtype = 'material:airgap'.upper()
    obj = idf.newidfobject(objtype, Name='Argon')
    obj = idf.newidfobject(objtype, Name='Krypton')
    obj = idf.newidfobject(objtype, Name='Xenon')
    assert idf.model.dt[objtype] == [['MATERIAL:AIRGAP', 'Argon'],
                                     ['MATERIAL:AIRGAP', 'Krypton'],
                                     ['MATERIAL:AIRGAP', 'Xenon'],
                                    ]
    # remove an object by index
    idf.popidfobject(objtype, 1)
    assert idf.model.dt[objtype] == [['MATERIAL:AIRGAP', 'Argon'],
                                     ['MATERIAL:AIRGAP', 'Xenon'],
                                    ]
    # remove an object by reference
    lastobject = idf.idfobjects[objtype][-1]
    idf.removeidfobject(lastobject)
    assert idf.model.dt[objtype] == [['MATERIAL:AIRGAP', 'Argon'], ]
    # copyidfobject appends a duplicate of the given object
    onlyobject = idf.idfobjects[objtype][0]
    idf.copyidfobject(onlyobject)
    assert idf.model.dt[objtype] == [['MATERIAL:AIRGAP', 'Argon'],
                                     ['MATERIAL:AIRGAP', 'Argon'],
                                    ]
    # a geometry object starts with no coordinates but keeps its Name
    objtype = 'FENESTRATIONSURFACE:DETAILED'
    obj = idf.newidfobject(objtype, Name='A Wall')
    assert obj.coords == []
    assert obj.fieldvalues[1] == 'A Wall'
    # test defaultvalues=True and defaultvalues=False
    sim_deftrue = idf.newidfobject('SimulationControl'.upper(), defaultvalues=True)
    assert sim_deftrue.Do_Zone_Sizing_Calculation == 'No'
    sim_deffalse = idf.newidfobject('SimulationControl'.upper(), defaultvalues=False)
    assert sim_deffalse.Do_Zone_Sizing_Calculation == ''
def test_newidfobject_warning():
    """Test that the warning for newidfobject created with `aname` is working.

    Fails if the warning is not issued when `aname` is used, or if the
    warning is issued when `aname` is not used.

    Fix: ``pytest.warns(None)`` was deprecated and then removed in pytest 7;
    the no-warning case now records warnings with
    ``warnings.catch_warnings(record=True)`` instead (``warnings`` is
    already imported at the top of this module).
    """
    # make a blank idf
    idf = IDF()
    idf.new()
    objtype = 'material:airgap'.upper()
    # deprecated usages (keyword or positional aname) must warn
    with pytest.warns(UserWarning):
        idf.newidfobject(objtype, aname="Krypton")
    with pytest.warns(UserWarning):
        idf.newidfobject(objtype, "Krypton")
    # the supported Name= keyword form must emit no warnings at all
    with warnings.catch_warnings(record=True) as captured_warnings:
        warnings.simplefilter("always")
        idf.newidfobject(objtype, Name="Krypton")
    assert len(captured_warnings) == 0
def test_save():
    """
    IDF.save() accepts a plain file handle, so the round trip can be
    checked without touching the filesystem.
    """
    idf = IDF(StringIO("Material,TestMaterial, !- Name"))
    out_handle = StringIO()
    idf.save(out_handle)
    out_handle.seek(0)
    serialised = out_handle.read()
    # minimal check: the material name made it into the serialised output
    assert "TestMaterial" in serialised
def test_save_with_lineendings_and_encodings():
    """
    Test the IDF.save() function with every combination of supported
    encodings and line endings, checking the bytes actually written.
    """
    file_text = "Material,TestMaterial, !- Name"
    idf = IDF(StringIO(file_text))
    lineendings = ('windows', 'unix', 'default')
    encodings = ('ascii', 'latin-1', 'UTF-8')
    # exercise the full cartesian product of the two option sets
    for le, enc in product(lineendings, encodings):
        file_handle = StringIO()
        idf.save(file_handle, encoding=enc, lineendings=le)
        file_handle.seek(0)
        result = file_handle.read().encode(enc)
        if le == 'windows':
            assert b'\r\n' in result
        elif le == 'unix':
            assert b'\r\n' not in result
        elif le == 'default':
            # 'default' follows the host platform's convention
            assert os.linesep.encode(enc) in result
def test_saveas():
    """Test the IDF.saveas() function.

    saveas() with no destination must raise TypeError; with a file handle
    it writes the model and rebinds idf.idfname to the new destination.

    Fix: the try/``assert False``/except pattern is replaced with the
    idiomatic ``pytest.raises`` context manager.
    """
    file_text = "Material,TestMaterial, !- Name"
    idf = IDF(StringIO(file_text))
    idf.idfname = 'test.idf'
    # no filename/handle passed -> must raise
    with pytest.raises(TypeError):
        idf.saveas()
    file_handle = StringIO()
    idf.saveas(file_handle)  # save with a filehandle
    file_handle.seek(0)
    # minimal check that the material was written out
    assert "TestMaterial" in file_handle.read()
    # test the idfname attribute has been changed
    assert idf.idfname != 'test.idf'
def test_savecopy():
    """Test the IDF.savecopy() function.

    savecopy() with no destination must raise TypeError; with a file handle
    it writes a copy WITHOUT rebinding idf.idfname.

    Fix: the try/``assert False``/except pattern is replaced with the
    idiomatic ``pytest.raises`` context manager.
    """
    file_text = "Material,TestMaterial, !- Name"
    idf = IDF(StringIO(file_text))
    idf.idfname = 'test.idf'
    # no filename/handle passed -> must raise
    with pytest.raises(TypeError):
        idf.savecopy()
    file_handle = StringIO()
    idf.savecopy(file_handle)  # save a copy with a different filename
    file_handle.seek(0)
    # minimal check that the material was written out
    assert "TestMaterial" in file_handle.read()
    # test the idfname attribute has not been changed
    assert idf.idfname == 'test.idf'
def test_initread():
    """Test for IDF.initread() with filename in unicode and as python str.

    A nonexistent file must raise IOError.

    Fixes: the try/``assert False``/except pattern is replaced with
    ``pytest.raises``, and the temp-file teardown now runs in a
    try/finally so 'tmp.idf' is removed even when an assertion fails.
    """
    # setup: write a real idf file from the shared module-level snippet
    idf = IDF()
    idf.initreadtxt(idfsnippet)
    idf.saveas('tmp.idf')
    try:
        # test fname as unicode
        fname = 'tmp.idf'
        assert isinstance(fname, string_types)
        idf = IDF()
        idf.initread(fname)
        assert idf.getobject('BUILDING', 'Building')
        # test fname as str
        fname = str('tmp.idf')
        assert isinstance(fname, string_types)
        idf = IDF()
        idf.initread(fname)
        assert idf.getobject('BUILDING', 'Building')
        # test that a nonexistent file raises an IOError
        idf = IDF()
        with pytest.raises(IOError):
            idf.initread("notarealfilename.notreal")
    finally:
        # teardown
        os.remove('tmp.idf')
def test_initreadtxt():
    """Test for IDF.initreadtxt(): an IDF can be populated straight from a
    text string, after which objects are retrievable by key and name.
    """
    idftxt = """
        Material,
        G01a 19mm gypsum board, !- Name
        MediumSmooth, !- Roughness
        0.019, !- Thickness {m}
        0.16, !- Conductivity {W/m-K}
        800, !- Density {kg/m3}
        1090; !- Specific Heat {J/kg-K}
        Construction,
        Interior Wall, !- Name
        G01a 19mm gypsum board, !- Outside Layer
        F04 Wall air space resistance, !- Layer 2
        G01a 19mm gypsum board; !- Layer 3
        """
    idf = IDF()
    idf.initreadtxt(idftxt)
    assert idf.getobject('MATERIAL', 'G01a 19mm gypsum board')
def test_idfstr():
    """Test all outputtype options in IDF.idfstr().

    'standard' keeps comments and blank lines; the 'nocomment*' variants
    strip progressively more formatting; 'compressed' drops even the
    line breaks.
    """
    idf = IDF()
    idf.initreadtxt(idfsnippet)
    assert idf.outputtype == 'standard' # start with the default
    original = idf.idfstr()
    assert "!-" in original # has comment
    assert "\n" in original # has line break
    assert "\n\n" in original # has empty line
    idf.outputtype = 'standard'
    s = idf.idfstr()
    assert "!-" in s # has comment
    assert "\n" in s # has line break
    assert "\n\n" in s # has empty line
    assert s == original # is unchanged
    idf.outputtype = 'nocomment'
    s = idf.idfstr()
    assert "!-" not in s # has no comments
    assert "\n" in s # has line break
    assert "\n\n" in s # has empty line
    assert s != original # is changed
    idf.outputtype = 'nocomment1'
    s = idf.idfstr()
    assert "!-" not in s # has no comments
    assert "\n" in s # has line break
    assert "\n\n" in s # has empty lines
    assert s != original # is changed
    idf.outputtype = 'nocomment2'
    s = idf.idfstr()
    assert "!-" not in s # has no comments
    assert "\n" in s # has line break
    assert "\n\n" not in s # has no empty lines
    assert s != original # is changed
    idf.outputtype = 'compressed'
    s = idf.idfstr()
    assert "!-" not in s # has no comments
    assert "\n" not in s # has no line breaks
    assert "\n\n" not in s # has no empty lines
    assert s != original # is changed
def test_refname2key():
    """py.test for refname2key: map a reference name back to the object
    keys whose name fields feed that reference list."""
    tdata = (
        (
            'TransformerNames',
            ['ElectricLoadCenter:Distribution'.upper(), ],
        ), # refname, key
        (
            'AllCurves',
            [u'PUMP:VARIABLESPEED',
             u'PUMP:CONSTANTSPEED', u'BOILER:HOTWATER',
             u'ENERGYMANAGEMENTSYSTEM:CURVEORTABLEINDEXVARIABLE'],
        ), # refname, key
    )
    for refname, key in tdata:
        # a blank IDF suffices: the lookup is driven by the IDD alone
        fhandle = StringIO("")
        idf = IDF(fhandle)
        result = modeleditor.refname2key(idf, refname)
        assert result == key
def test_getiddgroupdict():
    """IDF.getiddgroupdict() groups IDD object keys by their group name;
    ungrouped keys appear under the None key."""
    expected_nogroup = ['Lead Input', 'Simulation Data']
    idf = IDF(StringIO(""))
    groupdict = idf.getiddgroupdict()
    assert groupdict[None] == expected_nogroup
def test_idfinmsequence():
    """py.test for setting of theidf in Idf_MSequence: the back-reference
    to the owning IDF is maintained on read, insert and pop."""
    idftxt = """Version, 6.0;"""
    # theidf set in Idf_MSequence.__init__
    idf = IDF(StringIO(idftxt))
    versions = idf.idfobjects['version'.upper()]
    assert versions.theidf == idf
    assert versions[0].theidf == idf
    # theidf set in Idf_MSequence.insert()
    material = idf.newidfobject('material'.upper())
    assert material.theidf == idf
    # popping detaches the object (theidf -> None) but not its siblings
    newmaterial = idf.newidfobject('material'.upper())
    materials = idf.idfobjects['material'.upper()]
    material = materials.pop(0)
    assert material.theidf == None
    assert materials[0].theidf == idf
def test_idd_index():
    """A blank IDF exposes an empty idd_index mapping."""
    blank_idf = IDF(StringIO(""))
    assert blank_idf.idd_index == {}
| {
"content_hash": "de5c49b94fe33b96bae7c88a92abff83",
"timestamp": "",
"source": "github",
"line_count": 617,
"max_line_length": 112,
"avg_line_length": 34.43760129659643,
"alnum_prop": 0.5908791415662651,
"repo_name": "jamiebull1/eppy",
"id": "d39098807cc2335f7bb0a5807bff1b516e7a6367",
"size": "21554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eppy/tests/test_modeleditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2050328"
},
{
"name": "Jupyter Notebook",
"bytes": "17625"
},
{
"name": "Makefile",
"bytes": "2233"
},
{
"name": "Python",
"bytes": "7026229"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
} |
# Demo inputs: search for 17 in the sorted list 0..99.
target = 17
data = list(range(100))
def binary_search(target, data):
index = int(len(data)/2)
print(target, index, data)
if target == data[index]:
last = "hello {0}".format(target)
return last
elif target > data[index]:
binary_search(target, data[index + 1:])
elif target < data[index]:
binary_search(target, data[:index])
if index == 0:
return "I can't find"
print(binary_search(target, data))
| {
"content_hash": "b88a535f3a150bdc05b576992b1a7f67",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 47,
"avg_line_length": 22.142857142857142,
"alnum_prop": 0.5956989247311828,
"repo_name": "SELO77/seloPython",
"id": "4e4a7cea8571da895a06d14b243728cad5b52a81",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithm/binarysearch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47192"
}
],
"symlink_target": ""
} |
'''BrightSign UDP Node'''
### Libraries required by this Node
import socket
### Parameters used by this Node
param_ipAddress = Parameter('{"title":"IP Address","desc":"The IP address","schema":{"type":"string"}}')
PORT = 1010
### Functions used by this Node
def send_udp_string(msg):
#open socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.sendto(msg, (param_ipAddress, PORT))
except socket.error, msg:
print "error: %s\n" % msg
local_event_Error.emit(msg)
finally:
if sock:
sock.close()
### Local actions this Node provides
def local_action_Start(arg = None):
    """{"title":"Start","desc":"Start","group":"Content"}"""
    # NOTE: the docstring above is Nodel action metadata (parsed as JSON) - do not reword it.
    print 'Action Start requested.'
    send_udp_string('Start')
def local_action_Stop(arg = None):
    """{"title":"Stop","desc":"Stop","group":"Content"}"""
    # NOTE: the docstring above is Nodel action metadata (parsed as JSON) - do not reword it.
    print 'Action Stop requested.'
    send_udp_string('Stop')
def local_action_PlayClip01(arg = None):
    """{"title":"PlayClip01","desc":"PlayClip01","group":"Content"}"""
    # NOTE: the docstring above is Nodel action metadata (parsed as JSON) - do not reword it.
    print 'Action PlayClip01 requested.'
    send_udp_string('PlayClip01')
def local_action_Mute(arg = None):
"""{"title":"Mute","group":"Volume","schema":{"type":"string","enum": ['On', 'Off'], "required": True}}"""
print 'Action Mute%s requested' % arg
send_udp_string('MuteOn') if arg == 'On' else send_udp_string('MuteOff')
def local_action_MuteOn(arg = None):
    """{"title":"MuteOn","desc":"MuteOn","group":"Volume"}"""
    # NOTE: the docstring above is Nodel action metadata (parsed as JSON) - do not reword it.
    print 'Action MuteOn requested.'
    send_udp_string('MuteOn')
def local_action_MuteOff(arg = None):
    """{"title":"MuteOff","desc":"MuteOff","group":"Volume"}"""
    # NOTE: the docstring above is Nodel action metadata (parsed as JSON) - do not reword it.
    print 'Action MuteOff requested.'
    send_udp_string('MuteOff')
### Local events this Node provides
local_event_Error = LocalEvent('{"title":"Error","desc":"Error","group":"General"}')
### Main
def main(arg = None):
    # Nodel entry point; no startup work is needed beyond the log line.
    # Start your script here.
    print 'Nodel script started.'
| {
"content_hash": "cbf3db22859972ec0eea198308a644d2",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 108,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.6519893899204244,
"repo_name": "museumsvictoria/nodel-recipes",
"id": "f3fe97c308edaa4564f03295565d0f8e55b93d93",
"size": "1885",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Brightsign/script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "23083"
},
{
"name": "CSS",
"bytes": "203723"
},
{
"name": "HTML",
"bytes": "22272"
},
{
"name": "JavaScript",
"bytes": "1186857"
},
{
"name": "Python",
"bytes": "1695766"
},
{
"name": "XSLT",
"bytes": "45475"
}
],
"symlink_target": ""
} |
from .common import *
class Application(QApplication):
    """Qt application wrapper: applies the bundled skin and tracks app state.

    Keyword arguments are folded into the global ``app_settings``;
    ``app_state_path`` may be overridden by callers.
    """
    def __init__(self, **kwargs):
        super(Application, self).__init__(sys.argv)
        self.setStyleSheet(app_skin)
        # Where the serialized application state lives by default.
        default_state_path = os.path.join(
            app_dir, "{}.appstate".format(app_settings["name"])
        )
        self.app_state_path = kwargs.get("app_state_path", default_state_path)
        self.app_state = {}
        app_settings.update(kwargs)
| {
"content_hash": "9ff5c83f50711a79a3ff19de6165df11",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 35.583333333333336,
"alnum_prop": 0.5456674473067916,
"repo_name": "martastain/pyqt-bootstrap",
"id": "b7c6b021c60fce737397426a9f528608f794f92b",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyqtbs/application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4853"
}
],
"symlink_target": ""
} |
import sys, json, numpy as np # must ensure that whatever this script relies on is installed on all cluster machines that could run this
def main():
lines = ''
for ln in sys.stdin.readlines(): lines += ln
inp = json.loads(lines)
arr = np.array(inp)
res = np.sum(arr)
print res
if __name__ == '__main__':
main() | {
"content_hash": "0443ea2ef300707f1fde11affd6620f6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 136,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6323529411764706,
"repo_name": "leviathanindustries/noddy",
"id": "d87dd7bcfdf2c525f55f579b21a69fe7cab9022b",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snake_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "893099"
},
{
"name": "JavaScript",
"bytes": "58153"
},
{
"name": "Python",
"bytes": "1568"
},
{
"name": "Shell",
"bytes": "9089"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	# Factory for a SWG draft-schematic template: builds an Intangible that
	# points at the shared formal-shirt schematic .iff resource.
	# -1 means no attribute template is attached; presumably the engine
	# resolves stats elsewhere - TODO confirm against swgpy docs.
	result = Intangible()
	result.template = "object/draft_schematic/clothing/shared_clothing_ith_shirt_formal_02.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result | {
"content_hash": "92e1d0d4283931b2538a1fe3c708e50c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7009345794392523,
"repo_name": "anhstudios/swganh",
"id": "30c1f09c119b790ce0c55ef337a212172e11d6fe",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_shirt_formal_02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""Everything in this module is taken from the excellent trio project.
Having the public path in .__module__ attributes is important for:
- exception names in printed tracebacks
- ~sphinx :show-inheritance:~
- deprecation warnings
- pickle
- probably other stuff
"""
import os
def fixup_module_metadata(namespace):
    """Rewrite ``__module__``/``__name__``/``__qualname__`` of the public
    objects in *namespace* so they report the public ``fbchat`` path
    instead of the private submodule they were defined in.
    """
    def fix_one(qualname, name, obj):
        # Custom extension, to handle classmethods, staticmethods and properties
        if isinstance(obj, (classmethod, staticmethod)):
            obj = obj.__func__
        if isinstance(obj, property):
            obj = obj.fget
        mod = getattr(obj, "__module__", None)
        if mod is not None and mod.startswith("fbchat."):
            obj.__module__ = "fbchat"
            # Modules, unlike everything else in Python, put fully-qualitied
            # names into their __name__ attribute. We check for "." to avoid
            # rewriting these.
            if hasattr(obj, "__name__") and "." not in obj.__name__:
                obj.__name__ = name
                obj.__qualname__ = qualname
            if isinstance(obj, type):
                # Fix methods. Bug fix: recurse with the *current* qualname
                # rather than the outer loop's objname, so members of nested
                # classes get fully qualified names like "Outer.Inner.attr"
                # (matching upstream trio's fixup_module_metadata).
                for attr_name, attr_value in obj.__dict__.items():
                    fix_one(qualname + "." + attr_name, attr_name, attr_value)
    for objname, obj in namespace.items():
        if not objname.startswith("_"):  # ignore private attributes
            fix_one(objname, objname, obj)
# Allow disabling this when running Sphinx
# This is done so that Sphinx autodoc can detect the file's source
# TODO: Find a better way to detect when we're running Sphinx!
if os.environ.get("_FBCHAT_DISABLE_FIX_MODULE_METADATA") == "1":
    fixup_module_metadata = lambda namespace: None  # no-op replacement, same signature
| {
"content_hash": "39296a5a046d2b6bc856bb1efd3ef11d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 38.422222222222224,
"alnum_prop": 0.6182764603817236,
"repo_name": "carpedm20/fbchat",
"id": "f3e16189e425945ef907a62239c94ab487930e11",
"size": "1729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fbchat/_fix_module_metadata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "332266"
}
],
"symlink_target": ""
} |
from email.utils import formataddr
from email_from_template import send_mail
from django import forms
from django.conf import settings
class ContactForm(forms.Form):
    """Site contact form; on save, emails the message via a template."""
    name = forms.CharField()
    email = forms.EmailField()
    subject = forms.CharField()
    message = forms.CharField()
    def save(self):
        # Build an RFC 5322 "Name <address>" sender from the submitted data.
        sender = formataddr(
            (self.cleaned_data['name'], self.cleaned_data['email'])
        )
        send_mail(
            (settings.DEFAULT_FROM_EMAIL,),
            'static/contact.email',
            self.cleaned_data,
            sender,
        )
| {
"content_hash": "315d6b6197f1fc2615edef29dbf639c7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 43,
"avg_line_length": 25.083333333333332,
"alnum_prop": 0.6013289036544851,
"repo_name": "takeyourmeds/takeyourmeds-web",
"id": "7afefd734cc2ed494eb6c6e52e7340ef23212656",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "takeyourmeds/static/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266001"
},
{
"name": "HTML",
"bytes": "80882"
},
{
"name": "JavaScript",
"bytes": "248719"
},
{
"name": "Nginx",
"bytes": "1013"
},
{
"name": "Python",
"bytes": "107863"
},
{
"name": "Shell",
"bytes": "918"
}
],
"symlink_target": ""
} |
import sys
import time
import unittest
import pyev
from whizzer.defer import Deferred, CancelledError, AlreadyCalledError, TimeoutError
from common import loop
def throw_always(result):
    """Callback helper that unconditionally raises; ignores its argument."""
    raise Exception("success")
def one_always(result):
    """Callback helper that ignores its argument and yields the constant 1."""
    return 1
def add(a, b):
    """Return ``a + b``; used to transform results in callback chains."""
    return a + b
class TestDeferred(unittest.TestCase):
    """Tests for whizzer.defer.Deferred: callback/errback chaining,
    cancellation, and blocking result() retrieval on the shared pyev loop."""
    def setUp(self):
        # fresh deferred and result slot per test
        self.deferred = Deferred(loop)
        self.result = None
    def tearDown(self):
        self.deferred = None
        self.result = None
    def set_result(self, result):
        # callback target letting tests observe the delivered value
        self.result = result
    def set_exception(self, exception):
        self.exception = exception
    def call_later(self, delay, func, *args, **kwargs):
        # Schedule func(*args, **kwargs) on the loop after `delay` seconds.
        # The returned timer must stay referenced by the caller until it fires.
        timer = pyev.Timer(delay, 0.0, loop, self._do_later, (func, args, kwargs))
        timer.start()
        return timer
    def _do_later(self, watcher, events):
        # pyev timer trampoline: unpack and invoke the scheduled call
        (func, args, kwargs) = watcher.data
        func(*args, **kwargs)
        watcher.stop()
    def test_callback(self):
        self.deferred.add_callback(self.set_result)
        self.deferred.callback(5)
        self.assertTrue(self.result==5)
    def test_callback_chain(self):
        # each callback's return value feeds the next callback in the chain
        d = self.deferred
        d.add_callback(add, 1)
        d.add_callback(self.set_result)
        self.deferred.callback(5)
        self.assertTrue(self.result==6)
    def test_log_error(self):
        """Unhandled exceptions should be logged if the deferred is deleted."""
        self.deferred.add_callback(throw_always)
        self.deferred.callback(None)
        self.deferred = None # delete it
    def test_errback(self):
        self.deferred.add_errback(self.set_result)
        self.deferred.errback(Exception())
        self.assertTrue(isinstance(self.result, Exception))
    def test_callback_skips(self):
        """When a callback raises an exception
        all callbacks without errbacks are skipped until the next
        errback is found.
        """
        self.deferred.add_callback(throw_always)
        self.deferred.add_callback(one_always)
        self.deferred.add_callback(add, 2)
        self.deferred.add_errback(one_always)
        self.deferred.add_callback(self.set_result)
        self.deferred.callback(None)
        self.assertTrue(self.result==1)
    def test_errback_reraised(self):
        """If an errback raises, then the next errback is called."""
        self.deferred.add_errback(throw_always)
        self.deferred.add_errback(self.set_result)
        self.deferred.errback(Exception())
        self.assertTrue(isinstance(self.result, Exception))
    def test_cancelled(self):
        # once cancelled, every entry point raises CancelledError
        self.deferred.cancel()
        self.assertRaises(CancelledError, self.deferred.errback, Exception("testcancelled"))
        self.assertRaises(CancelledError, self.deferred.callback, None)
        self.assertRaises(CancelledError, self.deferred.result)
    def test_already_called(self):
        # a deferred may only fire once
        self.deferred.callback(None)
        self.assertRaises(AlreadyCalledError, self.deferred.errback, Exception("testalreadycalled"))
        self.assertRaises(AlreadyCalledError, self.deferred.callback, None)
        self.assertRaises(AlreadyCalledError, self.deferred.cancel)
    def test_cancel_callback(self):
        self.deferred = Deferred(loop, cancelled_cb=self.set_result)
        self.deferred.cancel()
        self.assertTrue(self.result == self.deferred)
    def test_result_chain(self):
        # callbacks added after firing still transform the stored result
        self.deferred.callback(5)
        self.assertTrue(self.deferred.result()==5)
        self.deferred.add_callback(add, 2)
        self.assertTrue(self.deferred.result()==7)
        self.deferred.add_callback(throw_always)
        self.assertRaises(Exception, self.deferred.result)
    def test_result(self):
        self.deferred.callback(5)
        self.assertTrue(self.deferred.result()==5)
    def test_result_exceptioned(self):
        self.deferred.errback(Exception("exceptioned result"))
        self.assertRaises(Exception, self.deferred.result)
    def test_delayed_result(self):
        # result() blocks on the loop until the timer fires the callback
        now = time.time()
        t1 = self.call_later(0.5, self.deferred.callback, 5)
        self.assertTrue(self.deferred.result() == 5)
        self.assertTrue(time.time() - now > 0.4)
    def test_delayed_result_chained(self):
        now = time.time()
        t1 = self.call_later(0.5, self.deferred.callback, 5)
        self.deferred.add_callback(add, 4)
        self.assertTrue(self.deferred.result() == 9)
        self.assertTrue(time.time() - now > 0.4)
    def test_delayed_result_timeout(self):
        # the 0.1s timeout elapses before the 0.5s timer can fire
        t1 = self.call_later(0.5, self.deferred.callback, 5)
        self.assertRaises(TimeoutError, self.deferred.result, 0.1)
    def test_delayed_result_cancelled(self):
        # cancellation at 0.2s beats both the 0.5s callback and the 0.3s timeout
        t1 = self.call_later(0.5, self.deferred.callback, 5)
        t2 = self.call_later(0.2, self.deferred.cancel)
        self.assertRaises(CancelledError, self.deferred.result, 0.3)
if __name__ == '__main__':  # allow running this test module directly
    unittest.main()
| {
"content_hash": "8cdeb2f7f1a884b765346e1fe8da85ef",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 100,
"avg_line_length": 33.827586206896555,
"alnum_prop": 0.6579001019367992,
"repo_name": "bfrog/whizzer",
"id": "cb9b02365ab11bbf99489ad4f70a5754993aafb2",
"size": "6047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whizzer/test/test_defer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136425"
}
],
"symlink_target": ""
} |
import doctest
import unittest
import pytest
from babel import core, Locale
from babel.core import default_locale, Locale
def test_locale_provides_access_to_cldr_locale_data():
    """CLDR data (display name, number symbols) is reachable from a Locale."""
    locale = Locale('en', 'US')
    assert locale.display_name == u'English (United States)'
    assert locale.number_symbols['decimal'] == u'.'
def test_locale_repr():
    """repr() reflects the constructor arguments, including the script."""
    assert repr(Locale('en', 'US')) == "Locale('en', territory='US')"
    assert repr(Locale('de', 'DE')) == "Locale('de', territory='DE')"
    expected = "Locale('zh', territory='CN', script='Hans')"
    assert repr(Locale('zh', 'CN', script='Hans')) == expected
def test_locale_comparison():
    """Locales compare by value; malformed identifiers never compare equal."""
    en_US = Locale('en', 'US')
    en_US_2 = Locale('en', 'US')
    fi_FI = Locale('fi', 'FI')
    # 'en_US' passed as the *language* field, i.e. an invalid identifier
    bad_en_US = Locale('en_US')
    assert en_US == en_US
    assert en_US == en_US_2
    assert en_US != fi_FI
    assert not (en_US != en_US_2)
    # operand order matters here: exercises Locale's reflected comparison
    assert None != en_US
    assert en_US != bad_en_US
    assert fi_FI != bad_en_US
def test_can_return_default_locale(os_environ):
    """Locale.default honours the LC_MESSAGES environment variable."""
    os_environ['LC_MESSAGES'] = 'fr_FR.UTF-8'
    assert Locale('fr', 'FR') == Locale.default('LC_MESSAGES')
def test_ignore_invalid_locales_in_lc_ctype(os_environ):
    # This is a regression test specifically for a bad LC_CTYPE setting on
    # MacOS X 10.6 (#200)
    os_environ['LC_CTYPE'] = 'UTF-8'
    # must not throw an exception
    default_locale('LC_CTYPE')
def test_get_global():
    """core.get_global exposes the shared CLDR lookup tables."""
    aliases = core.get_global('zone_aliases')
    territories = core.get_global('zone_territories')
    assert aliases['UTC'] == 'Etc/GMT'
    assert territories['Europe/Berlin'] == 'DE'
def test_hash():
    """Equal locales hash equal; distinct locales hash differently."""
    first = Locale('en', 'US')
    duplicate = Locale('en', 'US')
    other = Locale('fi', 'FI')
    assert hash(first) == hash(duplicate)
    assert hash(first) != hash(other)
class TestLocaleClass:
    """Spot-checks Locale construction, negotiation/parsing, and the
    CLDR-backed data properties (names, formats, calendars, time zones)."""
    def test_attributes(self):
        locale = Locale('en', 'US')
        assert locale.language == 'en'
        assert locale.territory == 'US'
    def test_default(self, os_environ):
        # clear the higher-priority variables so LANG wins
        for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
            os_environ[name] = ''
        os_environ['LANG'] = 'fr_FR.UTF-8'
        default = Locale.default('LC_MESSAGES')
        assert (default.language, default.territory) == ('fr', 'FR')
    def test_negotiate(self):
        de_DE = Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
        assert (de_DE.language, de_DE.territory) == ('de', 'DE')
        de = Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
        assert (de.language, de.territory) == ('de', None)
        # no overlap between preferred and available -> None
        nothing = Locale.negotiate(['de_DE', 'de'], ['en_US'])
        assert nothing is None
    def test_negotiate_custom_separator(self):
        de_DE = Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
        assert (de_DE.language, de_DE.territory) == ('de', 'DE')
    def test_parse(self):
        l = Locale.parse('de-DE', sep='-')
        assert l.display_name == 'Deutsch (Deutschland)'
        # parsing an existing Locale is a pass-through
        de_DE = Locale.parse(l)
        assert (de_DE.language, de_DE.territory) == ('de', 'DE')
    def test_parse_likely_subtags(self):
        # the script is inferred from the CLDR "likely subtags" data
        l = Locale.parse('zh-TW', sep='-')
        assert l.language == 'zh'
        assert l.territory == 'TW'
        assert l.script == 'Hant'
        l = Locale.parse('zh_CN')
        assert l.language == 'zh'
        assert l.territory == 'CN'
        assert l.script == 'Hans'
        l = Locale.parse('zh_SG')
        assert l.language == 'zh'
        assert l.territory == 'SG'
        assert l.script == 'Hans'
        # 'und' (undetermined) resolves to the territory's likely language
        l = Locale.parse('und_AT')
        assert l.language == 'de'
        assert l.territory == 'AT'
        l = Locale.parse('und_UK')
        assert l.language == 'en'
        assert l.territory == 'GB'
        assert l.script is None
    def test_get_display_name(self):
        zh_CN = Locale('zh', 'CN', script='Hans')
        assert zh_CN.get_display_name('en') == 'Chinese (Simplified, China)'
    def test_display_name_property(self):
        assert Locale('en').display_name == 'English'
        assert Locale('en', 'US').display_name == 'English (United States)'
        # display_name is rendered in the locale's own language
        assert Locale('sv').display_name == 'svenska'
    def test_english_name_property(self):
        assert Locale('de').english_name == 'German'
        assert Locale('de', 'DE').english_name == 'German (Germany)'
    def test_languages_property(self):
        assert Locale('de', 'DE').languages['ja'] == 'Japanisch'
    def test_scripts_property(self):
        assert Locale('en', 'US').scripts['Hira'] == 'Hiragana'
    def test_territories_property(self):
        assert Locale('es', 'CO').territories['DE'] == 'Alemania'
    def test_variants_property(self):
        assert (Locale('de', 'DE').variants['1901'] ==
                'Alte deutsche Rechtschreibung')
    def test_currencies_property(self):
        assert Locale('en').currencies['COP'] == 'Colombian Peso'
        assert Locale('de', 'DE').currencies['COP'] == 'Kolumbianischer Peso'
    def test_currency_symbols_property(self):
        assert Locale('en', 'US').currency_symbols['USD'] == '$'
        assert Locale('es', 'CO').currency_symbols['USD'] == 'US$'
    def test_number_symbols_property(self):
        assert Locale('fr', 'FR').number_symbols['decimal'] == ','
    def test_decimal_formats(self):
        assert Locale('en', 'US').decimal_formats[None].pattern == '#,##0.###'
    def test_currency_formats_property(self):
        # \xa4 is the generic currency placeholder in CLDR patterns
        assert (Locale('en', 'US').currency_formats['standard'].pattern ==
                u'\xa4#,##0.00')
        assert (Locale('en', 'US').currency_formats['accounting'].pattern ==
                u'\xa4#,##0.00')
    def test_percent_formats_property(self):
        assert Locale('en', 'US').percent_formats[None].pattern == '#,##0%'
    def test_scientific_formats_property(self):
        assert Locale('en', 'US').scientific_formats[None].pattern == '#E0'
    def test_periods_property(self):
        assert Locale('en', 'US').periods['am'] == 'AM'
    def test_days_property(self):
        assert Locale('de', 'DE').days['format']['wide'][3] == 'Donnerstag'
    def test_months_property(self):
        assert Locale('de', 'DE').months['format']['wide'][10] == 'Oktober'
    def test_quarters_property(self):
        assert Locale('de', 'DE').quarters['format']['wide'][1] == '1. Quartal'
    def test_eras_property(self):
        assert Locale('en', 'US').eras['wide'][1] == 'Anno Domini'
        assert Locale('en', 'US').eras['abbreviated'][0] == 'BC'
    def test_time_zones_property(self):
        time_zones = Locale('en', 'US').time_zones
        assert (time_zones['Europe/London']['long']['daylight'] ==
                'British Summer Time')
        assert time_zones['America/St_Johns']['city'] == u'St. John\u2019s'
    def test_meta_zones_property(self):
        meta_zones = Locale('en', 'US').meta_zones
        assert (meta_zones['Europe_Central']['long']['daylight'] ==
                'Central European Summer Time')
    def test_zone_formats_property(self):
        assert Locale('en', 'US').zone_formats['fallback'] == '%(1)s (%(0)s)'
        assert Locale('pt', 'BR').zone_formats['region'] == u'Hor\xe1rio %s'
    def test_first_week_day_property(self):
        # 0 == Monday, 6 == Sunday
        assert Locale('de', 'DE').first_week_day == 0
        assert Locale('en', 'US').first_week_day == 6
    def test_weekend_start_property(self):
        assert Locale('de', 'DE').weekend_start == 5
    def test_weekend_end_property(self):
        assert Locale('de', 'DE').weekend_end == 6
    def test_min_week_days_property(self):
        assert Locale('de', 'DE').min_week_days == 4
    def test_date_formats_property(self):
        assert Locale('en', 'US').date_formats['short'].pattern == 'M/d/yy'
        assert Locale('fr', 'FR').date_formats['long'].pattern == 'd MMMM y'
    def test_time_formats_property(self):
        assert Locale('en', 'US').time_formats['short'].pattern == 'h:mm a'
        assert Locale('fr', 'FR').time_formats['long'].pattern == 'HH:mm:ss z'
    def test_datetime_formats_property(self):
        assert Locale('en').datetime_formats['full'] == u"{1} 'at' {0}"
        assert Locale('th').datetime_formats['medium'] == u'{1} {0}'
    def test_datetime_skeleton_property(self):
        assert Locale('en').datetime_skeletons['Md'].pattern == u"M/d"
        assert Locale('th').datetime_skeletons['Md'].pattern == u'd/M'
    def test_plural_form_property(self):
        assert Locale('en').plural_form(1) == 'one'
        assert Locale('en').plural_form(0) == 'other'
        assert Locale('fr').plural_form(0) == 'one'
        assert Locale('ru').plural_form(100) == 'many'
def test_default_locale(os_environ):
    """default_locale reads the env and maps POSIX/C aliases to en_US_POSIX."""
    for var in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
        os_environ[var] = ''
    os_environ['LANG'] = 'fr_FR.UTF-8'
    assert default_locale('LC_MESSAGES') == 'fr_FR'
    os_environ['LC_MESSAGES'] = 'POSIX'
    assert default_locale('LC_MESSAGES') == 'en_US_POSIX'
    # all of the C/POSIX spellings are aliases for en_US_POSIX
    for alias in ['C', 'C.UTF-8', 'POSIX']:
        os_environ['LANGUAGE'] = alias
        assert default_locale() == 'en_US_POSIX'
def test_negotiate_locale():
    """negotiate_locale matches case-insensitively and applies aliases."""
    assert (core.negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT']) ==
            'de_DE')
    assert core.negotiate_locale(['de_DE', 'en_US'], ['en', 'de']) == 'de'
    # matching against the available list is case-insensitive
    # (the original repeated this exact assertion twice; the duplicate
    # has been removed)
    assert (core.negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) ==
            'de_DE')
    assert (core.negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US']) ==
            'ja_JP')
    # 'no' is aliased to 'nb_NO'
    assert core.negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE']) == 'nb_NO'
def test_parse_locale():
    """parse_locale splits identifiers; encodings and modifiers are dropped."""
    assert core.parse_locale('zh_CN') == ('zh', 'CN', None, None)
    assert core.parse_locale('zh_Hans_CN') == ('zh', 'CN', 'Hans', None)
    assert core.parse_locale('zh-CN', sep='-') == ('zh', 'CN', None, None)
    with pytest.raises(ValueError) as excinfo:
        core.parse_locale('not_a_LOCALE_String')
    expected_message = "'not_a_LOCALE_String' is not a valid locale identifier"
    assert excinfo.value.args[0] == expected_message
    # @modifier and .encoding suffixes are silently discarded
    assert core.parse_locale('it_IT@euro') == ('it', 'IT', None, None)
    assert core.parse_locale('en_US.UTF-8') == ('en', 'US', None, None)
    assert (core.parse_locale('de_DE.iso885915@euro') ==
            ('de', 'DE', None, None))
@pytest.mark.parametrize('filename', [
    'babel/global.dat',
    'babel/locale-data/root.dat',
    'babel/locale-data/en.dat',
    'babel/locale-data/en_US.dat',
    'babel/locale-data/en_US_POSIX.dat',
    'babel/locale-data/zh_Hans_CN.dat',
    'babel/locale-data/zh_Hant_TW.dat',
    'babel/locale-data/es_419.dat',
])
def test_compatible_classes_in_global_and_localedata(filename):
    """Unpickle each shipped .dat file with a restricted Unpickler so any
    reference to a class outside the ``babel`` package fails loudly."""
    # Use pickle module rather than cPickle since cPickle.Unpickler is a method
    # on Python 2
    import pickle
    class Unpickler(pickle.Unpickler):
        def find_class(self, module, name):
            # *.dat files must have compatible classes between Python 2 and 3
            if module.split('.')[0] == 'babel':
                return pickle.Unpickler.find_class(self, module, name)
            raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
                                         (module, name))
    with open(filename, 'rb') as f:
        return Unpickler(f).load()
| {
"content_hash": "82d40f195ad099cfd03cb337d2f69849",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 79,
"avg_line_length": 37.2203947368421,
"alnum_prop": 0.5818824569155988,
"repo_name": "srisankethu/babel",
"id": "54cf37dde5352361a806e07396ef561e4f4a412a",
"size": "11810",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "JavaScript",
"bytes": "4753"
},
{
"name": "Makefile",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "580123"
},
{
"name": "Shell",
"bytes": "453"
}
],
"symlink_target": ""
} |
import logging
import sys
import os
from textwrap import dedent
import requests
from tambo import Transport
import chacractl
from chacractl.util import retry
logger = logging.getLogger(__name__)
class Project(object):
    # _help doubles as the CLI help text shown by Transport.catch_help
    _help = dedent("""
    Handle projects on a remote chacra instance.
    Creating a new project::
        chacractl project create project
    Options:
    create Creates a new project
    """)
    help_menu = "create projects"
    options = ['create']
    def __init__(self, argv):
        self.argv = argv
    @property
    def base_url(self):
        # all project endpoints live under <chacra-url>/binaries
        return os.path.join(
            chacractl.config['url'], 'binaries'
        )
    def sanitize_url(self, url_part):
        """Normalize a user-supplied URL fragment to canonical '<part>/' form."""
        # get rid of the leading slash to prevent issues when joining
        url = url_part.lstrip('/')
        # and add a trailing slash so that the request is done at the correct
        # canonical url
        if not url.endswith('/'):
            url = "%s/" % url
        return url
    @retry()
    def post(self, url):
        """Create the project at ``url`` unless a HEAD probe says it exists."""
        exists = requests.head(url, verify=chacractl.config['ssl_verify'])
        if exists.status_code == 200:
            logger.warning('resource exists, will not upload')
            logger.warning('SKIP %s', url)
            return
        elif exists.status_code == 404:
            logger.info('POSTing to project: %s', url)
            response = requests.post(
                url,
                auth=chacractl.config['credentials'],
                verify=chacractl.config['ssl_verify'])
            if response.status_code > 201:
                logger.warning("%s -> %s", response.status_code, response.text)
                response.raise_for_status()
    @retry()
    def delete(self, url):
        """Delete the project at ``url``; a 404 means it is already gone."""
        # XXX This exists here but it is not yet implemented, e.g. nothing
        # calls this method
        exists = requests.head(url, verify=chacractl.config['ssl_verify'])
        if exists.status_code == 404:
            logger.warning('project already deleted')
            logger.warning('SKIP %s', url)
            return
        logger.info('DELETE project: %s', url)
        response = requests.delete(
            url,
            auth=chacractl.config['credentials'],
            verify=chacractl.config['ssl_verify'])
        if response.status_code > 201:
            logger.warning("%s -> %s", response.status_code, response.text)
    def main(self):
        """CLI entry point: parse argv and dispatch create (and, later, delete)."""
        self.parser = Transport(self.argv, options=self.options)
        self.parser.catch_help = self._help
        self.parser.parse_args()
        # handle posting projects:
        if self.parser.has('create'):
            url_part = self.sanitize_url(self.parser.get('create'))
            if not sys.stdin.isatty():
                # read from stdin
                logger.info('reading input from stdin')
                # NOTE(review): `line` is never used, so the same URL is
                # POSTed once per stdin line - looks unintentional; confirm.
                for line in sys.stdin.readlines():
                    url = os.path.join(self.base_url, url_part)
                    self.post(url)
            else:
                url = os.path.join(self.base_url, url_part)
                self.post(url)
        # XXX this exists here but it not yet enabled from the CLI
        elif self.parser.has('delete'):
            url_part = self.sanitize_url(self.parser.get('delete'))
            url = os.path.join(self.base_url, url_part)
            self.delete(url)
| {
"content_hash": "5e1bcbd32f1574d197d98899d6d33b7e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 31.80952380952381,
"alnum_prop": 0.5739520958083832,
"repo_name": "ceph/chacractl",
"id": "4bc134b7b26d45d1ad6ac9f7b3fe53706f3969fc",
"size": "3340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chacractl/api/projects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31485"
}
],
"symlink_target": ""
} |
from re import compile
import pytest
from nerodia.elements.html_elements import HTMLElement
from nerodia.locators.text_area.selector_builder import SelectorBuilder
ATTRIBUTES = HTMLElement.ATTRIBUTES
@pytest.fixture
def builder(browser_mock):
    # yield (rather than return) keeps pytest teardown semantics available
    yield SelectorBuilder(ATTRIBUTES, browser_mock)
class TestBuild(object):
    """The textarea SelectorBuilder must pass `value` through untouched."""
    def test_always_return_value_argument_for_string(self, builder):
        selector = {'tag_name': 'textarea', 'value': 'Foo'}
        expected = {'xpath': ".//*[local-name()='textarea']", 'value': 'Foo'}
        assert builder.build(selector) == expected
    def test_always_return_value_argument_for_regex(self, builder):
        pattern = compile(r'Foo')
        selector = {'tag_name': 'textarea', 'value': pattern}
        expected = {'xpath': ".//*[local-name()='textarea']", 'value': pattern}
        assert builder.build(selector) == expected
| {
"content_hash": "d07cca179996b387a21b5f0804f36515",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 89,
"avg_line_length": 33.275862068965516,
"alnum_prop": 0.633160621761658,
"repo_name": "lmtierney/watir-snake",
"id": "fd5f4d5bc5c3cfb8970c08471d87b6b5063e686e",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/selector_builder/textarea_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "403217"
}
],
"symlink_target": ""
} |
import time
from mod_pywebsocket import stream
from mod_pywebsocket.handshake.hybi import compute_accept
def web_socket_do_extra_handshake(request):
    """Deliberately broken handshake: emit a WebSocket frame *before* the
    101 response, then keep streaming frames so the client must abort after
    a bounded read instead of buffering forever."""
    # This simulates a broken server that sends a WebSocket frame before the
    # handshake, and more frames afterwards. It is important that if this
    # happens the client does not buffer all the frames as the server continues
    # to send more data - it should abort after reading a reasonable number of
    # bytes (set arbitrarily to 1024).
    frame = stream.create_text_frame('\0Frame-contains-thirty-two-bytes')
    msg = frame
    msg += 'HTTP/1.1 101 Switching Protocols\r\n'
    msg += 'Upgrade: websocket\r\n'
    msg += 'Connection: Upgrade\r\n'
    msg += 'Sec-WebSocket-Accept: %s\r\n' % compute_accept(request.headers_in['Sec-WebSocket-Key'])[0]
    msg += '\r\n'
    request.connection.write(msg)
    # continue writing data until the client disconnects
    while True:
        time.sleep(1)
        # Bug fix: use floor division so the frame count stays an int; the
        # old `1024 / len(frame)` relied on Python 2 integer division and
        # would break range() under Python 3. Same value on Python 2.
        numFrames = 1024 // len(frame) # write over 1024 bytes including the above handshake
        for i in range(0, numFrames):
            request.connection.write(frame)
def web_socket_transfer_data(request):
    # No data-transfer phase: this test only exercises the broken handshake.
    pass
| {
"content_hash": "a04d658ed8f328729b3453a86f9b74f0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 102,
"avg_line_length": 39.63333333333333,
"alnum_prop": 0.6980656013456686,
"repo_name": "Xperia-Nicki/android_platform_sony_nicki",
"id": "b3eb961e80ebcedccf1b3b9c5c7a69c109e50dad",
"size": "2523",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "external/webkit/LayoutTests/http/tests/websocket/tests/hybi/handshake-fail-by-prepended-null_wsh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "212775"
},
{
"name": "Awk",
"bytes": "19252"
},
{
"name": "C",
"bytes": "68667466"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "54670920"
},
{
"name": "CLIPS",
"bytes": "12224"
},
{
"name": "CSS",
"bytes": "283405"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Java",
"bytes": "4882"
},
{
"name": "JavaScript",
"bytes": "19597804"
},
{
"name": "Objective-C",
"bytes": "5849156"
},
{
"name": "PHP",
"bytes": "17224"
},
{
"name": "Pascal",
"bytes": "42411"
},
{
"name": "Perl",
"bytes": "1632149"
},
{
"name": "Prolog",
"bytes": "214621"
},
{
"name": "Python",
"bytes": "3493321"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Ruby",
"bytes": "78743"
},
{
"name": "Scilab",
"bytes": "554"
},
{
"name": "Shell",
"bytes": "265637"
},
{
"name": "TypeScript",
"bytes": "45459"
},
{
"name": "XSLT",
"bytes": "11219"
}
],
"symlink_target": ""
} |
import binascii
import sys

from zope.interface import implements

from pyramid.interfaces import IAuthenticationPolicy
from pyramid.authentication import CallbackAuthenticationPolicy, \
    AuthTktAuthenticationPolicy

from pyvac.models import DBSession, User
class AuthBasicAuthenticationPolicy(CallbackAuthenticationPolicy):
    """HTTP Basic authentication policy backed by the pyvac user table.

    Credentials are read from the ``Authorization`` request header and
    validated with ``User.by_credentials``.  All failure modes (missing
    header, wrong scheme, bad base64, bad credentials) yield ``None``.
    """

    implements(IAuthenticationPolicy)

    def __init__(self, callback=None):
        # Optional groupfinder-style callback consumed by the base class.
        self.callback = callback

    def authenticated_userid(self, request):
        """Return the login of a valid Basic-auth user, or None."""
        auth = request.environ.get('HTTP_AUTHORIZATION')
        try:
            authmeth, auth = auth.split(' ', 1)
        except (AttributeError, ValueError):
            # AttributeError: header absent (auth is None).
            # ValueError: nothing to split -- not enough values to unpack.
            # (The previous `except AttributeError as ValueError` only
            # caught AttributeError and shadowed the ValueError builtin.)
            return None
        if authmeth.lower() != 'basic':
            return None
        try:
            # base64.b64decode works on both Python 2 and 3; decode the
            # result so the credential split operates on text.
            # (str.decode('base64') no longer exists on Python 3, and
            # binascii was previously referenced without being imported.)
            auth = base64.b64decode(auth.strip()).decode('utf-8')
        except (binascii.Error, TypeError, UnicodeDecodeError):
            # can't decode: not valid base64 / not valid UTF-8
            return None
        try:
            login, password = auth.split(':', 1)
        except ValueError:  # not enough values to unpack
            return None
        if User.by_credentials(DBSession(), login, password):
            return login
        return None

    def unauthenticated_userid(self, request):
        # Basic auth ships credentials on every request, so the
        # "unauthenticated" id comes from the same lookup.
        return self.authenticated_userid(request)

    def remember(self, request, principal, **kw):
        # Stateless scheme: nothing to persist.
        return []

    def forget(self, request):
        # Stateless scheme: nothing to clear.
        return []
class RouteSwithchAuthPolicy(CallbackAuthenticationPolicy):
    """Dispatch between Basic and AuthTkt policies based on the route.

    A small set of package-serving routes authenticates via HTTP Basic;
    every other route uses the cookie-ticket policy.
    """

    implements(IAuthenticationPolicy)

    def __init__(self, secret='key', callback=None):
        basic_policy = AuthBasicAuthenticationPolicy(callback=callback)
        ticket_policy = AuthTktAuthenticationPolicy(secret,
                                                    callback=callback,
                                                    hashalg='sha512')
        self.impl = {'basic': basic_policy, 'tk': ticket_policy}
        self.callback = callback

    def get_impl(self, request):
        """Pick the concrete policy for this request's matched route."""
        basic_routes = ('list_simple', 'show_simple',
                        'show_release_file', 'show_external_release_file',
                        'upload_releasefile')
        route = request.matched_route
        if route and route.name in basic_routes:
            return self.impl['basic']
        return self.impl['tk']

    def authenticated_userid(self, request):
        return self.get_impl(request).authenticated_userid(request)

    def unauthenticated_userid(self, request):
        return self.get_impl(request).unauthenticated_userid(request)

    def remember(self, request, principal, **kw):
        return self.get_impl(request).remember(request, principal, **kw)

    def forget(self, request, *args, **kw):
        return self.get_impl(request).forget(request, *args, **kw)
| {
"content_hash": "8744c39102f34a32d7754600ce738f84",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 31.876404494382022,
"alnum_prop": 0.6112090236164963,
"repo_name": "doyousoft/pyvac",
"id": "def2bf24638294b285b2425b79c375e0f1891cf7",
"size": "2860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvac/helpers/authentication.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96138"
},
{
"name": "HTML",
"bytes": "54295"
},
{
"name": "JavaScript",
"bytes": "4635"
},
{
"name": "Python",
"bytes": "257565"
}
],
"symlink_target": ""
} |
import mock
import six
from nova import objects
from nova.scheduler.filters import type_filter
from nova import test
from nova.tests.unit.scheduler import fakes
from nova.tests import uuidsentinel as uuids
class TestTypeFilter(test.NoDBTestCase):
    """Tests for TypeAffinityFilter and AggregateTypeAffinityFilter."""

    def test_type_filter(self):
        """TypeAffinityFilter passes a host only while all of its instances
        share the requested flavor id; it also warns on instantiation."""
        with mock.patch.object(type_filter.LOG, 'warning') as mock_warning:
            self.filt_cls = type_filter.TypeAffinityFilter()
        # make sure we logged a deprecation warning
        self.assertEqual(1, mock_warning.call_count)
        self.assertIn('TypeAffinityFilter is deprecated for removal',
                      six.text_type(mock_warning.call_args_list[0][0]))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        host.instances = {}
        target_id = 1
        spec_obj = objects.RequestSpec(
            context=mock.MagicMock(),
            flavor=objects.Flavor(id=target_id))
        # True since no instances on host
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # Add an instance with the same instance_type_id
        inst1 = objects.Instance(uuid=uuids.instance_1,
                                 instance_type_id=target_id)
        host.instances = {inst1.uuid: inst1}
        # True since only same instance_type_id on host
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # Add an instance with a different instance_type_id
        diff_type = target_id + 1
        inst2 = objects.Instance(uuid=uuids.instance_2,
                                 instance_type_id=diff_type)
        host.instances.update({inst2.uuid: inst2})
        # False since host now has an instance of a different type
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_no_metadata(self, agg_mock):
        """Hosts pass when their aggregates define no instance_type key."""
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when no instance_type is defined for aggregate
        agg_mock.return_value = set([])
        # True as no instance_type set for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'instance_type')

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_single_instance_type(self, agg_mock):
        """A single legacy instance_type value gates which flavors pass."""
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        spec_obj2 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake2'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when a single instance_type is defined for an aggregate
        # using legacy single value syntax
        agg_mock.return_value = set(['fake1'])
        # True as instance_type is allowed for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # False as instance_type is not allowed for aggregate
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj2))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_multi_aggregate(self, agg_mock):
        """Values gathered from several aggregates are all honored."""
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        spec_obj2 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake2'))
        spec_obj3 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake3'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when a single instance_type is defined for multiple aggregates
        # using legacy single value syntax
        agg_mock.return_value = set(['fake1', 'fake2'])
        # True as instance_type is allowed for first aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # True as instance_type is allowed for second aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj2))
        # False as instance_type is not allowed for aggregates
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj3))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_type_filter_multi_instance_type(self, agg_mock):
        """A comma-separated instance_type value allows multiple flavors."""
        self.filt_cls = type_filter.AggregateTypeAffinityFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake1'))
        spec_obj2 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake2'))
        spec_obj3 = objects.RequestSpec(
            context=mock.sentinel.ctx,
            flavor=objects.Flavor(name='fake3'))
        host = fakes.FakeHostState('fake_host', 'fake_node', {})
        # tests when multiple instance_types are defined for aggregate
        agg_mock.return_value = set(['fake1,fake2'])
        # True as instance_type is allowed for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        # True as instance_type is allowed for aggregate
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj2))
        # False as instance_type is not allowed for aggregate
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj3))
| {
"content_hash": "76336678b75e71335dd87fa46b0ffd63",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 78,
"avg_line_length": 45.02325581395349,
"alnum_prop": 0.6539256198347108,
"repo_name": "jianghuaw/nova",
"id": "5180dc4c59f6a2dfb9a8a1ac11f548f08fa31f4c",
"size": "6381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/scheduler/filters/test_type_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
# Global oslo.config registry; import the legacy v2 extension-list option
# so _get_flags() below can copy and extend it.
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')
class ExtendedStatusSampleJsonTests(test_servers.ServersSampleBase):
    """API sample tests for the os-extended-status extension (v2 API)."""

    extension_name = "os-extended-status"
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'

    def _get_flags(self):
        """Extend the inherited flags with the contrib extensions needed."""
        flags = super(ExtendedStatusSampleJsonTests, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        flags['osapi_compute_extension'].extend([
            'nova.api.openstack.compute.contrib.keypairs.Keypairs',
            'nova.api.openstack.compute.contrib.extended_ips.Extended_ips',
            'nova.api.openstack.compute.contrib.extended_ips_mac.'
            'Extended_ips_mac',
            'nova.api.openstack.compute.contrib.extended_status.'
            'Extended_status',
        ])
        return flags

    def test_show(self):
        """GET of a single server matches the extended-status sample."""
        uuid = self._post_server()
        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs.update({'hostid': '[a-f0-9]+',
                     'access_ip_v4': '1.2.3.4',
                     'access_ip_v6': '80fe::'})
        self._verify_response('server-get-resp', subs, response, 200)

    def test_detail(self):
        """GET of the server detail list matches the sample."""
        uuid = self._post_server()
        response = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs.update({'id': uuid,
                     'hostid': '[a-f0-9]+',
                     'access_ip_v4': '1.2.3.4',
                     'access_ip_v6': '80fe::'})
        self._verify_response('servers-detail-resp', subs, response, 200)
| {
"content_hash": "49f1f6ae7c02c7eeb61cef2767366d23",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 38.255319148936174,
"alnum_prop": 0.6101223581757509,
"repo_name": "takeshineshiro/nova",
"id": "09d48051bb5a828bb363fbe99c3f413c84f4de37",
"size": "2430",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/functional/v3/test_extended_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16467436"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "285755"
}
],
"symlink_target": ""
} |
"""
discord.ext.commands
~~~~~~~~~~~~~~~~~~~~~
An extension module to facilitate creation of bot commands.
:copyright: (c) 2015-present Rapptz
:license: MIT, see LICENSE for more details.
"""
from .bot import *
from .cog import *
from .context import *
from .converter import *
from .cooldowns import *
from .core import *
from .errors import *
from .flags import *
from .help import *
from .parameters import *
from .hybrid import *
| {
"content_hash": "dba949e496f65445e0b581fd819e6750",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 20.80952380952381,
"alnum_prop": 0.6979405034324943,
"repo_name": "Rapptz/discord.py",
"id": "08dab54d343844e2cd4a27b43ec235974c6fd995",
"size": "437",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "discord/ext/commands/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2493009"
}
],
"symlink_target": ""
} |
from unittest import TestCase
class NeoTestCase(TestCase):
    """Base test case for neo tests; adds no behaviour beyond TestCase."""
| {
"content_hash": "c0d4560d3bcb73a8c6b657a562dad3a1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 29,
"avg_line_length": 11.833333333333334,
"alnum_prop": 0.7605633802816901,
"repo_name": "localhuman/neo-python",
"id": "16d840afe42e30d2e76058f4c1aa5d165d3e67ce",
"size": "71",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/Utils/NeoTestCase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1667"
},
{
"name": "Python",
"bytes": "810803"
}
],
"symlink_target": ""
} |
import igt_base_template
class APNTemplate(igt_base_template.BaseTemplate):
    """APN (Apple push) template; currently adds nothing to BaseTemplate."""

    def __init__(self):
        # Explicit unbound base call: safe for old-style Python 2 classes,
        # where super() would not be available.
        igt_base_template.BaseTemplate.__init__(self)
| {
"content_hash": "4678e7e88f825520725300d9317b274f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 53,
"avg_line_length": 27,
"alnum_prop": 0.6790123456790124,
"repo_name": "cainli/appLog",
"id": "4af9ef3273740931940130e40ac1ba740c3d0288",
"size": "162",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "MobileLogMgr/igetui/template/igt_apn_template.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "82155"
},
{
"name": "CSS",
"bytes": "2593"
},
{
"name": "HTML",
"bytes": "16040"
},
{
"name": "JavaScript",
"bytes": "36227"
},
{
"name": "Protocol Buffer",
"bytes": "14537"
},
{
"name": "Python",
"bytes": "603334"
}
],
"symlink_target": ""
} |
"""config.py

Configuration values for bmr_statbot, read from ``config/config.ini``.
"""
import ConfigParser

# Python 2 ConfigParser; the ini path is resolved relative to the
# working directory the bot is launched from.
cp = ConfigParser.ConfigParser()
cp.read('config/config.ini')

# database configuration
db_user = cp.get('database', 'user')
db_password = cp.get('database', 'password')
db_address = cp.get('database', 'address')
db_name = cp.get('database', 'name')

# Bot's own subreddit/account name.
bmr_name = cp.get('reddit', 'bmr_name')

# Reddit username and password
reddit_user = cp.get('reddit', 'user')
reddit_pass = cp.get('reddit', 'password')
user_agent = cp.get('reddit', 'user_agent')

# Default subreddits. Could probably use migrating to config.ini, too
default_subreddits = [
    'adviceanimals',
    'AskReddit',
    'aww',
    'bestof',
    'books',
    'earthporn',
    'explainlikeimfive',
    'funny',
    'gaming',
    'gifs',
    'IAmA',
    'movies',
    'music',
    'news',
    'pics',
    'science',
    'technology',
    'television',
    'todayilearned',
    'videos',
    'worldnews',
    'wtf'
]

# Number of subreddits to recommend in each reply.
recommend_count = cp.getint('recommender', 'recommend_count')

# How long to wait for new posts.
sleep_time = cp.getint('scraper', 'sleep_time')

# Number of submissions to go through before giving up searching
# In order to find submissions newer or older than submission x, we have to go
# through the new queue until we find submission x, and then go up or down the
# list. This setting controls how soon we give up if we can't find submission x.
give_up_search_after = cp.getint('scraper', 'give_up_search_after')

# Markdown reply body; placeholders are filled in via str.format.
reply_template ="""Hi, I'm a bot. Here is some stuff:
## This post
* Subreddits subscribed to: **{sub_count}**
* {std_dev_string}
* **{default_count}** of those are defaults
## Recommendations
Based on your subscriptions, here are some recommendations:
{recommendations}
(for more information on this bot see /r/bmr_statbot)
"""
| {
"content_hash": "83b7c90fac19990d0ada81158978d38e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 80,
"avg_line_length": 24.946666666666665,
"alnum_prop": 0.6424371993586317,
"repo_name": "DeeUnderscore/bmr_statbot",
"id": "8a377c585b17b40ec821d6198bbe2c164e6825e7",
"size": "1871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bmr_statbot/config.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Java",
"bytes": "6713"
},
{
"name": "Python",
"bytes": "29669"
}
],
"symlink_target": ""
} |
import argparse
import functools
import sys
import os
import json
import torch
from program_synthesis.karel import arguments
from program_synthesis.karel import dataset
from program_synthesis.karel import models
from program_synthesis.karel.dataset import executor
from program_synthesis.common.tools import saver
from program_synthesis.algolisp.tools import evaluation
def evaluate(args):
    """Restore a saved karel model and evaluate it over a dataset split.

    The split is chosen by flags: ``--eval-final``, ``--eval-train``, or
    the default eval split.  Raises ValueError for a model with no
    training steps.  The actual scoring loop is delegated to
    ``evaluation.run_eval``.
    """
    print("Evaluation:")
    print("\tModel type: %s\n\tModel path: %s" % (args.model_type, args.model_dir))
    # Reload the args the model was trained with, then fill defaults added
    # since that training run.
    saver.restore_args(args)
    arguments.backport_default_args(args)
    dataset.set_vocab(args)
    m = models.get_model(args)
    if args.eval_final:
        eval_dataset = dataset.get_eval_final_dataset(args, m)
    elif args.eval_train:
        eval_dataset = dataset.get_train_dataset(args, m, for_eval=True)
    else:
        eval_dataset = dataset.get_eval_dataset(args, m)
    if m.last_step == 0:
        # A freshly initialized model has nothing meaningful to evaluate.
        raise ValueError('Attempting to evaluate on untrained model')
    m.model.eval()
    current_executor = executor.get_executor(args)()
    if args.example_id is not None:
        # Restrict evaluation to a single example.
        # NOTE(review): reads from eval_dataset.task but assigns to
        # eval_dataset.data -- confirm this attribute pairing is intended.
        eval_dataset.data = [eval_dataset.task[args.example_id]]
    evaluation.run_eval(
        args.tag, eval_dataset, m.inference,
        current_executor.execute, not args.hide_example_info,
        args.report_path)
if __name__ == "__main__":
    parser = arguments.get_arg_parser('Evaluating Text2Code', 'eval')
    args = parser.parse_args()
    # Use CUDA only when it was requested and is actually available.
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if not args.model_type or (not args.model_dir and args.model_type != 'search'):
        raise ValueError("Specify model_dir and model_type")
    if not args.tag:
        # Default the report tag to the model type.
        args.tag = args.model_type
    evaluate(args)
| {
"content_hash": "234fa97d6346f7dad686fa283cc5d6c8",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 33.21153846153846,
"alnum_prop": 0.6965836711059641,
"repo_name": "nearai/program_synthesis",
"id": "aaf9c371d896c43b190400469dfd2fb8c87a490e",
"size": "1727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "program_synthesis/karel/eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "14936"
},
{
"name": "Jupyter Notebook",
"bytes": "2469525"
},
{
"name": "Python",
"bytes": "1024751"
}
],
"symlink_target": ""
} |
import itertools
import glob
import math
import unittest
import sift_util
import sift_descriptors_pb2
import tempfile
class TestSiftUtilFunctions(unittest.TestCase):
    """Unit tests for sift_util loading, counting and conversion helpers.

    All tests read the checked-in fixtures under ``test_data/``.
    """

    def test_read_parameters(self):
        """Extraction parameters embedded in a .sift file are read back."""
        extraction_parameters = \
            sift_util.get_extraction_parameters('test_data/seminar.sift')
        self.assertEqual(extraction_parameters.normalization_threshold, 0.5)
        self.assertEqual(extraction_parameters.rotation_invariance, False)
        self.assertEqual(extraction_parameters.discard_unnormalized, True)
        self.assertEqual(extraction_parameters.multiscale, True)
        self.assertEqual(extraction_parameters.percentage, 1.0)
        self.assertEqual(extraction_parameters.minimum_radius, 16)
        self.assertEqual(extraction_parameters.fractional_xy, True)
        self.assertEqual(extraction_parameters.resolution_factor, 1)
        # 4294967295 == UINT32_MAX -- presumably a "no crop" sentinel for
        # the bottom-right corner; confirm against the extraction code.
        self.assertEqual(extraction_parameters.top_left_x, 0)
        self.assertEqual(extraction_parameters.top_left_y, 0)
        self.assertEqual(extraction_parameters.bottom_right_x, 4294967295)
        self.assertEqual(extraction_parameters.bottom_right_y, 4294967295)
        self.assertAlmostEqual(extraction_parameters.first_level_smoothing, 1.3)

    def test_read_descriptor(self):
        """The seminar fixture contains the expected descriptor count."""
        descriptors = sift_util.load_descriptors('test_data/seminar.sift')
        self.assertEqual(1369, len(descriptors.sift_descriptor))

    def test_count(self):
        """Counting works for an empty list, one file, and a file list."""
        self.assertEqual(sift_util.count_descriptors_in_list([]), 0)
        self.assertEqual(sift_util.count_descriptors_in_file(
            'test_data/seminar.sift'), 1369)
        self.assertEqual(sift_util.count_descriptors_in_list(
            ['test_data/seminar.sift', 'test_data/seminar.sift']), 1369 * 2)

    def test_read_numpy_array_from_files_noalpha(self):
        """ Check the reading of sift descriptors from files.
        This doesn't check the application of the alpha parameter, just
        number of descriptors loaded and that max_points is being
        observed.
        """
        # Test loading from empty list
        point_array = sift_util.load_array_from_files(file_list=[],
                                                      max_points=300)
        self.assertEqual(len(point_array), 0)
        # Test loading with max points less than num available
        point_array = sift_util.load_array_from_files(
            file_list=glob.glob('test_data/seminar.sift'), max_points=300)
        self.assertTrue(len(point_array) > 0)
        self.assertTrue(len(point_array) <= 300)
        # Test loading with max points equal to num available
        point_array = sift_util.load_array_from_files(
            file_list=glob.glob('test_data/seminar.sift'), max_points=1369)
        self.assertEqual(len(point_array), 1369)
        # Test loading with max points not specified
        point_array = sift_util.load_array_from_files(
            file_list=glob.glob('test_data/seminar.sift'))
        self.assertEqual(len(point_array), 1369)
        # Test loading with max points higher than num available
        point_array = sift_util.load_array_from_files(
            file_list=glob.glob('test_data/seminar.sift'), max_points=1500)
        self.assertEqual(len(point_array), 1369)
        # Test loading from list with max points lower than available
        point_array = sift_util.load_array_from_files(
            file_list=glob.glob('test_data/*.sift'), max_points=1369)
        self.assertTrue(len(point_array) <= 1369)
        # Test loading from list with max points lower than available, but higher than a single file
        point_array = sift_util.load_array_from_files(
            file_list=glob.glob('test_data/*.sift'), max_points=1500)
        self.assertTrue(len(point_array) > 1369)  # Check that more descriptors than from a single file
        self.assertTrue(len(point_array) <= 1500)  # Check that within max_points

    def test_read_numpy_array_files_alpha(self):
        """ Checks the use of the alpha parameter in the loading of sift files.
        """
        # Load without alpha (128-d), then with alpha=1.0 and alpha=0.1
        # (130-d: x/y columns appended; see the protobuf conversion test
        # below for the alpha scaling formula).
        point_array = sift_util.load_array_from_files(file_list=glob.glob('test_data/seminar.sift'), max_points=1369)
        self.assertEqual(point_array.shape[1], 128)
        point_array_1_0 = sift_util.load_array_from_files(file_list=glob.glob('test_data/seminar.sift'), alpha=1.0, max_points=1369)
        self.assertEqual(point_array_1_0.shape[1], 130)
        point_array_0_1 = sift_util.load_array_from_files(file_list=glob.glob('test_data/seminar.sift'), alpha=0.1, max_points=1369)
        self.assertEqual(point_array_0_1.shape[1], 130)
        for d_1, d_2 in itertools.izip(point_array_1_0, point_array_0_1):
            # Position columns should scale linearly with alpha, within a
            # rounding tolerance of one unit.
            self.assertTrue(math.fabs(d_1[-2] * 0.1 - d_2[-2]) <= 1)
            self.assertTrue(math.fabs(d_1[-1] * 0.1 - d_2[-1]) <= 1)

    def test_read_numpy_array_files_alpha_over_one(self):
        """ Checks the use of the alpha parameter in the loading of sift files.
        """
        # Same as test_read_numpy_array_files_alpha, but with alpha > 1.
        point_array = sift_util.load_array_from_files(file_list=glob.glob('test_data/seminar.sift'), max_points=1369)
        self.assertEqual(point_array.shape[1], 128)
        point_array_1_0 = sift_util.load_array_from_files(file_list=glob.glob('test_data/seminar.sift'), alpha=1.0, max_points=1369)
        self.assertEqual(point_array_1_0.shape[1], 130)
        point_array_2_0 = sift_util.load_array_from_files(file_list=glob.glob('test_data/seminar.sift'), alpha=2.0, max_points=1369)
        self.assertEqual(point_array_2_0.shape[1], 130)
        for d_1, d_2 in itertools.izip(point_array_1_0, point_array_2_0):
            self.assertTrue(math.fabs(d_1[-2] * 2.0 - d_2[-2]) <= 1, 'd_1[-2] = %d, d_2[-2] = %d' % (d_1[-2], d_2[-2]))
            self.assertTrue(math.fabs(d_1[-1] * 2.0 - d_2[-1]) <= 1, 'd_1[-1] = %d, d_2[-1] = %d' % (d_1[-1], d_2[-1]))

    def test_protobuf_to_numpy_converstion(self):
        """A descriptor converts to its bins; with alpha, x/y are appended
        as int(coord * 127 * alpha + 0.5)."""
        descriptor = sift_descriptors_pb2.SiftDescriptor()
        descriptor.bin.append(15)
        descriptor.bin.append(35)
        descriptor.x = 0.2
        descriptor.y = 0.3
        descriptor.scale = 1
        # Without alpha: just the bin values.
        descriptor_array = sift_util.convert_protobuf_descriptor_to_weighted_array(descriptor)
        self.assertEqual(len(descriptor_array), 2)
        self.assertAlmostEqual(descriptor_array[0], 15)
        self.assertAlmostEqual(descriptor_array[1], 35)
        alpha = 0.5
        descriptor_array = sift_util.convert_protobuf_descriptor_to_weighted_array(descriptor, alpha)
        self.assertEqual(len(descriptor_array), 4)
        self.assertAlmostEqual(descriptor_array[0], 15)
        self.assertAlmostEqual(descriptor_array[1], 35)
        self.assertAlmostEqual(descriptor_array[2], int(0.2 * 127 * alpha + 0.5))
        self.assertAlmostEqual(descriptor_array[3], int(0.3 * 127 * alpha + 0.5))
        alpha = 1.5
        descriptor_array = sift_util.convert_protobuf_descriptor_to_weighted_array(descriptor, alpha)
        self.assertEqual(len(descriptor_array), 4)
        self.assertAlmostEqual(descriptor_array[0], 15)
        self.assertAlmostEqual(descriptor_array[1], 35)
        self.assertAlmostEqual(descriptor_array[2], int(0.2 * 127 * alpha + 0.5))
        self.assertAlmostEqual(descriptor_array[3], int(0.3 * 127 * alpha + 0.5))

    def test_merge_descriptor_sets(self):
        """Merging descriptor sets preserves the total descriptor count."""
        files = ['test_data/seminar.sift', 'test_data/Glass_is_Liquide.sift']
        descriptor_set_list = []
        for f in files:
            descriptor_set = sift_util.load_descriptors(f)
            descriptor_set_list.append(descriptor_set)
        merged_set = sift_util.merge_descriptor_sets(descriptor_set_list)
        self.assertEqual(sift_util.count_descriptors_in_list(files),
                         len(merged_set.sift_descriptor))

    def test_convert_bare_to_params_prepend(self):
        """ Tests conversion from a bare protobuf to one with params prepended.
        """
        destination = tempfile.TemporaryFile()
        original = open('test_data/seminar.old-sift', 'rb')
        sift_util.convert_bare_set_to_set_with_params(original, destination)
        # NOTE(review): unless convert_bare_set_to_set_with_params rewinds
        # both handles, the two read() calls below return b'' and the
        # assertion passes vacuously; consider seek(0) first -- confirm.
        self.assertEqual(original.read(), destination.read())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "57ee312576b0455286d2ab0cacc956c2",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 132,
"avg_line_length": 53.30967741935484,
"alnum_prop": 0.6553309935858647,
"repo_name": "sanchom/sjm",
"id": "e6756d4c7799f06997f6e43284af26f7da40986d",
"size": "9578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sift/sift_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "3129"
},
{
"name": "C++",
"bytes": "364964"
},
{
"name": "Protocol Buffer",
"bytes": "6617"
},
{
"name": "Python",
"bytes": "69030"
}
],
"symlink_target": ""
} |
from lampost.di.resource import Injected, module_inject
from lampost.db.exceptions import DataError
from lampost.editor.editor import Editor
from lampmud.comm.broadcast import BroadcastMap, Broadcast, broadcast_types
mud_actions = Injected('mud_actions')
module_inject(__name__)
class SocialsEditor(Editor):
    """Editor endpoints for 'social' emote objects."""

    def __init__(self):
        super().__init__('social')

    @staticmethod
    def preview(source, target, b_map, self_source, **_):
        """Render every broadcast type of the given map for preview."""
        recipient = source if self_source else target
        broadcast = Broadcast(BroadcastMap(**b_map), source, recipient)
        previews = {}
        for b_type in broadcast_types:
            previews[b_type['id']] = broadcast.substitute(b_type['id'])
        return previews

    def _pre_create(self, obj_def, *_):
        # Reject duplicate verbs before the social is persisted.
        if mud_actions.primary(obj_def['dbo_id']):
            raise DataError("Verb already in use")
def _ensure_name(obj_def):
name = obj_def['name'] or obj_def['verb'] or obj_def['dbo_id']
obj_def['name'] = name.capitalize()
class SkillEditor(Editor):
    """Editor that normalizes the display name before create and update."""

    def _pre_create(self, obj_def, *_):
        # Ensure a capitalized display name before the object is stored.
        _ensure_name(obj_def)

    def _pre_update(self, obj_def, *_):
        _ensure_name(obj_def)
| {
"content_hash": "a4ebd1c51deac9461bb308ede009503f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 119,
"avg_line_length": 32.02857142857143,
"alnum_prop": 0.6636931311329171,
"repo_name": "genzgd/Lampost-Mud",
"id": "d2374d322e4f51f85cafe77e18b5f77e5dfd570f",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lampmud/editor/shared.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "HTML",
"bytes": "117576"
},
{
"name": "JavaScript",
"bytes": "217160"
},
{
"name": "Python",
"bytes": "121355"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
import re
import mmap
import contextlib
import os
from ..utils import *
class FindUseCommand(sublime_plugin.TextCommand):
    """Resolve the symbol under the cursor and insert a matching use/import.

    With one candidate namespace the import is inserted directly; with
    several, a quick panel lets the user pick.
    """

    def run(self, edit):
        view = self.view
        symbol = view.substr(view.word(view.sel()[0]))
        if re.match(r"\w", symbol) is None:
            # Not something importable under the cursor.
            return sublime.status_message('Not a valid symbol "%s" !' % symbol)
        self.namespaces = find_symbol(symbol, view.window())
        candidate_count = len(self.namespaces)
        if candidate_count == 1:
            self.view.run_command("import_use",
                                  {"namespace": self.namespaces[0][0]})
        elif candidate_count > 1:
            view.window().show_quick_panel(self.namespaces, self.on_done)

    def on_done(self, index):
        """Quick-panel callback; index is -1 when the panel was cancelled."""
        if index != -1:
            self.view.run_command("import_use",
                                  {"namespace": self.namespaces[index][0]})
"content_hash": "6ea21975690e19448c5e0303c4577505",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 29.275862068965516,
"alnum_prop": 0.6230859835100118,
"repo_name": "sergeylunev/sublimated-symfony",
"id": "8468d7a541ec1d78336e6e9343c2ed68dfa7be35",
"size": "849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sublimated_symfony/commands/finduse_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14402"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.