text stringlengths 4 1.02M | meta dict |
|---|---|
import logging
from django import http
from django.conf.urls.defaults import patterns, url
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from ella_imports.models import Server, ServerItem
logger = logging.getLogger("ella_imports")
try:
    import newman

    class ServerAdmin(newman.NewmanModelAdmin):
        """Newman admin for Server: listing/search config plus fetch actions."""
        list_display = ('title', 'domain', 'url', 'regenerate')
        # Bug fix: 'domain, title' was a single malformed entry (field name
        # containing a comma); search each field separately.
        search_fields = ('domain', 'title', 'url',)
        prepopulated_fields = {'slug': ('title',)}
        actions = ['a_fetch_servers']

        def a_fetch_servers(self, request, queryset):
            """Admin action: fetch every selected server, reporting failures.

            Failures are collected and shown to the admin user in one message;
            a failing server does not stop the remaining fetches.
            """
            failures = []
            for server in queryset:
                try:
                    server.fetch()
                except Exception as e:
                    failures.append('Fetch "%s" fail (%s)' % (server, e))
            if failures:
                self.message_user(request, ', '.join(failures))
        a_fetch_servers.short_description = _('Fetch selected servers')

        def get_urls(self):
            """Prepend the per-object fetch URL to the stock admin URLs."""
            urls = patterns('',
                url(r'^(.+)/fetch/$',
                    self.fetch_view,
                    name='server-fetch'),
            )
            # Custom URL must come first so it is matched before the
            # default object URLs.
            urls += super(ServerAdmin, self).get_urls()
            return urls

        def fetch_view(self, request, pk, extra_context=None):
            """Fetch a single server; respond 'OK' on success, 'KO <err>' otherwise."""
            server = get_object_or_404(Server, pk=pk)
            try:
                server.fetch()
                return http.HttpResponse('OK')
            except Exception as e:
                return http.HttpResponse('KO ' + str(e))

        def formfield_for_dbfield(self, db_field, **kwargs):
            """Make the 'category' field optional in the admin form."""
            if db_field.name == 'category':
                kwargs['required'] = False
            return super(ServerAdmin, self).formfield_for_dbfield(db_field, **kwargs)

    class ServerItemAdmin(newman.NewmanModelAdmin):
        """Newman admin for ServerItem (listing/filtering only)."""
        list_display = ('title', 'server', 'updated', 'priority')
        list_filter = ('server', 'updated',)
        raw_id_fields = ('photo',)
        actions = []

    newman.site.register(Server, ServerAdmin)
    newman.site.register(ServerItem, ServerItemAdmin)
except ImportError:
    # newman is an optional dependency; skip admin registration when absent.
    pass
| {
"content_hash": "3587b33b30caca0a27ea635d79603800",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 85,
"avg_line_length": 32.029411764705884,
"alnum_prop": 0.5661157024793388,
"repo_name": "ella/ella-imports",
"id": "3f139fbef2c332ce99cfc48b1b285da83028903f",
"size": "2178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ella_imports/newman_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19815"
}
],
"symlink_target": ""
} |
import urllib2
import codecs
from urllib2 import HTTPError
class HttpFileTransfer(object):
    """Download files over HTTP(S) using basic authentication (Python 2).

    Builds a urllib2 opener with a basic-auth handler bound to *domain*
    and re-encodes downloaded files line by line.
    """

    def __init__(self, domain, user, password, is_ssl=False):
        """Prepare an authenticated opener for http(s)://*domain*."""
        protocol = 'https' if is_ssl else 'http'
        self.__domain = protocol + '://' + domain
        password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        # Realm None: the credentials apply to any realm on this domain.
        password_mgr.add_password(None, domain, user, password)
        handler = urllib2.HTTPBasicAuthHandler(password_mgr)
        self.__opener = urllib2.build_opener(handler)

    def get_file(self, file_path, local_file_path, local_file_encode):
        """Fetch *file_path* and write it to *local_file_path*.

        The response bytes are decoded with *local_file_encode* and written
        through a codecs writer using the same encoding. A 404 is treated as
        "file absent" and silently ignored; other HTTP errors propagate.
        """
        url = self.__domain + file_path
        try:
            res = self.__opener.open(url)
        except HTTPError as err:
            if err.code == 404:
                # Missing remote file is not an error for callers.
                return
            # Re-raise the original error (keeps the original traceback,
            # unlike reconstructing a new HTTPError).
            raise
        result_file = codecs.open(local_file_path, 'wb', local_file_encode)
        try:
            # Stream line by line instead of readlines() to avoid holding
            # the whole response in memory.
            for line in res:
                result_file.write(unicode(line, local_file_encode))
        finally:
            # Ensure the local file is closed even if decoding/writing fails.
            result_file.close()
| {
"content_hash": "648269dcfac8c51ef38655e692faf793",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 82,
"avg_line_length": 37.37931034482759,
"alnum_prop": 0.6060885608856088,
"repo_name": "Akira-Taniguchi/transfer_lib",
"id": "b5d076e84dd0cb00eb186e0bef1553327d2c48ea",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transfer_lib/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5732"
}
],
"symlink_target": ""
} |
from booster import app, db
class Picture(db.Model):
    """Database model for a picture record and its display metadata."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Short display name (max 128 chars).
    name = db.Column(db.String(128))
    # Free-form description text.
    description = db.Column(db.Text)
    # Pixel dimensions of the image.
    width = db.Column(db.Integer)
    height = db.Column(db.Integer)
    # File name of the stored image; presumably resolved against an upload
    # directory elsewhere -- confirm against the serving code.
    filename = db.Column(db.String(128))
| {
"content_hash": "1fb01834cc91701c3d79e51db66597b3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 31.88888888888889,
"alnum_prop": 0.6759581881533101,
"repo_name": "zknight/booster",
"id": "708345d030340e99e0716418b5bf2dda544ba3d6",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booster/models/picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8304"
},
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "37825"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
from TaskTemplate import *
from acq4.devices.DAQGeneric.taskGUI import DAQGenericTaskGui
from acq4.devices.Device import TaskGui
#from acq4.pyqtgraph.WidgetGroup import WidgetGroup
import numpy as np
import acq4.pyqtgraph as pg
#from acq4.pyqtgraph.graphicsItems import InfiniteLine, VTickGroup
#from PyQt4 import Qwt5 as Qwt
class CameraTaskGui(DAQGenericTaskGui):
    """Task-runner GUI for a camera device.

    Extends the generic DAQ task GUI with an image view, per-plot time
    cursors, and camera state push/pop around task execution. Python 2
    code (print statement in handleResult).
    """
    def __init__(self, dev, taskRunner):
        DAQGenericTaskGui.__init__(self, dev, taskRunner, ownUi=False)  ## When initializing superclass, make sure it knows this class is creating the ui.
        # NOTE(review): Ui_Form presumably comes from the TaskTemplate star
        # import at the top of the file -- confirm.
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.stateGroup = pg.WidgetGroup(self) ## create state group before DAQ creates its own interface
        self.ui.horizSplitter.setStretchFactor(0, 0)
        self.ui.horizSplitter.setStretchFactor(1, 1)
        # Let the DAQ base class populate the control/plot splitters with
        # one widget pair per channel.
        DAQGenericTaskGui.createChannelWidgets(self, self.ui.ctrlSplitter, self.ui.plotSplitter)
        self.ui.plotSplitter.setStretchFactor(0, 10)
        self.ui.plotSplitter.setStretchFactor(1, 1)
        self.ui.plotSplitter.setStretchFactor(2, 1)
        ## plots should not be storing more than one trace at a time.
        for p in self.plots.values():
            p.plotItem.ctrl.maxTracesCheck.setChecked(True)
            p.plotItem.ctrl.maxTracesSpin.setValue(1)
            p.plotItem.ctrl.forgetTracesCheck.setChecked(True)
        conf = self.dev.camConfig
        # Populate the trigger-mode combo box from the camera's supported modes.
        tModes = self.dev.listParams('triggerMode')[0]
        for m in tModes:
            item = self.ui.triggerModeCombo.addItem(m)
        # Vertical cursor lines that track the image-view time position on
        # the trigger/exposure plots (when those channels exist).
        self.vLines = []
        if 'trigger' in self.plots:
            l = pg.InfiniteLine()
            self.vLines.append(l)
            self.plots['trigger'].addItem(l)
        if 'exposure' in self.plots:
            l = pg.InfiniteLine()
            self.vLines.append(l)
            self.plots['exposure'].addItem(l)
        # Tick marks showing individual frame times (set in handleResult).
        self.frameTicks = pg.VTickGroup()
        self.frameTicks.setYRange([0.8, 1.0])
        self.ui.imageView.sigTimeChanged.connect(self.timeChanged)
        self.taskRunner.sigTaskPaused.connect(self.taskPaused)
    def timeChanged(self, i, t):
        """Keep the plot cursor lines in sync with the image-view time."""
        for l in self.vLines:
            l.setValue(t)
    def saveState(self):
        """Return this widget's state plus the DAQ base-class state."""
        s = self.currentState()
        s['daqState'] = DAQGenericTaskGui.saveState(self)
        return s
    def restoreState(self, state):
        """Restore widget state; 'daqState' sub-dict goes to the base class."""
        self.stateGroup.setState(state)
        if 'daqState' in state:
            DAQGenericTaskGui.restoreState(self, state['daqState'])
    def generateTask(self, params=None):
        """Build the camera task description dict from the current UI state."""
        daqProt = DAQGenericTaskGui.generateTask(self, params)
        if params is None:
            params = {}
        state = self.currentState()
        task = {
            'record': state['recordCheck'],
            'triggerProtocol': state['triggerCheck'],
            'params': {
                'triggerMode': state['triggerModeCombo']
            }
        }
        task['channels'] = daqProt
        if state['releaseBetweenRadio']:
            # Release-between mode: no camera state is held across runs.
            task['pushState'] = None
            task['popState'] = None
        return task
    def taskSequenceStarted(self):
        """Save the camera state once at the start of a task sequence."""
        DAQGenericTaskGui.taskSequenceStarted(self)
        if self.ui.releaseAfterRadio.isChecked():
            self.dev.pushState('cam_proto_state')
    def taskFinished(self):
        """Restore the camera state saved in taskSequenceStarted."""
        DAQGenericTaskGui.taskFinished(self)
        if self.ui.releaseAfterRadio.isChecked():
            self.dev.popState('cam_proto_state')
    def taskPaused(self):  ## If the task is paused, return the camera to its previous state until we start again
        if self.ui.releaseAfterRadio.isChecked():
            # Pop then immediately re-push so the saved state is preserved
            # for the eventual taskFinished.
            self.dev.popState('cam_proto_state')
            self.dev.pushState('cam_proto_state')
    def currentState(self):
        """Return the UI widget state as a dict (via the WidgetGroup)."""
        return self.stateGroup.state()
    def handleResult(self, result, params):
        """Display acquired frames (if enabled) and forward DAQ results."""
        state = self.stateGroup.state()
        if state['displayCheck']:
            if result is None or len(result.frames()) == 0:
                print "No images returned from camera task."
                self.ui.imageView.clear()
            else:
                frameTimes, precise = result.frameTimes()
                if precise:
                    # Precise timestamps: use them as the time axis and mark
                    # each frame on the tick group.
                    self.ui.imageView.setImage(result.asMetaArray(), xvals=frameTimes)
                    self.frameTicks.setXVals(frameTimes)
                else:
                    self.ui.imageView.setImage(result.asMetaArray())
        DAQGenericTaskGui.handleResult(self, result.daqResult(), params)
    def quit(self):
        """Close the image view before shutting down the base GUI."""
        self.ui.imageView.close()
        DAQGenericTaskGui.quit(self)
| {
"content_hash": "a02f3547309a0c755a2d0dbb5a07fd69",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 154,
"avg_line_length": 38.104,
"alnum_prop": 0.6076002519420534,
"repo_name": "mgraupe/acq4",
"id": "4e2a0e4b5499ed5790307f68595255eabc77b4f6",
"size": "4787",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "acq4/devices/Camera/taskGUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "757367"
},
{
"name": "C++",
"bytes": "1222891"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "6110588"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
import m5
from m5.objects import *
# both traffic generator and communication monitor are only available
# if we have protobuf support, so potentially skip this test
# Abort the test early if gem5 was built without these (protobuf-dependent)
# SimObjects.
require_sim_object("TrafficGen")
require_sim_object("CommMonitor")
# even if this is only a traffic generator, call it cpu to make sure
# the scripts are happy
cpu = TrafficGen(config_file = "tests/quick/se/70.tgen/tgen-dram-ctrl.cfg")
# system simulated: one traffic generator, a narrow non-coherent bus, and a
# DDR3 memory controller in a 1 GHz clock domain
system = System(cpu = cpu, physmem = DDR3_1600_x64(),
                membus = NoncoherentBus(width = 16),
                clk_domain = SrcClockDomain(clock = '1GHz',
                                            voltage_domain =
                                            VoltageDomain()))
# add a communication monitor
system.monitor = CommMonitor()
# connect the traffic generator to the bus via a communication monitor
system.cpu.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# connect memory to the membus
system.physmem.port = system.membus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
# Timing mode so that memory accesses go through the modelled DRAM timing.
root.system.mem_mode = 'timing'
| {
"content_hash": "3006c2d79870b153fa88cad239c25a6a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 33.31578947368421,
"alnum_prop": 0.6595576619273301,
"repo_name": "Menooker/gem5_pcm",
"id": "c98b9265036b60e3e6aeb9557f6892054181a0f2",
"size": "3365",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/configs/tgen-dram-ctrl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "239800"
},
{
"name": "C",
"bytes": "956307"
},
{
"name": "C++",
"bytes": "13636554"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136695"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "19064"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "3241"
},
{
"name": "Python",
"bytes": "3516303"
},
{
"name": "Shell",
"bytes": "49489"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
from typing import Optional, Dict, Tuple
import os
import aiohttp
from hailtop.config import get_deploy_config, DeployConfig
from hailtop.utils import async_to_blocking, request_retry_transient_errors
from hailtop import httpx
from .tokens import get_tokens
def namespace_auth_headers(deploy_config: DeployConfig,
                           ns: str,
                           authorize_target: bool = True,
                           *,
                           token_file: Optional[str] = None
                           ) -> Dict[str, str]:
    """Build the auth headers for requests to services in namespace *ns*.

    When *authorize_target* is set, the bearer token for *ns* is attached.
    External clients talking to a non-default namespace additionally send
    the default-namespace token for the internal gateway.
    """
    headers: Dict[str, str] = {}
    if authorize_target:
        ns_token = get_tokens(token_file).namespace_token_or_error(ns)
        headers['Authorization'] = f'Bearer {ns_token}'
    is_external = deploy_config.location() == 'external'
    if is_external and ns != 'default':
        gateway_token = get_tokens(token_file).namespace_token_or_error("default")
        headers['X-Hail-Internal-Authorization'] = f'Bearer {gateway_token}'
    return headers
def service_auth_headers(deploy_config: DeployConfig,
                         service: str,
                         authorize_target: bool = True,
                         *,
                         token_file: Optional[str] = None
                         ) -> Dict[str, str]:
    """Auth headers for *service*, resolving its namespace automatically."""
    service_namespace = deploy_config.service_ns(service)
    return namespace_auth_headers(
        deploy_config,
        service_namespace,
        authorize_target,
        token_file=token_file)
def deploy_config_and_headers_from_namespace(namespace: Optional[str] = None, *, authorize_target: bool = True) -> Tuple[DeployConfig, Dict[str, str], str]:
    """Resolve the deploy config, auth headers and namespace to use.

    A None *namespace* falls back to the deploy config's default namespace;
    otherwise the deploy config is rebound to the requested namespace.
    """
    deploy_config = get_deploy_config()
    if namespace is None:
        namespace = deploy_config.default_namespace()
    else:
        deploy_config = deploy_config.with_default_namespace(namespace)
    headers = namespace_auth_headers(deploy_config, namespace, authorize_target=authorize_target)
    return deploy_config, headers, namespace
async def async_get_userinfo(*,
                             deploy_config: Optional[DeployConfig] = None,
                             session_id: Optional[str] = None,
                             client_session: Optional[httpx.ClientSession] = None):
    """Fetch the caller's userinfo record, or None when unauthenticated (401)."""
    if deploy_config is None:
        deploy_config = get_deploy_config()
    if session_id is not None:
        headers = {'Authorization': f'Bearer {session_id}'}
    else:
        headers = service_auth_headers(deploy_config, 'auth')
    userinfo_url = deploy_config.url('auth', '/api/v1alpha/userinfo')

    async def fetch(session):
        # 401 means "not logged in" and is mapped to None; every other
        # HTTP error propagates to the caller.
        try:
            resp = await request_retry_transient_errors(
                session, 'GET', userinfo_url, headers=headers)
            return await resp.json()
        except aiohttp.client_exceptions.ClientResponseError as err:
            if err.status == 401:
                return None
            raise

    if client_session is not None:
        return await fetch(client_session)
    async with httpx.client_session() as session:
        return await fetch(session)
def get_userinfo(deploy_config=None, session_id=None, client_session=None):
    """Blocking wrapper around async_get_userinfo."""
    coro = async_get_userinfo(
        deploy_config=deploy_config,
        session_id=session_id,
        client_session=client_session)
    return async_to_blocking(coro)
def copy_paste_login(copy_paste_token: str, namespace: Optional[str] = None):
    """Blocking wrapper around async_copy_paste_login."""
    coro = async_copy_paste_login(copy_paste_token, namespace)
    return async_to_blocking(coro)
async def async_copy_paste_login(copy_paste_token: str, namespace: Optional[str] = None):
    """Exchange a one-time copy/paste token for a session token.

    The new token is persisted in the local token store under *namespace*.
    Returns (namespace, username).
    """
    # authorize_target=False: we are logging in, so we have no token yet.
    deploy_config, headers, namespace = deploy_config_and_headers_from_namespace(namespace, authorize_target=False)
    async with aiohttp.ClientSession(
            raise_for_status=True,
            timeout=aiohttp.ClientTimeout(total=5),
            headers=headers) as session:
        async with await request_retry_transient_errors(
                session, 'POST', deploy_config.url('auth', '/api/v1alpha/copy-paste-login'),
                params={'copy_paste_token': copy_paste_token}) as resp:
            data = await resp.json()
    token = data['token']
    username = data['username']
    # Persist the session token for this namespace.
    tokens = get_tokens()
    tokens[namespace] = token
    dot_hail_dir = os.path.expanduser('~/.hail')
    if not os.path.exists(dot_hail_dir):
        # Token directory is created owner-only: it holds credentials.
        os.mkdir(dot_hail_dir, mode=0o700)
    tokens.write()
    return namespace, username
def get_user(username: str, namespace: Optional[str] = None) -> dict:
    """Blocking wrapper around async_get_user."""
    coro = async_get_user(username, namespace)
    return async_to_blocking(coro)
async def async_get_user(username: str, namespace: Optional[str] = None) -> dict:
    """Fetch the user record for *username* from the auth service."""
    deploy_config, headers, _ = deploy_config_and_headers_from_namespace(namespace)
    user_url = deploy_config.url('auth', f'/api/v1alpha/users/{username}')
    async with aiohttp.ClientSession(
            raise_for_status=True,
            timeout=aiohttp.ClientTimeout(total=30),
            headers=headers) as session:
        async with await request_retry_transient_errors(
                session, 'GET', user_url) as resp:
            return await resp.json()
def create_user(username: str, login_id: str, is_developer: bool, is_service_account: bool, namespace: Optional[str] = None):
    """Blocking wrapper around async_create_user."""
    coro = async_create_user(
        username, login_id, is_developer, is_service_account, namespace=namespace)
    return async_to_blocking(coro)
async def async_create_user(username: str, login_id: str, is_developer: bool, is_service_account: bool, namespace: Optional[str] = None):
    """Ask the auth service to create *username*; the response body is ignored."""
    deploy_config, headers, _ = deploy_config_and_headers_from_namespace(namespace)
    payload = {
        'login_id': login_id,
        'is_developer': is_developer,
        'is_service_account': is_service_account,
    }
    create_url = deploy_config.url('auth', f'/api/v1alpha/users/{username}/create')
    async with aiohttp.ClientSession(
            raise_for_status=True,
            timeout=aiohttp.ClientTimeout(total=30),
            headers=headers) as session:
        await request_retry_transient_errors(
            session, 'POST', create_url, json=payload
        )
def delete_user(username: str, namespace: Optional[str] = None):
    """Blocking wrapper around async_delete_user."""
    coro = async_delete_user(username, namespace=namespace)
    return async_to_blocking(coro)
async def async_delete_user(username: str, namespace: Optional[str] = None):
    """Ask the auth service to delete *username*.

    Uses a long (300s) timeout: deletion tears down per-user resources.
    """
    deploy_config, headers, _ = deploy_config_and_headers_from_namespace(namespace)
    delete_url = deploy_config.url('auth', f'/api/v1alpha/users/{username}')
    async with aiohttp.ClientSession(
            raise_for_status=True,
            timeout=aiohttp.ClientTimeout(total=300),
            headers=headers) as session:
        await request_retry_transient_errors(
            session, 'DELETE', delete_url
        )
| {
"content_hash": "8c1e83592374b8dcc5dc9ee5c184a143",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 156,
"avg_line_length": 40.24223602484472,
"alnum_prop": 0.6414570149714462,
"repo_name": "hail-is/hail",
"id": "1c4d59bff9257f789c1d7ca311a8b6dbebeef7a0",
"size": "6479",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hail/python/hailtop/auth/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
} |
"""From http://stackoverflow.com/a/12260597/400691."""
import sys
import dj_database_url
import django
from colour_runner.django_runner import ColourRunnerMixin
from django.conf import settings
from django.test.runner import DiscoverRunner
# Minimal in-memory Django settings: just enough to run the model_logging
# test suite without a project.
settings.configure(
    DATABASES={'default': dj_database_url.config(
        default='postgres://localhost/model_logging',
    )},
    DEFAULT_FILE_STORAGE='inmemorystorage.InMemoryStorage',
    MIDDLEWARE_CLASSES=(),
    # MD5 hashing is fast; acceptable only because these are test users.
    PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),
    ROOT_URLCONF='',
    SITE_ID=1,
    INSTALLED_APPS=(
        'model_logging',
        'django',
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
    ),
)
django.setup()
# Standard discover runner with coloured output mixed in.
class TestRunner(ColourRunnerMixin, DiscoverRunner):
    """Enable coloured output for tests."""
# Run the model_logging suite; exit non-zero on failure so CI fails.
test_runner = TestRunner(verbosity=1)
failures = test_runner.run_tests(['model_logging'])
if failures:
    sys.exit(1)
| {
"content_hash": "e757c2276e509ffa84301a145b0e7b48",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 72,
"avg_line_length": 23.90909090909091,
"alnum_prop": 0.6901140684410646,
"repo_name": "incuna/django-model-logging",
"id": "ac7445bdd9f8f2c6e4898ff67ec19c839e27bf44",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_logging/tests/run.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "221"
},
{
"name": "Python",
"bytes": "22897"
}
],
"symlink_target": ""
} |
import argparse
import json
import requests
import xml.dom.minidom
def log(msg):
    """Write *msg* to stdout; a single hook point for script output."""
    print(msg)
class Script:
    """Base class for Zone Manager REST API command-line scripts.

    Subclasses override add_arguments() and run(); main() parses the common
    options (host, output format, credentials) and dispatches to run().
    """
    def main(self):
        """Parse common CLI options, configure the instance, and call run()."""
        name = self.__class__.__name__.lower()
        parser = argparse.ArgumentParser(description = name)
        parser.add_argument('--host',
            help='[Host/IP Address]:port. Default port is localhost:6580',
            default='localhost:6580')
        parser.add_argument('-f', '--format',
            help='Format for results. Default is json.',
            default='json',
            choices=('json', 'txt', 'xml'))
        parser.add_argument('-u', '--user',
            help='Specify the user name and password to use for authentication')
        self.add_arguments(parser)
        args = parser.parse_args()
        # Split "host:port"; a bare host falls back to the default port.
        try:
            host, port = args.host.split(':')
        except ValueError:
            host = args.host
            port = "6580"
        self.host = host
        self.port = port
        self.baseurl = 'http://' + host + ':' + port + '/rfcode_zonemgr/zonemgr/'
        self.format = args.format
        # None when --user is absent; otherwise a (user, password) tuple.
        # Assumes the value is "user:password" -- a value without ':' yields
        # a 1-tuple and will fail later in requests.
        self.auth = args.user and tuple(args.user.split(':'))
        self.run(args)
    def url(self, api):
        """Full request URL for *api*, with the output-format extension."""
        return self.baseurl + api + '.' + self.format
    def http_get(self, urlpath, *args, **kwargs):
        """GET *urlpath*, pretty-print the response, and return it.

        With no positional args the module-level `requests` is used and the
        stored credentials are attached; otherwise the first positional arg
        is treated as an existing session.
        """
        if len(args) == 0:
            session = requests
            kwargs['auth'] = self.auth
        else:
            session = args[0]
        req = session.get(self.url(urlpath), **kwargs)
        log('URL: ' + req.url)
        req.raise_for_status()
        if self.format == 'json':
            # NOTE(review): `req.json` as a property matches old requests
            # releases; in modern requests it is a method -- confirm the
            # pinned requests version.
            log(json.dumps(req.json, indent=True))
        elif self.format == 'xml':
            log(xml.dom.minidom.parseString(req.text).toprettyxml())
        else:
            log(req.text)
        return req
    def add_arguments(self, parser):
        """Hook for subclasses to add their own CLI arguments."""
        pass
    def run(self, args):
        """Hook for subclasses: execute the script with parsed *args*."""
        pass
| {
"content_hash": "77594ed01fddf175d4f69529cb9dc270",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 96,
"avg_line_length": 30.015151515151516,
"alnum_prop": 0.5143866733972741,
"repo_name": "rfcode/zonemanager-api-examples",
"id": "7636e2e5a64d6b212364890b993e49990d07f9e6",
"size": "2618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/script.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "18774"
},
{
"name": "Python",
"bytes": "9763"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
} |
from tqdm import tqdm_notebook
import os
import glob
import natsort
import numpy as np
import scipy as sp
from scipy.optimize import leastsq, curve_fit
from scipy import interpolate, integrate
from scipy import spatial
# from scipy.interpolate import interp1d
from scipy.io import loadmat, savemat
# import scipy.misc
import importlib
from IPython.display import display, HTML
import pandas as pd
import pickle
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.colors as colors
from matplotlib import animation, rc
import matplotlib.ticker as mtick
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes
from mpl_toolkits.mplot3d import Axes3D, axes3d
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib import cm
from time import time
from datetime import datetime
from src.support_class import *
from src import jeffery_model as jm
from codeStore import support_fun as spf
from codeStore import support_fun_table as spf_tb
# %matplotlib notebook
from src.objComposite import *
# Render matplotlib animations as HTML5 video in notebooks.
rc('animation', html='html5')
fontsize = 40
PWD = os.getcwd()
# Load the stored problem parameters and rebuild the two-part bacterium
# composite, then visualize its velocity nodes (whole body and each part).
pickle_name = 'ecoC01B01_tau1c' + '_kwargs'
problem_kwargs = spf_tb.load_problem_kwargs(pickle_name)
ecoli_comp = create_ecoli_2part(**problem_kwargs)
ecoli_comp.show_u_nodes()
ecoli_comp.get_obj_list()[0].show_u_nodes()
ecoli_comp.get_obj_list()[1].show_u_nodes()
# tnode1, tnode2 = get_ecoli_nodes_split_at(0, 0, 0,
#                                           now_center=np.zeros(3), **problem_kwargs)
# fig = plt.figure(figsize=(5, 5))
# fig.patch.set_facecolor('white')
# ax0 = fig.add_subplot(111, projection='3d')
# for spine in ax0.spines.values():
#     spine.set_visible(False)
# ax0.plot(tnode1[:, 0], tnode1[:, 1], tnode1[:, 2])
# ax0.plot(tnode2[:, 0], tnode2[:, 1], tnode2[:, 2])
# ax0.set_xlabel('X')
# ax0.set_ylabel('Y')
# ax0.set_zlabel('Z')
# plt.show()
| {
"content_hash": "114ce198578c2c1deafab57d35edfd0f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 32.049180327868854,
"alnum_prop": 0.7416879795396419,
"repo_name": "pcmagic/stokes_flow",
"id": "19434aa868e438ef0c0f36087391508239c73b69",
"size": "3275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "try_code/try_show_twoPart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32833"
},
{
"name": "C++",
"bytes": "221"
},
{
"name": "CSS",
"bytes": "1645"
},
{
"name": "Fortran",
"bytes": "12772"
},
{
"name": "Gnuplot",
"bytes": "2957"
},
{
"name": "HTML",
"bytes": "22464"
},
{
"name": "JavaScript",
"bytes": "9553"
},
{
"name": "Jupyter Notebook",
"bytes": "326253745"
},
{
"name": "MATLAB",
"bytes": "82969"
},
{
"name": "Makefile",
"bytes": "6488"
},
{
"name": "Mathematica",
"bytes": "765914"
},
{
"name": "Objective-C",
"bytes": "793"
},
{
"name": "Python",
"bytes": "1404660"
}
],
"symlink_target": ""
} |
from .base import Predicate
from .explanation import Explanation
from .to_pred import to_pred
class is_all(Predicate):
    """
    Predicate that considers data valid if and only if every one of the
    given predicates considers the data valid.
    """

    def __init__(self, *predicates):
        # Flatten nested is_all instances so the predicate list stays one
        # level deep; everything else is coerced into a predicate.
        self._predicates = []
        for candidate in predicates:
            if isinstance(candidate, is_all):
                self._predicates += candidate._predicates
            else:
                self._predicates.append(to_pred(candidate))

    def _evaluate_explain(self, data, context):
        reasons = []
        errors = []
        for predicate in self._predicates:
            explanation = predicate.explain(data, context)
            if explanation:
                reasons.append(explanation)
            else:
                errors.append(explanation)
        if errors:
            return Explanation(
                False, 'not_all_hold',
                'at least one of the given predicates does not hold',
                errors,
            )
        return Explanation(
            True, 'all_hold',
            'all of the given predicates hold',
            reasons,
        )

    def _evaluate_no_explain(self, data, context):
        # Short-circuit on the first failing predicate.
        for predicate in self._predicates:
            if not predicate(data, context=context):
                return False
        return True
| {
"content_hash": "c61b70c4eff22ee4ab0b6727b9d77aa2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 32.92307692307692,
"alnum_prop": 0.5965732087227414,
"repo_name": "Daanvdk/is_valid",
"id": "99374f95775c1af0237b66c857cc75fae362404e",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "is_valid/is_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111083"
}
],
"symlink_target": ""
} |
import tarfile
import copy
import operator
import os
class TarFile(tarfile.TarFile):
    """tarfile.TarFile whose extractall() defers directory metadata.

    Directories are first extracted with a safe owner-only mode so their
    contents can always be written, then owner, mtime and permissions are
    applied deepest-first once every member has been extracted.
    """

    def extractall(self, path=".", members=None):
        """Extract all members from the archive to the current working
        directory and set owner, modification time and permissions on
        directories afterwards. `path' specifies a different directory
        to extract to. `members' is optional and must be a subset of the
        list returned by getmembers().
        """
        directories = []
        if members is None:
            members = self
        for tarinfo in members:
            if tarinfo.isdir():
                # Extract directories with a safe mode (copy the TarInfo so
                # the archive's own metadata is not mutated).
                directories.append(tarinfo)
                tarinfo = copy.copy(tarinfo)
                tarinfo.mode = 0o700
            self.extract(tarinfo, path)
        # Reverse sort directories: children before parents, so restoring a
        # restrictive mode on a parent cannot block updates to its children.
        directories.sort(key=operator.attrgetter('name'))
        directories.reverse()
        # Set correct owner, mtime and filemode on directories.
        for tarinfo in directories:
            dirpath = os.path.join(path, tarinfo.name)
            try:
                self.chown(tarinfo, dirpath)
                self.utime(tarinfo, dirpath)
                self.chmod(tarinfo, dirpath)
            # Bug fix: ExtractError was referenced unqualified but only
            # `import tarfile` is in scope, so the handler itself raised
            # NameError instead of handling the extraction error.
            except tarfile.ExtractError as e:
                if self.errorlevel > 1:
                    raise
                else:
                    self._dbg(1, "tarfile: %s" % e)
| {
"content_hash": "0e32dd2bad4902395488ffd2c181f1b4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 38.31578947368421,
"alnum_prop": 0.5686813186813187,
"repo_name": "mjmottram/snoing",
"id": "70119d94c59e76b26055431cfab99f6e46af6b9f",
"size": "1821",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/snoing_tarfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186832"
},
{
"name": "Shell",
"bytes": "552"
}
],
"symlink_target": ""
} |
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Count, Q
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from circuits.models import Circuit
from dcim.models import Site, Rack, Device
from ipam.models import IPAddress, Prefix, VLAN, VRF
from utilities.views import (
BulkDeleteView, BulkEditView, BulkImportView, ObjectDeleteView, ObjectEditView, ObjectListView,
)
from .models import Tenant, TenantGroup
from . import filters, forms, tables
#
# Tenant groups
#
class TenantGroupListView(ObjectListView):
    """List tenant groups, each annotated with its number of member tenants."""
    queryset = TenantGroup.objects.annotate(tenant_count=Count('tenants'))
    table = tables.TenantGroupTable
    template_name = 'tenancy/tenantgroup_list.html'
class TenantGroupEditView(PermissionRequiredMixin, ObjectEditView):
    """Create or edit a tenant group (requires change_tenantgroup)."""
    permission_required = 'tenancy.change_tenantgroup'
    model = TenantGroup
    form_class = forms.TenantGroupForm

    def get_return_url(self, request, obj):
        # Always return to the tenant group list after saving.
        return reverse('tenancy:tenantgroup_list')
class TenantGroupBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
    """Delete multiple tenant groups at once (requires delete_tenantgroup)."""
    cls = TenantGroup
    permission_required = 'tenancy.delete_tenantgroup'
    default_return_url = 'tenancy:tenantgroup_list'
#
# Tenants
#
class TenantListView(ObjectListView):
    """Filterable list of tenants; group is prefetched for the table."""
    queryset = Tenant.objects.select_related('group')
    filter = filters.TenantFilter
    filter_form = forms.TenantFilterForm
    table = tables.TenantTable
    template_name = 'tenancy/tenant_list.html'
def tenant(request, slug):
    """Tenant detail view: the tenant plus counts of its assigned objects."""
    tenant = get_object_or_404(Tenant, slug=slug)
    # Prefixes/IPs belong to the tenant when assigned directly, or when they
    # are unassigned but live in one of the tenant's VRFs.
    direct_or_vrf = Q(tenant=tenant) | Q(tenant__isnull=True, vrf__tenant=tenant)
    related = {
        'site_count': Site.objects.filter(tenant=tenant),
        'rack_count': Rack.objects.filter(tenant=tenant),
        'device_count': Device.objects.filter(tenant=tenant),
        'vrf_count': VRF.objects.filter(tenant=tenant),
        'prefix_count': Prefix.objects.filter(direct_or_vrf),
        'ipaddress_count': IPAddress.objects.filter(direct_or_vrf),
        'vlan_count': VLAN.objects.filter(tenant=tenant),
        'circuit_count': Circuit.objects.filter(tenant=tenant),
    }
    stats = {key: queryset.count() for key, queryset in related.items()}
    return render(request, 'tenancy/tenant.html', {
        'tenant': tenant,
        'stats': stats,
    })
class TenantEditView(PermissionRequiredMixin, ObjectEditView):
    """Create or edit a tenant (requires change_tenant)."""
    permission_required = 'tenancy.change_tenant'
    model = Tenant
    form_class = forms.TenantForm
    template_name = 'tenancy/tenant_edit.html'
    default_return_url = 'tenancy:tenant_list'
class TenantDeleteView(PermissionRequiredMixin, ObjectDeleteView):
    """Delete a single tenant (requires delete_tenant)."""
    permission_required = 'tenancy.delete_tenant'
    model = Tenant
    default_return_url = 'tenancy:tenant_list'
class TenantBulkImportView(PermissionRequiredMixin, BulkImportView):
    """Bulk-import tenants from pasted data (requires add_tenant)."""
    permission_required = 'tenancy.add_tenant'
    form = forms.TenantImportForm
    table = tables.TenantTable
    template_name = 'tenancy/tenant_import.html'
    default_return_url = 'tenancy:tenant_list'
class TenantBulkEditView(PermissionRequiredMixin, BulkEditView):
    """Edit multiple tenants at once (requires change_tenant)."""
    permission_required = 'tenancy.change_tenant'
    cls = Tenant
    filter = filters.TenantFilter
    form = forms.TenantBulkEditForm
    template_name = 'tenancy/tenant_bulk_edit.html'
    default_return_url = 'tenancy:tenant_list'
class TenantBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
    """Delete multiple tenants at once (requires delete_tenant)."""
    permission_required = 'tenancy.delete_tenant'
    cls = Tenant
    filter = filters.TenantFilter
    default_return_url = 'tenancy:tenant_list'
| {
"content_hash": "c57d921c91bf7ef54df8b4c17b166912",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 99,
"avg_line_length": 32.03478260869565,
"alnum_prop": 0.7209554831704669,
"repo_name": "Alphalink/netbox",
"id": "97ed0eb019768edf6110e464423264c26582a514",
"size": "3684",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/tenancy/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167396"
},
{
"name": "HTML",
"bytes": "399345"
},
{
"name": "JavaScript",
"bytes": "13295"
},
{
"name": "Python",
"bytes": "937982"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
} |
import glob
import optparse
import os
import re
import struct
import subprocess
import sys
import tempfile
import test_format
BUNDLE_SIZE = 32  # instruction-bundle size in bytes; code is NOP-padded to a multiple of this
def AssertEquals(actual, expected):
    """Raise AssertionError showing both values unless actual == expected."""
    if actual == expected:
        return
    raise AssertionError('\nEXPECTED:\n"""\n%s"""\n\nACTUAL:\n"""\n%s"""'
                         % (expected, actual))
def ParseHex(hex_content):
  """Parse content of @hex section and return binary data.

  Args:
    hex_content: Content of @hex section as a string.

  Yields:
    One string of binary data per logical line of the section.  '#' starts a
    comment, blank lines are skipped, and a line ending in a double backslash
    is continued on the following line.
  """
  pending = []
  for raw_line in hex_content.split('\n'):
    code = raw_line.partition('#')[0].strip()
    if not code:
      continue
    continued = code.endswith(r'\\')
    if continued:
      code = code[:-2]
    for token in code.split():
      assert len(token) == 2
      pending.append(chr(int(token, 16)))
    if not continued:
      assert len(pending) > 0
      yield ''.join(pending)
      pending = []
  assert pending == [], r'r"\\" should not appear on the last line'
def CreateElfContent(bits, text_segment):
  """Wrap raw machine code into a minimal ELF image with one PT_LOAD segment.

  Args:
    bits: Target subarchitecture, 32 or 64; selects ELF class, machine and
        the struct formats used for the headers.
    text_segment: String with the machine code to place in the segment.

  Returns:
    String containing the bytes of the ELF file: ELF header, one program
    header at offset 256, and the code at file offset 512.
  """
  # ELF identification: magic plus ELFCLASS32/ELFCLASS64.
  e_ident = {
      32: '\177ELF\1',
      64: '\177ELF\2'}[bits]
  # e_machine: 3 = EM_386, 62 = EM_X86_64.
  e_machine = {
      32: 3,
      64: 62}[bits]
  e_phoff = 256
  e_phnum = 1
  e_phentsize = 0
  elf_header_fmt = {
      32: '<16sHHIIIIIHHHHHH',
      64: '<16sHHIQQQIHHHHHH'}[bits]
  elf_header = struct.pack(
      elf_header_fmt,
      e_ident, 0, e_machine, 0, 0, e_phoff, 0, 0, 0,
      e_phentsize, e_phnum, 0, 0, 0)
  p_type = 1  # PT_LOAD
  p_flags = 5  # r-x
  p_filesz = len(text_segment)
  p_memsz = p_filesz
  p_vaddr = 0
  p_offset = 512
  p_align = 0
  p_paddr = 0
  # 32- and 64-bit program headers differ both in field widths and in the
  # position of p_flags, hence the two distinct field orderings below.
  pheader_fmt = {
      32: '<IIIIIIII',
      64: '<IIQQQQQQ'}[bits]
  pheader_fields = {
      32: (p_type, p_offset, p_vaddr, p_paddr,
           p_filesz, p_memsz, p_flags, p_align),
      64: (p_type, p_flags, p_offset, p_vaddr,
           p_paddr, p_filesz, p_memsz, p_align)}[bits]
  pheader = struct.pack(pheader_fmt, *pheader_fields)
  # Assemble: header, zero padding up to e_phoff, program header, zero
  # padding up to p_offset, then the code itself.
  result = elf_header
  assert len(result) <= e_phoff
  result += '\0' * (e_phoff - len(result))
  result += pheader
  assert len(result) <= p_offset
  result += '\0' * (p_offset - len(result))
  result += text_segment
  return result
def RunRdfaValidator(options, data):
  """Run the RDFA validator binary on a piece of machine code.

  Args:
    options: optparse options; uses .bits (32 or 64) and .rdfaval (path to
        the validator executable).
    data: String of machine code; NOP-padded to a whole bundle here.

  Returns:
    Tuple (return_code, stdout) from the validator process.
  """
  # Add nops to make it bundle-sized.
  data += (-len(data) % BUNDLE_SIZE) * '\x90'
  assert len(data) % BUNDLE_SIZE == 0

  # delete=False because the validator subprocess reopens the file by name;
  # the file is removed explicitly in the finally block below.
  tmp = tempfile.NamedTemporaryFile(mode='wb', delete=False)
  try:
    tmp.write(CreateElfContent(options.bits, data))
    tmp.close()

    proc = subprocess.Popen([options.rdfaval, tmp.name],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # The validator is expected to write nothing to stderr.
    assert stderr == '', stderr
    return_code = proc.wait()
  finally:
    tmp.close()
    os.remove(tmp.name)

  # Remove the carriage return characters that we get on Windows.
  stdout = stdout.replace('\r', '')

  return return_code, stdout
def ParseRdfaMessages(stdout):
  """Get (offset, message) pairs from rdfa validator output.

  Args:
    stdout: Output of rdfa validator as string.

  Yields:
    Pairs (offset, message).  The final 'Valid.'/'Invalid.' verdict line,
    blank lines and warnings are skipped.
  """
  for raw in stdout.split('\n'):
    text = raw.strip()
    if text == '':
      continue
    if re.match(r"(Valid|Invalid)\.$", text):
      continue

    match = re.match(r'([0-9a-f]+): (.*)$', text, re.IGNORECASE)
    assert match is not None, "can't parse line '%s'" % text
    message = match.group(2)
    if message.startswith('warning - '):
      continue
    yield int(match.group(1), 16), message
def RunRdfaWithNopPatching(options, data_chunks):
  r"""Run RDFA validator with NOP patching for better error reporting.

  If the RDFA validator encounters an invalid instruction, it resumes
  validation from the beginning of the next bundle, while the original,
  non-DFA-based validators skip maybe one or two bytes and recover.  To
  mitigate such spurious disagreements, whenever RDFA complains that a
  particular piece can't be decoded, the problematic chunk (which usually
  corresponds to one instruction) is replaced with NOPs and the validator is
  rerun from the beginning, until no new chunk gets patched.  All errors
  reported on all such runs (sans duplicates) are taken as the validation
  result, each tagged with the stage at which it was produced so 'primary'
  errors can be distinguished from additional ones.

  Args:
    options: Options as produced by optparse.
        Relevant fields are .bits and .update.
    data_chunks: List of strings containing binary data. For the described
        heuristic to work better it is desirable (although not absolutely
        required) that strings correspond to singular instructions, as it
        usually happens in @hex section.

  Returns:
    String representing combined output from all stages. Error messages are
    of the form
      <offset in hex>: [<stage>] <message>
    followed by a final 'return code: <n>' line carrying the return code of
    the very first (unpatched) run.
  """
  data_chunks = list(data_chunks)

  # Map each chunk's starting offset to its index, so an error offset can be
  # traced back to the chunk to patch.
  offset_to_chunk = {}
  offset = 0
  for i, chunk in enumerate(data_chunks):
    offset_to_chunk[offset] = i
    offset += len(chunk)

  first_return_code = None
  messages = []  # list of triples (offset, stage, message)
  messages_set = set()  # set of pairs (offset, message), for deduplication

  stage = 0
  while True:
    return_code, stdout = RunRdfaValidator(options, ''.join(data_chunks))
    if first_return_code is None:
      first_return_code = return_code

    nop_patched = False
    for offset, message in ParseRdfaMessages(stdout):
      if (offset, message) in messages_set:
        continue
      messages.append((offset, stage, message))
      messages_set.add((offset, message))

      if offset in offset_to_chunk and message == 'unrecognized instruction':
        chunk_no = offset_to_chunk[offset]
        nops_chunk = '\x90' * len(data_chunks[chunk_no])
        if nops_chunk != data_chunks[chunk_no]:
          data_chunks[chunk_no] = nops_chunk
          nop_patched = True

    if not nop_patched:
      break
    stage += 1

  # Tuple parameters in lambdas were removed in Python 3 (PEP 3113), so index
  # the (offset, stage, message) triple explicitly instead of unpacking it.
  messages.sort(key=lambda entry: (entry[0], entry[1]))

  result = ''.join('%x: [%d] %s\n' % (offset, stage, message)
                   for offset, stage, message in messages)
  result += 'return code: %d\n' % first_return_code
  return result
def CheckValidJumpTargets(options, data_chunks):
  """Check that the validator infers valid jump targets correctly.

  Attempts to append, for every byte offset in the given code, a jump to
  that offset.  The jump must validate if and only if the offset falls on a
  boundary between data chunks (i.e. on an instruction/superinstruction
  boundary).  Requires the original snippet itself to be valid.

  Args:
    options: Options as produced by optparse.
    data_chunks: List of strings containing binary data. Each such chunk is
        expected to correspond to indivisible instruction or superinstruction.

  Returns:
    None.
  """
  code = ''.join(data_chunks)
  # Add nops to make it bundle-sized.
  code += (-len(code) % BUNDLE_SIZE) * '\x90'
  assert len(code) % BUNDLE_SIZE == 0

  # Since we check validity of jump target by adding jump and validating
  # resulting piece, we rely on validity of original snippet.
  return_code, _ = RunRdfaValidator(options, code)
  assert return_code == 0, 'Can only validate jump targets on valid snippet'

  # Chunk boundaries (including the end of the last chunk) are the only
  # offsets a jump may legally land on.
  boundaries = set()
  position = 0
  for chunk in data_chunks:
    boundaries.add(position)
    position += len(chunk)
  boundaries.add(position)

  for target in range(position + 1):
    # Encode JMP with 32-bit relative target.
    jump = '\xe9' + struct.pack('<i', target - (len(code) + 5))
    return_code, _ = RunRdfaValidator(options, code + jump)
    if return_code == 0:
      assert target in boundaries, (
          'Offset 0x%x was reported valid jump target' % target)
    else:
      assert target not in boundaries, (
          'Offset 0x%x was reported invalid jump target' % target)
def Test(options, items_list):
info = dict(items_list)
if 'rdfa_output' in info:
data_chunks = list(ParseHex(info['hex']))
stdout = RunRdfaWithNopPatching(options, data_chunks)
print ' Checking rdfa_output field...'
if options.update:
if stdout != info['rdfa_output']:
print ' Updating rdfa_output field...'
info['rdfa_output'] = stdout
else:
AssertEquals(stdout, info['rdfa_output'])
last_line = re.search('return code: (-?\d+)\n$', info['rdfa_output'])
expected_return_code = int(last_line.group(1))
# This test only works for valid snippets, see CheckValidJumpTargets
# for details.
if expected_return_code == 0:
print ' Checking jump targets...'
CheckValidJumpTargets(options, data_chunks)
# Update field values, but preserve their order.
items_list = [(field, info[field]) for field, _ in items_list]
return items_list
def main(args):
parser = optparse.OptionParser()
parser.add_option('--rdfaval', default='validator_test',
help='Path to the ncval validator executable')
parser.add_option('--bits',
type=int,
help='The subarchitecture to run tests against: 32 or 64')
parser.add_option('--update',
default=False,
action='store_true',
help='Regenerate golden fields instead of testing')
options, args = parser.parse_args(args)
if options.bits not in [32, 64]:
parser.error('specify --bits 32 or --bits 64')
if len(args) == 0:
parser.error('No test files specified')
processed = 0
for glob_expr in args:
test_files = sorted(glob.glob(glob_expr))
if len(test_files) == 0:
raise AssertionError(
'%r matched no files, which was probably not intended' % glob_expr)
for test_file in test_files:
print 'Testing %s...' % test_file
tests = test_format.LoadTestFile(test_file)
tests = [Test(options, test) for test in tests]
if options.update:
test_format.SaveTestFile(tests, test_file)
processed += 1
print '%s test files were processed.' % processed
if __name__ == '__main__':
  # Script entry point: forward CLI arguments (minus program name) to main().
  main(sys.argv[1:])
| {
"content_hash": "50e17cf6ea42d1dfce24437cd1a27520",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 80,
"avg_line_length": 32.123655913978496,
"alnum_prop": 0.6563179916317992,
"repo_name": "Lind-Project/native_client",
"id": "01b8454bbef97cc2c6ecde5b0afe60a4ef33ed85",
"size": "12141",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/trusted/validator_x86/testscripts/run_rdfa_validator_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "149910"
},
{
"name": "Batchfile",
"bytes": "10418"
},
{
"name": "C",
"bytes": "10425715"
},
{
"name": "C++",
"bytes": "7409986"
},
{
"name": "HTML",
"bytes": "183711"
},
{
"name": "JavaScript",
"bytes": "5925"
},
{
"name": "Logos",
"bytes": "647"
},
{
"name": "Makefile",
"bytes": "65439"
},
{
"name": "Objective-C++",
"bytes": "2658"
},
{
"name": "Python",
"bytes": "2127774"
},
{
"name": "Ragel",
"bytes": "104506"
},
{
"name": "Shell",
"bytes": "454354"
}
],
"symlink_target": ""
} |
"""Discover Netgear routers."""
from netdisco.util import urlparse
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
    """Add support for discovering Netgear routers."""

    def info_from_entry(self, entry):
        """Return the most important info from a uPnP entry."""
        parsed_location = urlparse(entry.values['location'])
        model_number = entry.description['device']['modelNumber']
        return (model_number, parsed_location.hostname)

    def get_entries(self):
        """Get all the Netgear uPnP entries."""
        criteria = {
            "manufacturer": "NETGEAR, Inc.",
            "deviceType": "urn:schemas-upnp-org:device:InternetGatewayDevice:1"
        }
        return self.find_by_device_description(criteria)
| {
"content_hash": "8ffcec99b9fcbdcc9717ce9bc039858d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 34.05,
"alnum_prop": 0.657856093979442,
"repo_name": "brburns/netdisco",
"id": "a0e955eded131c82dc13c8a97077be86490888db",
"size": "681",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "netdisco/discoverables/netgear_router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "58525"
}
],
"symlink_target": ""
} |
from pyparsing import OneOrMore, stringEnd, ParseException
from pyzebos.parsers.config.routemap import routeMap
from pyzebos.parsers.common import zebosComment
import os
# Grammar that accepts one or more route-map blocks (or ZebOS comments) and
# requires the whole input to be consumed (stringEnd).
routeMapParser = OneOrMore(routeMap ^ zebosComment) + stringEnd

# Directory containing the sample route-map configuration files under test.
config_path = os.path.join(os.getcwd(), 'tests', 'configurations', 'route-map')
def test_routemap_parse_ok():
for (dirpath, dirnames, filenames) in os.walk(config_path):
for filename in filenames:
config_filename = os.path.join(dirpath, filename)
try:
tokens = routeMapParser.parseFile(config_filename)
except ParseException as pe:
print "Failed parsing file: {}".format(config_filename)
print "Line: {} column: {} message: {}".format(pe.lineno, pe.col, pe.msg)
print pe.line
print " " * (pe.col - 1) + "^"
raise AssertionError
| {
"content_hash": "7bb7d1f29225c21ce9aff3250ebaab19",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 89,
"avg_line_length": 43.285714285714285,
"alnum_prop": 0.6391639163916392,
"repo_name": "atopuzov/pyzebos",
"id": "8e91d7f96abb4ab7f85fdd94f1f47f0ae88bf76a",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_full_routemap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "166940"
}
],
"symlink_target": ""
} |
from io import BytesIO
from pg8000.core import (
Context,
CoreConnection,
NULL_BYTE,
PASSWORD,
_create_message,
)
def test_handle_AUTHENTICATION_3(mocker):
    """Shouldn't send a FLUSH message, as FLUSH only used in extended-query"""
    mocker.patch.object(CoreConnection, "__init__", lambda x: None)
    con = CoreConnection()
    secret = "barbour".encode("utf8")
    con.password = secret
    con._flush = mocker.Mock()

    sent = BytesIO()
    con._write = sent.write

    CoreConnection.handle_AUTHENTICATION_REQUEST(con, b"\x00\x00\x00\x03", None)

    expected = _create_message(PASSWORD, secret + NULL_BYTE)
    assert sent.getvalue() == expected
def test_create_message():
    """A password message is its code, a length prefix and the payload."""
    payload = "barbour".encode("utf8") + NULL_BYTE
    assert _create_message(PASSWORD, payload) == b"p\x00\x00\x00\x0cbarbour\x00"
def test_handle_ERROR_RESPONSE(mocker):
    """Check it handles invalid encodings in the error messages"""
    mocker.patch.object(CoreConnection, "__init__", lambda x: None)
    con = CoreConnection()
    con._client_encoding = "utf8"

    # b"\xc2" on its own is not valid UTF-8; decoding must not blow up.
    payload = b"S\xc2err" + NULL_BYTE + NULL_BYTE
    ctx = Context(None)
    CoreConnection.handle_ERROR_RESPONSE(con, payload, ctx)

    assert str(ctx.error) == "{'S': '�err'}"
| {
"content_hash": "ac3408b6467c72c6c7368af12cdcaf57",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 30.875,
"alnum_prop": 0.6704453441295547,
"repo_name": "tlocke/pg8000",
"id": "c44caa3a3f2cb6c9c9c7cc9354673db3a70b74b5",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/native/test_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "331262"
}
],
"symlink_target": ""
} |
import unittest
import os
from EXOSIMS.Prototypes import TargetList
import numpy as np
from astropy import units as u
from astropy.time import Time
from tests.TestSupport.Info import resource_path
from tests.TestSupport.Utilities import RedirectStreams
import json
import copy
r"""TargetList module unit tests
Paul Nunez, JPL, Aug. 2016
"""
# scriptfile = resource_path("test-scripts/template_prototype_testing.json")
class Test_TargetList_prototype(unittest.TestCase):
    """Unit tests for the prototype TargetList module: target filters,
    derived stellar properties, and constructor plumbing."""

    def setUp(self):
        # Sink for module chatter produced during TargetList construction.
        self.dev_null = open(os.devnull, "w")
        self.script = resource_path("test-scripts/template_minimal.json")
        with open(self.script) as f:
            self.spec = json.loads(f.read())
        self.spec["ntargs"] = 10  # generate fake targets list with 10 stars

    def tearDown(self):
        self.dev_null.close()

    def getTL(self, addkeys=None):
        """Build a fresh TargetList from the base spec, optionally adding or
        overriding top-level spec keys via ``addkeys``."""
        spec = copy.deepcopy(self.spec)
        if addkeys:
            for key in addkeys:
                spec[key] = addkeys[key]
        # quiet the chatter at initialization
        with RedirectStreams(stdout=self.dev_null):
            self.targetlist = TargetList.TargetList(**spec)
        self.opticalsystem = self.targetlist.OpticalSystem
        self.planetpop = self.targetlist.PlanetPopulation

    def test_nan_filter(self):
        """NaNs in any target attribute drop exactly the affected stars."""
        self.getTL()
        # First ensure that application of nan_filter initially does nothing
        n0 = len(self.targetlist.Vmag)
        self.targetlist.nan_filter()
        self.assertEqual(len(self.targetlist.Name), n0)
        # Introduce one nan value and check that it is removed
        self.targetlist.Vmag[2] = float("nan")
        self.targetlist.nan_filter()
        self.assertEqual(len(self.targetlist.Name), n0 - 1)
        # insert another nan for testing
        self.targetlist.dist[0] = float("nan")
        self.targetlist.nan_filter()
        self.assertEqual(len(self.targetlist.Name), n0 - 2)

    def test_binary_filter(self):
        """Stars flagged by Binary_Cut are removed."""
        self.getTL()
        n0 = self.targetlist.nStars
        # adding 3 binaries
        self.targetlist.Binary_Cut[1] = True
        self.targetlist.Binary_Cut[3] = True
        self.targetlist.Binary_Cut[5] = True
        self.targetlist.binary_filter()
        n1 = self.targetlist.nStars
        # 3 binaries should be removed
        self.assertEqual(n1, n0 - 3)

    def test_outside_IWA_filter(self):
        """Targets whose whole planet population sits inside the IWA go away."""
        self.getTL()
        # initial application of IWA filter should do nothing
        n0 = self.targetlist.nStars
        self.targetlist.outside_IWA_filter()
        self.assertEqual(n0, self.targetlist.nStars)
        # Test filtering (need to have different distances)
        self.targetlist.dist = np.linspace(1, 5, self.targetlist.nStars) * u.pc
        self.opticalsystem.IWA = 0.5 * u.arcsec
        # Expected survivors: stars whose IWA projection is smaller than the
        # largest planet orbital radius.
        n_expected = len(
            np.where(
                np.tan(0.5 * u.arcsec) * self.targetlist.dist
                < np.max(self.planetpop.rrange)
            )[0]
        )
        self.targetlist.outside_IWA_filter()
        self.assertEqual(self.targetlist.nStars, n_expected)
        # now test with scaleOrbits
        self.planetpop.scaleOrbits = True
        self.targetlist.L[0] = 1e-3
        self.targetlist.outside_IWA_filter()
        self.assertEqual(self.targetlist.nStars, n_expected - 1)
        # Test limiting case where everything would be removed
        self.opticalsystem.IWA = 100 * u.arcsec
        with self.assertRaises(IndexError):
            self.targetlist.outside_IWA_filter()

    def test_vis_mag_filter(self):
        """Stars fainter than the given visual magnitude are removed."""
        self.getTL()
        # initial application of filter should do nothing
        n0 = self.targetlist.nStars
        self.targetlist.vis_mag_filter(np.inf)
        self.assertEqual(n0, self.targetlist.nStars)
        # now populate different Vmags
        self.targetlist.Vmag[0] = 9
        self.targetlist.Vmag[5] = 10
        self.targetlist.vis_mag_filter(5)
        self.assertEqual(self.targetlist.nStars, n0 - 2)
        # Test limiting case
        with self.assertRaises(IndexError):
            self.targetlist.vis_mag_filter(-1)

    def test_dmag_filter(self):
        """max_dmag_filter removes stars whose intCutoff_dMag is unreachable."""
        self.getTL()
        # test initial null filter
        n0 = self.targetlist.nStars
        self.targetlist.intCutoff_dMag = np.repeat(np.inf, self.targetlist.nStars)
        self.targetlist.max_dmag_filter()
        self.assertEqual(n0, self.targetlist.nStars)
        # Test removing single target
        self.targetlist.intCutoff_dMag[0] = 0
        self.targetlist.max_dmag_filter()
        self.assertEqual(self.targetlist.nStars, n0 - 1)
        # Test limiting case of intCutoff_dMag = 0
        self.targetlist.intCutoff_dMag = np.repeat(0.0, self.targetlist.nStars)
        with self.assertRaises(IndexError):
            self.targetlist.max_dmag_filter()
        # Test limiting case that distance to a star is (effectively) infinite
        # turmon: changed from inf to 1e8 because inf causes a confusing RuntimeWarning
        self.targetlist.intCutoff_dMag = np.repeat(30, self.targetlist.nStars)
        self.planetpop.rrange = np.array([1e8, 1e8]) * u.AU
        with self.assertRaises(IndexError):
            self.targetlist.max_dmag_filter()

    def test_completeness_filter(self):
        """Stars below the minimum completeness threshold are removed."""
        self.getTL()
        n0 = self.targetlist.nStars
        self.targetlist.completeness_filter()
        self.assertEqual(self.targetlist.nStars, n0)
        # test removing one target
        self.targetlist.intCutoff_comp[0] = self.targetlist.Completeness.minComp / 2
        self.targetlist.completeness_filter()
        self.assertEqual(self.targetlist.nStars, n0 - 1)
        # Test limiting case of minComp = 1.0
        self.targetlist.Completeness.minComp = 1.0
        with self.assertRaises(IndexError):
            self.targetlist.completeness_filter()

    def test_life_expectancy_filter(self):
        """Stars with B-V color below 0.3 are removed."""
        self.getTL()
        # test default removal of BV < 0.3 (hard-coded)
        n0 = self.targetlist.nStars
        self.targetlist.BV = np.repeat(0.5, self.targetlist.nStars)
        self.targetlist.life_expectancy_filter()
        self.assertEqual(n0, self.targetlist.nStars)
        # Test removing single target
        self.targetlist.BV[0] = 0
        self.targetlist.life_expectancy_filter()
        self.assertEqual(self.targetlist.nStars, n0 - 1)
        # test remove all
        self.targetlist.BV = np.repeat(0, self.targetlist.nStars)
        with self.assertRaises(IndexError):
            self.targetlist.life_expectancy_filter()

    def test_main_sequence_filter(self):
        """Stars off the main sequence (by BV/MV) are removed."""
        self.getTL()
        # initial application should do nothing
        n0 = self.targetlist.nStars
        self.targetlist.main_sequence_filter()
        self.assertEqual(n0, self.targetlist.nStars)
        # test remove one
        self.targetlist.BV[0] = 10
        self.targetlist.MV[0] = 10
        self.targetlist.main_sequence_filter()
        self.assertEqual(n0 - 1, self.targetlist.nStars)
        # test remove all
        self.targetlist.BV = np.repeat(10, self.targetlist.nStars)
        self.targetlist.MV = np.repeat(10, self.targetlist.nStars)
        with self.assertRaises(IndexError):
            self.targetlist.main_sequence_filter()

    def test_stellar_mass(self):
        """stellar_mass recovers ~1 solar mass for the Sun's absolute magnitude."""
        self.getTL()
        # Test with absolute magnitue of the sun
        self.targetlist.MV = np.array([4.83])
        self.targetlist.stellar_mass()
        # Should give 1 solar mass approximately
        np.testing.assert_allclose(
            self.targetlist.MsEst[0], 1.05865 * u.solMass, rtol=1e-5, atol=0
        )
        # Relative tolerance is 0.07
        np.testing.assert_allclose(
            self.targetlist.MsTrue[0], 1.05865 * u.solMass, rtol=0.07, atol=0
        )

    def test_fgk_filter(self):
        """Non-FGK spectral types are removed."""
        self.getTL()
        n0 = self.targetlist.nStars
        self.targetlist.fgk_filter()
        self.assertEqual(n0, self.targetlist.nStars)
        # check remove 1
        self.targetlist.Spec[0] = "B0II"
        self.targetlist.fgk_filter()
        self.assertEqual(n0 - 1, self.targetlist.nStars)

    def test_revise_lists(self):
        """revise_lists keeps exactly the requested indices."""
        self.getTL()
        # Check that passing all indices does not change list
        # and that coordinates are in degrees
        i0 = range(len(self.targetlist.Name))
        self.targetlist.revise_lists(i0)
        self.assertEqual(len(i0), len(self.targetlist.Name))
        # Check to see that only 3 elements are retained
        i1 = np.array([1, 5, 8])
        self.targetlist.revise_lists(i1)
        self.assertEqual(len(i1), len(self.targetlist.Name))
        # Check to see that passing no indices yields an emply list
        i2 = []
        with self.assertRaises(IndexError):
            self.targetlist.revise_lists(i2)

    def test_fillPhotometry(self):
        """
        Filling in photometry should result in no nulls in Imag
        """
        self.getTL(addkeys={"fillPhotometry": True})
        self.assertTrue(self.targetlist.fillPhotometry)
        self.assertTrue(
            np.all(self.targetlist.Imag != 0)
            and np.all(~np.isnan(self.targetlist.Imag))
        )

    def test_completeness_specs(self):
        """
        Test completeness_specs logic
        """
        self.getTL()
        # test case where no completeness specs given
        self.assertEqual(
            self.targetlist.PlanetPopulation.__class__.__name__,
            self.targetlist.Completeness.PlanetPopulation.__class__.__name__,
        )
        # test case where completeness specs given
        self.getTL(
            addkeys={
                "completeness_specs": {
                    "modules": {
                        "PlanetPopulation": "EarthTwinHabZone1",
                        "PlanetPhysicalModel": "PlanetPhysicalModel",
                    }
                }
            }
        )
        self.assertNotEqual(
            self.targetlist.PlanetPopulation.__class__.__name__,
            self.targetlist.Completeness.PlanetPopulation.__class__.__name__,
        )

    def test_starprop(self):
        """
        Test starprop outputs
        """
        self.getTL()
        # setting up 1-dim and multi-dim arrays
        timeRange = np.arange(2000.5, 2019.5, 5)  # 1x4 time array
        # 1x5 time array, same size as sInds later
        timeRangeEql = np.linspace(2000.5, 2019.5, 5)
        # time Quantity arrays
        t_ref = Time(timeRange[0], format="jyear")  # 1x1 time array
        t_refArray = Time(timeRange, format="jyear")  # 1x4 time array
        # 1x5 time array, equal to sInds size
        t_refEqual = Time(timeRangeEql, format="jyear")
        # 1x5 time array, all elements are equal
        t_refCopy = Time(np.tile(timeRange[0], 5), format="jyear")
        # sInd arrays
        sInd = np.array([0])
        sInds = np.array([0, 1, 2, 3, 4])
        # testing Static Stars (set up as a default)
        r_targSSBothSingle = self.targetlist.starprop(sInd, t_ref)  # should be 1x3
        r_targSSMultSinds = self.targetlist.starprop(sInds, t_ref)  # should be 5x3
        r_targSSMultBoth = self.targetlist.starprop(
            sInds, t_refArray
        )  # should be 5x4x3
        r_targSSEqualBoth = self.targetlist.starprop(sInds, t_refEqual)  # should be 5x3
        r_targSSCopyTimes = self.targetlist.starprop(
            sInd, t_refCopy
        )  # should be 1x3 (equal defaults to 1)
        self.assertEqual(r_targSSBothSingle.shape, (1, 3))
        self.assertEqual(r_targSSMultSinds.shape, (sInds.size, 3))
        self.assertEqual(r_targSSMultBoth.shape, (t_refArray.size, sInds.size, 3))
        self.assertEqual(r_targSSEqualBoth.shape, (sInds.size, 3))
        self.assertEqual(r_targSSCopyTimes.shape, (1, 3))
        # testing without Static Stars
        self.targetlist.starprop_static = None
        r_targBothSingle = self.targetlist.starprop(sInd, t_ref)
        r_targMultSinds = self.targetlist.starprop(sInds, t_ref)
        r_targMultTimes = self.targetlist.starprop(sInd, t_refArray)  # should be 5x3
        r_targMultBoth = self.targetlist.starprop(sInds, t_refArray)
        r_targEqualBoth = self.targetlist.starprop(sInds, t_refEqual)
        r_targCopyTimes = self.targetlist.starprop(sInd, t_refCopy)
        self.assertEqual(r_targBothSingle.shape, (1, 3))
        self.assertEqual(r_targMultSinds.shape, (sInds.size, 3))
        self.assertEqual(r_targMultTimes.shape, (t_refArray.size, 3))
        self.assertEqual(r_targMultBoth.shape, (t_refArray.size, sInds.size, 3))
        self.assertEqual(r_targEqualBoth.shape, (sInds.size, 3))
        self.assertEqual(r_targCopyTimes.shape, (1, 3))
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| {
"content_hash": "c95f3e14ed61a417c734cd32ef84e275",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 88,
"avg_line_length": 36.456896551724135,
"alnum_prop": 0.6274927090722787,
"repo_name": "dsavransky/EXOSIMS",
"id": "57657fc232dd0f2fb5462a042b5d6575421b98ee",
"size": "12687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/Prototypes/test_TargetList.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8087"
},
{
"name": "Cython",
"bytes": "2459"
},
{
"name": "Python",
"bytes": "2936469"
}
],
"symlink_target": ""
} |
from facefit import cascade
from facefit.pixel_extractor import PixelExtractorBuilder
from tree import RegressionTreeBuilder
from forest import RegressionForestBuilder
class ERTBuilder(cascade.CascadedShapeRegressorBuilder):
    """Builder for an Ensemble-of-Regression-Trees cascaded shape regressor.

    Wires a pixel-difference feature extractor into a regression-forest weak
    learner and hands the result to the cascade builder.
    """
    def __init__(self, n_landmarks=68, n_stages=10, n_trees=500, tree_depth=5, n_candidate_splits=20,
                 exponential_prior=True, n_perturbations=20, n_pixels=400, kappa=0.3, MU=0.1):
        # Pixel intensities sampled around the current landmark estimates.
        feature_extractor_builder = PixelExtractorBuilder(n_landmarks=n_landmarks, n_pixels=n_pixels, kappa=kappa)
        tree_builder = RegressionTreeBuilder(depth=tree_depth, n_test_features=n_candidate_splits,
                                             exponential_prior=exponential_prior, MU=MU)
        forest_builder = RegressionForestBuilder(n_trees=n_trees, tree_builder=tree_builder,
                                                 feature_extractor_builder=feature_extractor_builder)
        # Name the class explicitly: super(self.__class__, self) resolves to
        # ERTBuilder again in any subclass and recurses forever.
        super(ERTBuilder, self).__init__(n_stages=n_stages, n_perturbations=n_perturbations,
                                         weak_builder=forest_builder)
| {
"content_hash": "710248d3c2088dbbee768155176a2845",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 114,
"avg_line_length": 57.68421052631579,
"alnum_prop": 0.666058394160584,
"repo_name": "AndrejMaris/facefit",
"id": "359cead55725c468be5eea7fdf4a247f8eab6fa5",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "facefit/ert/builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46544"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from datetime import datetime, time
import numpy as np
import pytest
from pandas.compat import product
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, MultiIndex, Series, Timestamp, date_range,
period_range, to_datetime)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_index_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
# Parametrized fixture yielding every (bool, bool) pair from the Cartesian
# product of (True, False) with itself.
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
    return request.param
class TestDataFrameTimeSeriesMethods(TestData):
def test_diff(self):
    """diff(1) matches a manual shift-and-subtract for several dtypes."""
    expected_a = self.tsframe['A'] - self.tsframe['A'].shift(1)
    assert_series_equal(self.tsframe.diff(1)['A'], expected_a)

    # int dtype
    base = 10000000000000000
    s = Series([base, base + 1])
    rs = DataFrame({'s': s}).diff()
    assert rs.s[1] == 1

    # mixed numeric
    tf = self.tsframe.astype('float32')
    assert_series_equal(tf.diff(1)['A'], tf['A'] - tf['A'].shift(1))

    # issue 10907
    df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
    df.insert(0, 'x', 1)
    result = df.diff(axis=1)
    expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(1),
                             'z': pd.Series(1)}).astype('float64')
    assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis0(self, tz):
    # GH 18578
    dti = date_range('2010', freq='D', periods=2, tz=tz)
    df = DataFrame({0: dti, 1: dti})

    result = df.diff(axis=0)
    deltas = pd.TimedeltaIndex(['NaT', '1 days'])
    expected = DataFrame({0: deltas, 1: deltas})
    assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis1(self, tz):
    # GH 18578
    dti = date_range('2010', freq='D', periods=2, tz=tz)
    df = DataFrame({0: dti, 1: dti})

    if tz is None:
        expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']),
                              1: pd.TimedeltaIndex(['0 days', '0 days'])})
        assert_frame_equal(df.diff(axis=1), expected)
    else:
        # Column-wise diff of tz-aware datetimes is not implemented.
        with pytest.raises(NotImplementedError):
            df.diff(axis=1)
def test_diff_timedelta(self):
    # GH 4533
    df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
                              Timestamp('20130101 9:02')],
                        value=[1.0, 2.0]))

    expected = DataFrame([[pd.NaT, np.nan],
                          [pd.Timedelta('00:01:00'), 1]],
                         columns=['time', 'value'])
    assert_frame_equal(df.diff(), expected)
def test_diff_mixed_dtype(self):
    """Numeric columns keep float64 results when an object column is present."""
    df = DataFrame(np.random.randn(5, 3))
    df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
    assert df.diff()[0].dtype == np.float64
def test_diff_neg_n(self):
    """A negative period diffs against the following row."""
    expected = self.tsframe - self.tsframe.shift(-1)
    assert_frame_equal(self.tsframe.diff(-1), expected)
def test_diff_float_n(self):
    """A float period is accepted and treated like the equivalent int."""
    assert_frame_equal(self.tsframe.diff(1.), self.tsframe.diff(1))
def test_diff_axis(self):
    # GH 9727
    df = DataFrame([[1., 2.], [3., 4.]])
    across_columns = DataFrame([[np.nan, 1.], [np.nan, 1.]])
    across_rows = DataFrame([[np.nan, np.nan], [2., 2.]])
    assert_frame_equal(df.diff(axis=1), across_columns)
    assert_frame_equal(df.diff(axis=0), across_rows)
def test_pct_change(self):
    """pct_change matches manual fill/shift/divide combinations."""
    ts = self.tsframe
    assert_frame_equal(ts.pct_change(fill_method=None), ts / ts.shift(1) - 1)

    padded = ts.fillna(method='pad')
    assert_frame_equal(ts.pct_change(2), padded / padded.shift(2) - 1)

    bfilled = ts.fillna(method='bfill', limit=1)
    assert_frame_equal(ts.pct_change(fill_method='bfill', limit=1),
                       bfilled / bfilled.shift(1) - 1)

    assert_frame_equal(ts.pct_change(freq='5D'),
                       (padded / padded.shift(freq='5D') - 1)
                       .reindex_like(padded))
def test_pct_change_shift_over_nas(self):
    """NaNs are forward-filled before the percentage change is computed."""
    s = Series([1., 1.5, np.nan, 2.5, 3.])
    df = DataFrame({'a': s, 'b': s})

    expected = Series([np.nan, 0.5, 0., 2.5 / 1.5 - 1, .2])
    assert_frame_equal(df.pct_change(),
                       DataFrame({'a': expected, 'b': expected}))
@pytest.mark.parametrize("freq, periods, fill_method, limit",
                         [('5B', 5, None, None),
                          ('3B', 3, None, None),
                          ('3B', 3, 'bfill', None),
                          ('7B', 7, 'pad', 1),
                          ('7B', 7, 'bfill', 3),
                          ('14B', 14, None, None)])
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
    # GH 7292
    kwargs = dict(fill_method=fill_method, limit=limit)

    # A frequency string and the equivalent number of periods must agree.
    assert_frame_equal(self.tsframe.pct_change(freq=freq, **kwargs),
                       self.tsframe.pct_change(periods, **kwargs))

    # Same equivalence on an all-NaN frame.
    empty_ts = DataFrame(index=self.tsframe.index,
                         columns=self.tsframe.columns)
    assert_frame_equal(empty_ts.pct_change(freq=freq, **kwargs),
                       empty_ts.pct_change(periods, **kwargs))
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
def test_frame_append_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
    def test_frame_append_datetime64_col_other_units(self):
        """Non-nanosecond datetime64 columns are coerced to M8[ns] on assignment."""
        n = 100
        units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
        ns_dtype = np.dtype('M8[ns]')
        for unit in units:
            dtype = np.dtype('M8[%s]' % unit)
            vals = np.arange(n, dtype=np.int64).view(dtype)
            df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
            df[unit] = vals
            # values must equal the object-path conversion, at ns resolution
            ex_vals = to_datetime(vals.astype('O')).values
            assert df[unit].dtype == ns_dtype
            assert (df[unit].values == ex_vals).all()
        # Test insertion into existing datetime64 column
        df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
        df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
        for unit in units:
            dtype = np.dtype('M8[%s]' % unit)
            vals = np.arange(n, dtype=np.int64).view(dtype)
            tmp = df.copy()
            # overwriting an existing M8[ns] column coerces the same way
            tmp['dates'] = vals
            ex_vals = to_datetime(vals.astype('O')).values
            assert (tmp['dates'].values == ex_vals).all()
    def test_shift(self):
        """shift: naive, zero, offset-based, PeriodIndex, and axis=1 behaviour."""
        # naive shift
        shiftedFrame = self.tsframe.shift(5)
        tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
        shiftedSeries = self.tsframe['A'].shift(5)
        assert_series_equal(shiftedFrame['A'], shiftedSeries)
        shiftedFrame = self.tsframe.shift(-5)
        tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
        shiftedSeries = self.tsframe['A'].shift(-5)
        assert_series_equal(shiftedFrame['A'], shiftedSeries)
        # shift by 0
        unshifted = self.tsframe.shift(0)
        assert_frame_equal(unshifted, self.tsframe)
        # shift by DateOffset: moves the index instead of the data
        shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
        assert len(shiftedFrame) == len(self.tsframe)
        shiftedFrame2 = self.tsframe.shift(5, freq='B')
        assert_frame_equal(shiftedFrame, shiftedFrame2)
        d = self.tsframe.index[0]
        shifted_d = d + offsets.BDay(5)
        assert_series_equal(self.tsframe.xs(d),
                            shiftedFrame.xs(shifted_d), check_names=False)
        # shift int frame
        int_shifted = self.intframe.shift(1) # noqa
        # Shifting with PeriodIndex
        ps = tm.makePeriodFrame()
        shifted = ps.shift(1)
        unshifted = shifted.shift(-1)
        tm.assert_index_equal(shifted.index, ps.index)
        tm.assert_index_equal(unshifted.index, ps.index)
        tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values,
                                    ps.iloc[:-1, 0].values)
        shifted2 = ps.shift(1, 'B')
        shifted3 = ps.shift(1, offsets.BDay())
        assert_frame_equal(shifted2, shifted3)
        assert_frame_equal(ps, shifted2.shift(-1, 'B'))
        # a freq that disagrees with the PeriodIndex freq is rejected
        msg = 'does not match PeriodIndex freq'
        with pytest.raises(ValueError, match=msg):
            ps.shift(freq='D')
        # shift other axis
        # GH 6371
        df = DataFrame(np.random.rand(10, 5))
        expected = pd.concat([DataFrame(np.nan, index=df.index,
                                        columns=[0]),
                              df.iloc[:, 0:-1]],
                             ignore_index=True, axis=1)
        result = df.shift(1, axis=1)
        assert_frame_equal(result, expected)
        # shift named axis
        df = DataFrame(np.random.rand(10, 5))
        expected = pd.concat([DataFrame(np.nan, index=df.index,
                                        columns=[0]),
                              df.iloc[:, 0:-1]],
                             ignore_index=True, axis=1)
        result = df.shift(1, axis='columns')
        assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH #24128
df = DataFrame([1, 2, 3, 4, 5],
index=date_range('1/1/2000', periods=5, freq='H'))
exp = DataFrame([0, 1, 2, 3, 4],
index=date_range('1/1/2000', periods=5, freq='H'))
result = df.shift(1, fill_value=0)
assert_frame_equal(result, exp)
exp = DataFrame([0, 0, 1, 2, 3],
index=date_range('1/1/2000', periods=5, freq='H'))
result = df.shift(2, fill_value=0)
assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
    def test_shift_duplicate_columns(self):
        """Position-based shifting is unaffected by duplicate column labels."""
        # GH 9092; verify that position-based shifting works
        # in the presence of duplicate columns
        column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
        data = np.random.randn(20, 5)
        shifted = []
        for columns in column_lists:
            df = pd.DataFrame(data.copy(), columns=columns)
            for s in range(5):
                df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
            df.columns = range(5)
            shifted.append(df)
        # sanity check the base case
        nulls = shifted[0].isna().sum()
        assert_series_equal(nulls, Series(range(1, 6), dtype='int64'))
        # check all answers are the same
        assert_frame_equal(shifted[0], shifted[1])
        assert_frame_equal(shifted[0], shifted[2])
    def test_tshift(self):
        """tshift moves the index by its own frequency (Period and Datetime)."""
        # PeriodIndex
        ps = tm.makePeriodFrame()
        shifted = ps.tshift(1)
        unshifted = shifted.tshift(-1)
        assert_frame_equal(unshifted, ps)
        shifted2 = ps.tshift(freq='B')
        assert_frame_equal(shifted, shifted2)
        shifted3 = ps.tshift(freq=offsets.BDay())
        assert_frame_equal(shifted, shifted3)
        # a freq that disagrees with the PeriodIndex freq is rejected
        with pytest.raises(ValueError, match='does not match'):
            ps.tshift(freq='M')
        # DatetimeIndex
        shifted = self.tsframe.tshift(1)
        unshifted = shifted.tshift(-1)
        assert_frame_equal(self.tsframe, unshifted)
        shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
        assert_frame_equal(shifted, shifted2)
        # an index without an explicit freq still works if it can be inferred
        inferred_ts = DataFrame(self.tsframe.values,
                                Index(np.asarray(self.tsframe.index)),
                                columns=self.tsframe.columns)
        shifted = inferred_ts.tshift(1)
        unshifted = shifted.tshift(-1)
        assert_frame_equal(shifted, self.tsframe.tshift(1))
        assert_frame_equal(unshifted, inferred_ts)
        # a frequency that can neither be passed nor inferred is an error
        no_freq = self.tsframe.iloc[[0, 5, 7], :]
        pytest.raises(ValueError, no_freq.tshift)
    def test_truncate(self):
        """truncate with before/after, both present and missing from the index."""
        ts = self.tsframe[::3]
        start, end = self.tsframe.index[3], self.tsframe.index[6]
        # labels that fall between the subsampled index values
        start_missing = self.tsframe.index[2]
        end_missing = self.tsframe.index[7]
        # neither specified
        truncated = ts.truncate()
        assert_frame_equal(truncated, ts)
        # both specified
        expected = ts[1:3]
        truncated = ts.truncate(start, end)
        assert_frame_equal(truncated, expected)
        truncated = ts.truncate(start_missing, end_missing)
        assert_frame_equal(truncated, expected)
        # start specified
        expected = ts[1:]
        truncated = ts.truncate(before=start)
        assert_frame_equal(truncated, expected)
        truncated = ts.truncate(before=start_missing)
        assert_frame_equal(truncated, expected)
        # end specified
        expected = ts[:3]
        truncated = ts.truncate(after=end)
        assert_frame_equal(truncated, expected)
        truncated = ts.truncate(after=end_missing)
        assert_frame_equal(truncated, expected)
        # before > after is an error
        pytest.raises(ValueError, ts.truncate,
                      before=ts.index[-1] - ts.index.freq,
                      after=ts.index[0] + ts.index.freq)
    def test_truncate_copy(self):
        """truncate returns a copy: mutating the result must not leak back."""
        index = self.tsframe.index
        truncated = self.tsframe.truncate(index[5], index[10])
        truncated.values[:] = 5.
        assert not (self.tsframe.values[5:11] == 5).any()
    def test_truncate_nonsortedindex(self):
        """truncate refuses unsorted indexes on both axes (GH 17935)."""
        # GH 17935
        df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e']},
                          index=[5, 3, 2, 9, 0])
        msg = 'truncate requires a sorted index'
        with pytest.raises(ValueError, match=msg):
            df.truncate(before=3, after=9)
        rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
        ts = pd.DataFrame({'A': np.random.randn(len(rng)),
                           'B': np.random.randn(len(rng))},
                          index=rng)
        # sorting by values scrambles the DatetimeIndex -> error
        msg = 'truncate requires a sorted index'
        with pytest.raises(ValueError, match=msg):
            ts.sort_values('A', ascending=False).truncate(before='2011-11',
                                                          after='2011-12')
        # the same check applies to the column axis
        df = pd.DataFrame({3: np.random.randn(5),
                           20: np.random.randn(5),
                           2: np.random.randn(5),
                           0: np.random.randn(5)},
                          columns=[3, 20, 2, 0])
        msg = 'truncate requires a sorted index'
        with pytest.raises(ValueError, match=msg):
            df.truncate(before=2, after=20, axis=1)
    def test_asfreq(self):
        """asfreq accepts both offset objects and rule strings."""
        offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
        rule_monthly = self.tsframe.asfreq('BM')
        tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
        filled = rule_monthly.asfreq('B', method='pad') # noqa
        # TODO: actually check that this worked.
        # don't forget!
        filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
        # test does not blow up on length-0 DataFrame
        zero_length = self.tsframe.reindex([])
        result = zero_length.asfreq('BM')
        # asfreq must return a new object even for an empty frame
        assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
assert isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
    @pytest.mark.parametrize("data,idx,expected_first,expected_last", [
        ({'A': [1, 2, 3]}, [1, 1, 2], 1, 2),
        ({'A': [1, 2, 3]}, [1, 2, 2], 1, 2),
        ({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'),
        ({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2),
        ({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
        ({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)])
    def test_first_last_valid(self, data, idx,
                              expected_first, expected_last):
        """first/last_valid_index across NaN edges, empty and all-NaN frames."""
        N = len(self.frame.index)
        mat = np.random.randn(N)
        # NaN out both ends so the valid span is [5, -6]
        mat[:5] = np.nan
        mat[-5:] = np.nan
        frame = DataFrame({'foo': mat}, index=self.frame.index)
        index = frame.first_valid_index()
        assert index == frame.index[5]
        index = frame.last_valid_index()
        assert index == frame.index[-6]
        # GH12800
        empty = DataFrame()
        assert empty.last_valid_index() is None
        assert empty.first_valid_index() is None
        # GH17400: no valid entries
        frame[:] = np.nan
        assert frame.last_valid_index() is None
        assert frame.first_valid_index() is None
        # GH20499: it preserves freq with holes
        frame.index = date_range("20110101", periods=N, freq="B")
        frame.iloc[1] = 1
        frame.iloc[-2] = 1
        assert frame.first_valid_index() == frame.index[1]
        assert frame.last_valid_index() == frame.index[-2]
        assert frame.first_valid_index().freq == frame.index.freq
        assert frame.last_valid_index().freq == frame.index.freq
        # GH 21441
        df = DataFrame(data, index=idx)
        assert expected_first == df.first_valid_index()
        assert expected_last == df.last_valid_index()
    def test_first_subset(self):
        """first(offset) keeps only the initial span of the index."""
        ts = tm.makeTimeDataFrame(freq='12h')
        result = ts.first('10d')
        assert len(result) == 20
        ts = tm.makeTimeDataFrame(freq='D')
        result = ts.first('10d')
        assert len(result) == 10
        result = ts.first('3M')
        expected = ts[:'3/31/2000']
        assert_frame_equal(result, expected)
        result = ts.first('21D')
        expected = ts[:21]
        assert_frame_equal(result, expected)
        # degenerate case: an empty frame stays empty
        result = ts[:0].first('3M')
        assert_frame_equal(result, ts[:0])
    def test_first_raises(self):
        """first() demands a DatetimeIndex (GH 20725)."""
        # GH20725
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(TypeError): # index is not a DatetimeIndex
            df.first('1D')
    def test_last_subset(self):
        """last(offset) keeps only the trailing span of the index."""
        ts = tm.makeTimeDataFrame(freq='12h')
        result = ts.last('10d')
        assert len(result) == 20
        ts = tm.makeTimeDataFrame(nper=30, freq='D')
        result = ts.last('10d')
        assert len(result) == 10
        result = ts.last('21D')
        expected = ts['2000-01-10':]
        assert_frame_equal(result, expected)
        result = ts.last('21D')
        expected = ts[-21:]
        assert_frame_equal(result, expected)
        # degenerate case: an empty frame stays empty
        result = ts[:0].last('3M')
        assert_frame_equal(result, ts[:0])
    def test_last_raises(self):
        """last() demands a DatetimeIndex (GH 20725)."""
        # GH20725
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(TypeError): # index is not a DatetimeIndex
            df.last('1D')
    def test_at_time(self):
        """at_time selects rows whose timestamps match a given wall-clock time."""
        rng = date_range('1/1/2000', '1/5/2000', freq='5min')
        ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
        rs = ts.at_time(rng[1])
        assert (rs.index.hour == rng[1].hour).all()
        assert (rs.index.minute == rng[1].minute).all()
        assert (rs.index.second == rng[1].second).all()
        # a string time and a datetime.time object are equivalent
        result = ts.at_time('9:30')
        expected = ts.at_time(time(9, 30))
        assert_frame_equal(result, expected)
        # .loc with a time object selects the same rows
        result = ts.loc[time(9, 30)]
        expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
        assert_frame_equal(result, expected)
        # midnight, everything
        rng = date_range('1/1/2000', '1/31/2000')
        ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
        result = ts.at_time(time(0, 0))
        assert_frame_equal(result, ts)
        # time doesn't exist
        rng = date_range('1/1/2012', freq='23Min', periods=384)
        ts = DataFrame(np.random.randn(len(rng), 2), rng)
        rs = ts.at_time('16:00')
        assert len(rs) == 0
    def test_at_time_raises(self):
        """at_time demands a DatetimeIndex (GH 20725)."""
        # GH20725
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(TypeError): # index is not a DatetimeIndex
            df.at_time('00:00')
    @pytest.mark.parametrize('axis', ['index', 'columns', 0, 1])
    def test_at_time_axis(self, axis):
        """at_time works along either axis, by name or number (GH 8839)."""
        # issue 8839
        rng = date_range('1/1/2000', '1/5/2000', freq='5min')
        ts = DataFrame(np.random.randn(len(rng), len(rng)))
        ts.index, ts.columns = rng, rng
        # the labels we expect at_time('9:30') to keep
        indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
        if axis in ['index', 0]:
            expected = ts.loc[indices, :]
        elif axis in ['columns', 1]:
            expected = ts.loc[:, indices]
        result = ts.at_time('9:30', axis=axis)
        assert_frame_equal(result, expected)
    def test_between_time(self, close_open_fixture):
        """between_time honours endpoint inclusivity, incl. overnight spans."""
        rng = date_range('1/1/2000', '1/5/2000', freq='5min')
        ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
        stime = time(0, 0)
        etime = time(1, 0)
        # fixture supplies (include_start, include_end) combinations
        inc_start, inc_end = close_open_fixture
        filtered = ts.between_time(stime, etime, inc_start, inc_end)
        exp_len = 13 * 4 + 1
        if not inc_start:
            exp_len -= 5
        if not inc_end:
            exp_len -= 4
        assert len(filtered) == exp_len
        for rs in filtered.index:
            t = rs.time()
            if inc_start:
                assert t >= stime
            else:
                assert t > stime
            if inc_end:
                assert t <= etime
            else:
                assert t < etime
        # string times are equivalent to datetime.time objects
        result = ts.between_time('00:00', '01:00')
        expected = ts.between_time(stime, etime)
        assert_frame_equal(result, expected)
        # across midnight
        rng = date_range('1/1/2000', '1/5/2000', freq='5min')
        ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
        stime = time(22, 0)
        etime = time(9, 0)
        filtered = ts.between_time(stime, etime, inc_start, inc_end)
        exp_len = (12 * 11 + 1) * 4 + 1
        if not inc_start:
            exp_len -= 4
        if not inc_end:
            exp_len -= 4
        assert len(filtered) == exp_len
        # when the window wraps midnight, a row matches if it is on
        # either side of the wrap
        for rs in filtered.index:
            t = rs.time()
            if inc_start:
                assert (t >= stime) or (t <= etime)
            else:
                assert (t > stime) or (t <= etime)
            if inc_end:
                assert (t <= etime) or (t >= stime)
            else:
                assert (t < etime) or (t >= stime)
    def test_between_time_raises(self):
        """between_time demands a DatetimeIndex (GH 20725)."""
        # GH20725
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(TypeError): # index is not a DatetimeIndex
            df.between_time(start_time='00:00', end_time='12:00')
    def test_between_time_axis(self, axis):
        """between_time works along either axis when it is datetime-like (GH 8839)."""
        # issue 8839
        rng = date_range('1/1/2000', periods=100, freq='10min')
        ts = DataFrame(np.random.randn(len(rng), len(rng)))
        stime, etime = ('08:00:00', '09:00:00')
        # 08:00 .. 09:00 inclusive at 10min steps -> 7 labels
        exp_len = 7
        if axis in ['index', 0]:
            ts.index = rng
            assert len(ts.between_time(stime, etime)) == exp_len
            assert len(ts.between_time(stime, etime, axis=0)) == exp_len
        if axis in ['columns', 1]:
            ts.columns = rng
            selected = ts.between_time(stime, etime, axis=1).columns
            assert len(selected) == exp_len
    def test_between_time_axis_raises(self, axis):
        """between_time raises TypeError when the chosen axis is not datetime-like."""
        # issue 8839
        rng = date_range('1/1/2000', periods=100, freq='10min')
        mask = np.arange(0, len(rng))
        rand_data = np.random.randn(len(rng), len(rng))
        ts = DataFrame(rand_data, index=rng, columns=rng)
        stime, etime = ('08:00:00', '09:00:00')
        if axis in ['columns', 1]:
            # break the index, keep the datetime columns
            ts.index = mask
            pytest.raises(TypeError, ts.between_time, stime, etime)
            pytest.raises(TypeError, ts.between_time, stime, etime, axis=0)
        if axis in ['index', 0]:
            # break the columns, keep the datetime index
            ts.columns = mask
            pytest.raises(TypeError, ts.between_time, stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
    def test_datetime_assignment_with_NaT_and_diff_time_units(self):
        """Assigning non-nano datetime64 data converts to ns without overflow (GH 7492)."""
        # GH 7492
        data_ns = np.array([1, 'nat'], dtype='datetime64[ns]')
        result = pd.Series(data_ns).to_frame()
        result['new'] = data_ns
        expected = pd.DataFrame({0: [1, None],
                                 'new': [1, None]}, dtype='datetime64[ns]')
        tm.assert_frame_equal(result, expected)
        # OutOfBoundsDatetime error shouldn't occur
        data_s = np.array([1, 'nat'], dtype='datetime64[s]')
        result['new'] = data_s
        # one second is 1e9 nanoseconds after conversion
        expected = pd.DataFrame({0: [1, None],
                                 'new': [1e9, None]}, dtype='datetime64[ns]')
        tm.assert_frame_equal(result, expected)
    def test_frame_to_period(self):
        """to_period converts the index (axis=0) or the columns (axis=1)."""
        K = 5
        dr = date_range('1/1/2000', '1/1/2001')
        pr = period_range('1/1/2000', '1/1/2001')
        df = DataFrame(np.random.randn(len(dr), K), index=dr)
        df['mix'] = 'a'
        pts = df.to_period()
        exp = df.copy()
        exp.index = pr
        assert_frame_equal(pts, exp)
        pts = df.to_period('M')
        tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
        # axis=1 converts the columns instead
        df = df.T
        pts = df.to_period(axis=1)
        exp = df.copy()
        exp.columns = pr
        assert_frame_equal(pts, exp)
        pts = df.to_period('M', axis=1)
        tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
        # only axes 0 and 1 exist on a DataFrame
        pytest.raises(ValueError, df.to_period, axis=2)
    @pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert'])
    def test_tz_convert_and_localize(self, fn):
        """tz_localize/tz_convert on plain and MultiIndex frames, incl. errors."""
        l0 = date_range('20140701', periods=5, freq='D')
        l1 = date_range('20140701', periods=5, freq='D')
        int_idx = Index(range(5))
        if fn == 'tz_convert':
            # tz_convert requires tz-aware input
            l0 = l0.tz_localize('UTC')
            l1 = l1.tz_localize('UTC')
        for idx in [l0, l1]:
            l0_expected = getattr(idx, fn)('US/Pacific')
            l1_expected = getattr(idx, fn)('US/Pacific')
        df1 = DataFrame(np.ones(5), index=l0)
        df1 = getattr(df1, fn)('US/Pacific')
        assert_index_equal(df1.index, l0_expected)
        # MultiIndex
        # GH7846
        df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
        # level=0 converts only the first level
        df3 = getattr(df2, fn)('US/Pacific', level=0)
        assert not df3.index.levels[0].equals(l0)
        assert_index_equal(df3.index.levels[0], l0_expected)
        assert_index_equal(df3.index.levels[1], l1)
        assert not df3.index.levels[1].equals(l1_expected)
        # level=1 converts only the second level
        df3 = getattr(df2, fn)('US/Pacific', level=1)
        assert_index_equal(df3.index.levels[0], l0)
        assert not df3.index.levels[0].equals(l0_expected)
        assert_index_equal(df3.index.levels[1], l1_expected)
        assert not df3.index.levels[1].equals(l1)
        df4 = DataFrame(np.ones(5),
                        MultiIndex.from_arrays([int_idx, l0]))
        # TODO: untested
        df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
        assert_index_equal(df3.index.levels[0], l0)
        assert not df3.index.levels[0].equals(l0_expected)
        assert_index_equal(df3.index.levels[1], l1_expected)
        assert not df3.index.levels[1].equals(l1)
        # Bad Inputs
        # Not DatetimeIndex / PeriodIndex
        with pytest.raises(TypeError, match='DatetimeIndex'):
            df = DataFrame(index=int_idx)
            df = getattr(df, fn)('US/Pacific')
        # Not DatetimeIndex / PeriodIndex
        with pytest.raises(TypeError, match='DatetimeIndex'):
            df = DataFrame(np.ones(5),
                           MultiIndex.from_arrays([int_idx, l0]))
            df = getattr(df, fn)('US/Pacific', level=0)
        # Invalid level
        with pytest.raises(ValueError, match='not valid'):
            df = DataFrame(index=l0)
            df = getattr(df, fn)('US/Pacific', level=1)
| {
"content_hash": "ced1d854eb06df19f1cc06c3ac316a7f",
"timestamp": "",
"source": "github",
"line_count": 897,
"max_line_length": 79,
"avg_line_length": 35.74804905239688,
"alnum_prop": 0.531279236574565,
"repo_name": "GuessWhoSamFoo/pandas",
"id": "bc37317f72802d0868e2923c129f2327ca1512a1",
"size": "32091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/frame/test_timeseries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "556"
},
{
"name": "Python",
"bytes": "14926624"
},
{
"name": "Shell",
"bytes": "29351"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
import requests
STEPIC_URL = 'https://stepic.org/'
STEPIC_API_URL = STEPIC_URL + 'api/'
STEPIC_OAUTH_TOKEN_URL = STEPIC_URL + 'oauth2/token/'
STEPIC_API_ATTEMPTS_URL = STEPIC_API_URL + 'attempts'
STEPIC_API_ATTEMPT_URL = STEPIC_API_ATTEMPTS_URL + '/{id}'
class LoginError(Exception):
    """Raised when obtaining an OAuth token from Stepic fails."""
class StepicError(Exception):
    """An error occurred on the Stepic side (e.g. a malformed API response)."""
class StepicClient(object):
    """Thin client for the Stepic REST API using OAuth2 client credentials."""

    def __init__(self, client_id, client_secret):
        """Obtain an OAuth token and prepare an authorized session.

        Raises LoginError when Stepic rejects the credentials — previously a
        bad login surfaced as an opaque KeyError on 'access_token'.
        """
        auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
        response = requests.post(STEPIC_OAUTH_TOKEN_URL,
                                 data={'grant_type': 'client_credentials'},
                                 auth=auth)
        if response.status_code != 200:
            raise LoginError('OAuth token request failed (HTTP %d)'
                             % response.status_code)
        token_data = response.json()
        if 'access_token' not in token_data:
            raise LoginError('OAuth response contained no access_token')
        self.token = token_data['access_token']
        auth_headers = {'Authorization': 'Bearer ' + self.token}
        self.session = requests.Session()
        self.session.headers.update(auth_headers)

    def get_attempt(self, attempt_id):
        """Return the attempt dict for *attempt_id*, or None if it does not exist.

        Raises StepicError if the API answers 200 but returns no attempts,
        mirroring the guard in create_attempt().
        """
        api_url = STEPIC_API_ATTEMPT_URL.format(id=attempt_id)
        response = self.session.get(api_url)
        if response.status_code == 404:
            return None
        response.raise_for_status()
        response_data = response.json()
        attempts = response_data['attempts']
        if not attempts:
            raise StepicError("Stepic didn't return an attempt")
        return attempts[0]

    def create_attempt(self, step_id):
        """Create a new attempt for *step_id* and return its dict."""
        data = {'attempt': {'step': step_id}}
        response = self.session.post(STEPIC_API_ATTEMPTS_URL, json=data)
        response.raise_for_status()
        resp_json = response.json()
        if not resp_json['attempts']:
            raise StepicError("Stepic didn't return an attempt")
        return resp_json['attempts'][0]
| {
"content_hash": "377b1c03eef71f1a522329e29f66b151",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 35.708333333333336,
"alnum_prop": 0.6190198366394399,
"repo_name": "rev112/playterminal",
"id": "0b0ac4118e8361ec97df45ebbc80e682bb88eb2a",
"size": "1714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "games/utils/stepic_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1310"
},
{
"name": "HTML",
"bytes": "3248"
},
{
"name": "JavaScript",
"bytes": "168977"
},
{
"name": "Makefile",
"bytes": "378"
},
{
"name": "Python",
"bytes": "18200"
},
{
"name": "Shell",
"bytes": "9468"
}
],
"symlink_target": ""
} |
import json
from typing import Any, Dict
from sqlalchemy.orm import Session
from superset.models.slice import Slice
def import_chart(
    session: Session, config: Dict[str, Any], overwrite: bool = False
) -> Slice:
    """Import a chart described by an export ``config`` dict into the DB.

    If a chart with the same UUID already exists it is returned untouched
    unless ``overwrite`` is True, in which case it is updated in place.
    NOTE(review): ``config`` is mutated by this call (``id`` may be added
    and ``params`` is re-encoded to a JSON string) — confirm callers do not
    reuse the dict.
    """
    existing = session.query(Slice).filter_by(uuid=config["uuid"]).first()
    if existing:
        if not overwrite:
            return existing
        # carry the primary key over so import_from_dict updates, not inserts
        config["id"] = existing.id
    # TODO (betodealmeida): move this logic to import_from_dict
    config["params"] = json.dumps(config["params"])
    chart = Slice.import_from_dict(session, config, recursive=False)
    if chart.id is None:
        # flush so the new row receives its primary key before returning
        session.flush()
    return chart
| {
"content_hash": "da0298a6c94af04ec520fff4e0bcd06d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 26.2,
"alnum_prop": 0.6717557251908397,
"repo_name": "mistercrunch/panoramix",
"id": "b3d4237f2b5daf3079b652279b84b64112fcde2e",
"size": "1441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/charts/commands/importers/v1/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "46750"
},
{
"name": "HTML",
"bytes": "34140"
},
{
"name": "JavaScript",
"bytes": "81606"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "240195"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
"""Texture (ERB-spaced) stimulus generation functions."""
# adapted (with permission) from code by Hari Bharadwaj
import numpy as np
import warnings
from ._stimuli import rms, window_edges
from .._fixes import irfft
def _cams(f):
"""Compute cams."""
return 21.4 * np.log10(0.00437 * f + 1)
def _inv_cams(E):
"""Compute cams inverse."""
return (10 ** (E / 21.4) - 1.) / 0.00437
def _scale_sound(x):
"""Scale appropriately to between +/- 1."""
return 0.95 * x / np.max(np.abs(x))
def _make_narrow_noise(bw, f_c, dur, fs, ramp_dur, rng):
    """Make narrow-band noise using FFT.

    Synthesizes ``dur`` seconds of noise at rate ``fs`` whose energy lies in
    the band ``f_c +/- bw / 2`` by filling the corresponding rFFT bins with
    unit-magnitude components of random phase (drawn from ``rng``), inverse
    transforming, and applying ``ramp_dur``-second DPSS onset/offset ramps.
    """
    f_min, f_max = f_c - bw / 2., f_c + bw / 2.
    t = np.arange(int(round(dur * fs))) / fs
    # Make Noise
    f_step = 1. / dur # Frequency bin size
    # indices of the first and one-past-last bins inside the band
    h_min = int(np.ceil(f_min / f_step))
    h_max = int(np.floor(f_max / f_step)) + 1
    phase = rng.rand(h_max - h_min) * 2 * np.pi
    noise = np.zeros(len(t) // 2 + 1, np.complex128)
    noise[h_min:h_max] = np.exp(1j * phase)
    return window_edges(irfft(noise)[:len(t)], fs, ramp_dur, window='dpss')
def texture_ERB(n_freqs=20, n_coh=None, rho=1., seq=('inc', 'nb', 'inc', 'nb'),
                fs=24414.0625, dur=1., SAM_freq=7., random_state=None,
                freq_lims=(200, 8000), verbose=True):
    """Create ERB texture stimulus
    Parameters
    ----------
    n_freqs : int
        Number of tones in mixture (default 20).
    n_coh : int | None
        Number of tones to be temporally coherent. Default (None) is
        ``int(np.round(n_freqs * 0.8))``.
    rho : float
        Correlation between the envelopes of grouped tones (default is 1.0).
    seq : list
        Sequence of incoherent ('inc'), coherent noise envelope ('nb'), and
        SAM ('sam') mixtures. Default is ``('inc', 'nb', 'inc', 'nb')``.
    fs : float
        Sampling rate in Hz.
    dur : float
        Duration (in seconds) of each token in seq (default is 1.0).
    SAM_freq : float
        The SAM frequency to use.
    random_state : None | int | np.random.RandomState
        The random generator state used for band selection and noise
        envelope generation.
    freq_lims : tuple
        The lower and upper frequency limits (default is (200, 8000)).
    verbose : bool
        If True, print the resulting ERB spacing.
    Returns
    -------
    x : ndarray, shape (n_samples,)
        The stimulus, where ``n_samples = len(seq) * (fs * dur)``
        (approximately).
    Notes
    -----
    This function requires MNE.
    """
    from mne.time_frequency.multitaper import dpss_windows
    from mne.utils import check_random_state
    # validate the token sequence before doing any heavy work
    if not isinstance(seq, (list, tuple, np.ndarray)):
        raise TypeError('seq must be list, tuple, or ndarray, got %s'
                        % type(seq))
    known_seqs = ('inc', 'nb', 'sam')
    for si, s in enumerate(seq):
        if s not in known_seqs:
            raise ValueError('all entries in seq must be one of %s, got '
                             'seq[%s]=%s' % (known_seqs, si, s))
    fs = float(fs)
    rng = check_random_state(random_state)
    # by default 80% of the tones form the temporally coherent group
    n_coh = int(np.round(n_freqs * 0.8)) if n_coh is None else n_coh
    rise = 0.002
    t = np.arange(int(round(dur * fs))) / fs
    f_min, f_max = freq_lims
    n_ERBs = _cams(f_max) - _cams(f_min)
    del f_max
    spacing_ERBs = n_ERBs / float(n_freqs - 1)
    if verbose:
        print('This stim will have successive tones separated by %2.2f ERBs'
              % spacing_ERBs)
    if spacing_ERBs < 1.0:
        warnings.warn('The spacing between tones is LESS THAN 1 ERB!')
    # Make a filter whose impulse response is purely positive (to avoid phase
    # jumps) so that the filtered envelope is purely positive. Use a DPSS
    # window to minimize sidebands. For a bandwidth of bw, to get the shortest
    # filterlength, we need to restrict time-bandwidth product to a minimum.
    # Thus we need a length*bw = 2 => length = 2/bw (second). Hence filter
    # coefficients are calculated as follows:
    b = dpss_windows(int(np.floor(2 * fs / 100.)), 1., 1)[0][0]
    b -= b[0]
    b /= b.sum()
    # Incoherent
    envrate = 14
    bw = 20
    incoh = 0.
    for k in range(n_freqs):
        f = _inv_cams(_cams(f_min) + spacing_ERBs * k)
        env = _make_narrow_noise(bw, envrate, dur, fs, rise, rng)
        env[env < 0] = 0
        env = np.convolve(b, env)[:len(t)]
        incoh += _scale_sound(window_edges(
            env * np.sin(2 * np.pi * f * t), fs, rise, window='dpss'))
    incoh /= rms(incoh)
    # Coherent (noise band)
    stims = dict(inc=0., nb=0., sam=0.)
    group = np.sort(rng.permutation(np.arange(n_freqs))[:n_coh])
    for kind in known_seqs:
        if kind == 'nb': # noise band
            env_coh = _make_narrow_noise(bw, envrate, dur, fs, rise, rng)
        else: # 'inc' or 'sam': sinusoidal (SAM) envelope
            env_coh = 0.5 + np.sin(2 * np.pi * SAM_freq * t) / 2.
            env_coh = window_edges(env_coh, fs, rise, window='dpss')
        env_coh[env_coh < 0] = 0
        env_coh = np.convolve(b, env_coh)[:len(t)]
        if kind == 'inc':
            use_group = [] # no coherent ones
        else: # 'nb' or 'sam'
            use_group = group
        for k in range(n_freqs):
            f = _inv_cams(_cams(f_min) + spacing_ERBs * k)
            env_inc = _make_narrow_noise(bw, envrate, dur, fs, rise, rng)
            env_inc[env_inc < 0] = 0.
            env_inc = np.convolve(b, env_inc)[:len(t)]
            if k in use_group:
                # NOTE(review): the weights sqrt(rho) and sqrt(1 - rho**2)
                # are not power-complementary except at rho in {0, 1} --
                # confirm this mixing rule is intended.
                env = np.sqrt(rho) * env_coh + np.sqrt(1 - rho ** 2) * env_inc
            else:
                env = env_inc
            stims[kind] += _scale_sound(window_edges(
                env * np.sin(2 * np.pi * f * t), fs, rise, window='dpss'))
        stims[kind] /= rms(stims[kind])
    stim = np.concatenate([stims[s] for s in seq])
    # scale the full sequence to an overall RMS of 0.01
    stim = 0.01 * stim / rms(stim)
    return stim
| {
"content_hash": "104e0b04931638424ed880413d129183",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 79,
"avg_line_length": 36.64375,
"alnum_prop": 0.5628517823639775,
"repo_name": "LABSN/expyfun",
"id": "bff7c1cda26a97478e38d9ebb770318ce23b03bb",
"size": "5910",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "expyfun/stimuli/_texture.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1433"
},
{
"name": "PowerShell",
"bytes": "895"
},
{
"name": "Python",
"bytes": "589852"
}
],
"symlink_target": ""
} |
import quickfix
import copy
import uuid
import random
import datetime
import yaml
from twisted.internet import task
from sim import (FixSimError, FixSimApplication, create_fix_version,
instance_safe_call, create_logger, IncrementID, load_yaml)
class Subscription(object):
    """A market-data subscription for one currency-pair symbol."""

    def __init__(self, symbol):
        super(Subscription, self).__init__()
        self.symbol = symbol
        # base currency is the part before the slash, e.g. "EUR" in "EUR/USD"
        self.currency = symbol.split("/")[0]

    def __repr__(self):
        return "<Subscription %s>" % self.symbol
class Subscriptions(object):
    """Registry of active Subscription objects keyed by symbol."""

    def __init__(self):
        self.subscriptions = {}

    def add(self, subscription):
        """Register *subscription*; raise KeyError if its symbol is taken."""
        if subscription.symbol in self.subscriptions:
            raise KeyError("Subscription for symbol has already exist")
        self.subscriptions[subscription.symbol] = subscription

    def get(self, symbol):
        """Return the subscription for *symbol*, or None when absent."""
        return self.subscriptions.get(symbol, None)

    def __iter__(self):
        return iter(self.subscriptions.values())
class OrderBook(object):
    """A flat collection of quotes for one instrument."""

    def __init__(self):
        self.quotes = []

    def setSnapshot(self, snaphot):
        # snapshot application is not implemented at this level
        raise NotImplementedError()

    def __iter__(self):
        return iter(self.quotes)

    def get(self, quoteID):
        """Return the quote whose ``id`` equals *quoteID*, or None."""
        for candidate in self.quotes:
            if candidate.id == quoteID:
                return candidate
        return None
class IDGenerator(object):
    """Produces sequential order and market-data request identifiers."""
    def __init__(self):
        # independent counters so order ids and request ids never collide
        self._orderID = IncrementID()
        self._reqID = IncrementID()
    def orderID(self):
        """Return the next order id."""
        return self._orderID.generate()
    def reqID(self):
        """Return the next market-data request id."""
        return self._reqID.generate()
def create_initiator(initiator_config, simulation_config):
    """Build a quickfix SocketInitiator for the simulated FIX client.

    ``initiator_config`` is a quickfix session-settings file;
    ``simulation_config`` is a YAML file describing the instruments plus
    optional ``subscribe_interval`` (default 1) and
    ``skip_snapshot_chance`` (default 0) client parameters.
    """
    def create_subscriptions(instruments):
        # one Subscription per instrument symbol listed in the YAML config
        result = Subscriptions()
        for instrument in instruments:
            subscription = Subscription(instrument['symbol'])
            result.add(subscription)
        return result
    settings = quickfix.SessionSettings(initiator_config)
    config = load_yaml(simulation_config)
    fix_version = create_fix_version(config)
    subscriptions = create_subscriptions(config['instruments'])
    logger = create_logger(config)
    subscribe_interval = config.get('subscribe_interval', 1)
    skip_snapshot_chance = config.get('skip_snapshot_chance', 0)
    application = Client(fix_version, logger, skip_snapshot_chance, subscribe_interval, subscriptions)
    storeFactory = quickfix.FileStoreFactory(settings)
    logFactory = quickfix.ScreenLogFactory(settings)
    initiator = quickfix.SocketInitiator(application, storeFactory, settings, logFactory)
    return initiator
class Snapshot(object):
    """Two-sided market snapshot (bid/ask quote lists) for one symbol."""

    def __init__(self, symbol):
        self.symbol = symbol
        self.bid = []
        self.ask = []

    def getRandomQuote(self):
        # Pick a side at random, then a random quote from that side.
        chosen = self.bid if random.randrange(0, 2) else self.ask
        return random.choice(chosen)

    def addBid(self, quote):
        # Bids are tagged SELL -- presumably the taker's side when trading
        # against a bid; confirm against the server's conventions.
        quote.side = Quote.SELL
        self.bid.append(quote)

    def addAsk(self, quote):
        quote.side = Quote.BUY
        self.ask.append(quote)

    def __repr__(self):
        return "Snapshot %s\n BID: %s\n ASK: %s" % (self.symbol, self.bid, self.ask)
class Quote(object):
    """A single one-sided quote: price and size for one symbol/currency."""

    # FIX tag 54 (Side) values.
    SELL = '2'
    BUY = '1'

    def __init__(self):
        super(Quote, self).__init__()
        self.side = None
        self.symbol = None
        self.currency = None
        self.price = None
        self.size = None
        self.id = None

    def __repr__(self):
        return "({0} {1} {2}, {3})".format(self.id, self.side, self.price, self.size)
class Client(FixSimApplication):
    """FIX client simulator: subscribes to market data and fires orders.

    Maintains two sessions -- a market-data session (identified by the
    MKD_TOKEN substring in its SessionID) and an order session.  A Twisted
    LoopingCall periodically (re)sends MarketDataRequest subscriptions; each
    incoming full snapshot may trigger a previously-quoted IOC order.
    """

    MKD_TOKEN = "MKD"

    def __init__(self, fixVersion, logger, skipSnapshotChance, subscribeInterval, subscriptions):
        super(Client, self).__init__(fixVersion, logger)
        self.skipSnapshotChance = skipSnapshotChance
        self.subscribeInterval = subscribeInterval
        self.subscriptions = subscriptions
        self.orderSession = None
        self.marketSession = None
        self.idGen = IDGenerator()
        # Fire subscribe() immediately, then every subscribeInterval seconds.
        self.loop = task.LoopingCall(self.subscribe)
        self.loop.start(self.subscribeInterval, True)

    def onCreate(self, sessionID):
        pass

    def onLogon(self, sessionID):
        # Classify the session by its SessionID string: market-data session
        # IDs contain MKD_TOKEN; everything else is the order session.
        sid = str(sessionID)
        if sid.find(self.MKD_TOKEN) != -1:
            self.marketSession = sessionID
            self.logger.info("FIXSIM-CLIENT MARKET SESSION %s", self.marketSession)
        else:
            self.orderSession = sessionID
            self.logger.info("FIXSIM-CLIENT ORDER SESSION %s", self.orderSession)

    def onLogout(self, sessionID):
        return

    def toAdmin(self, sessionID, message):
        return

    def fromAdmin(self, sessionID, message):
        return

    def toApp(self, sessionID, message):
        return

    def subscribe(self):
        """Send one MarketDataRequest (snapshot + updates) per subscription."""
        if self.marketSession is None:
            self.logger.info("FIXSIM-CLIENT Market session is none, skip subscribing")
            return

        for subscription in self.subscriptions:
            message = self.fixVersion.MarketDataRequest()
            message.setField(quickfix.MDReqID(self.idGen.reqID()))
            message.setField(quickfix.SubscriptionRequestType(quickfix.SubscriptionRequestType_SNAPSHOT_PLUS_UPDATES))
            message.setField(quickfix.MDUpdateType(quickfix.MDUpdateType_FULL_REFRESH))
            message.setField(quickfix.MarketDepth(0))
            # NOTE: a second, redundant MDReqID setField was removed here; it
            # merely overwrote tag 262 and burned an extra request id.

            relatedSym = self.fixVersion.MarketDataRequest.NoRelatedSym()
            relatedSym.setField(quickfix.Product(quickfix.Product_CURRENCY))
            relatedSym.setField(quickfix.SecurityType(quickfix.SecurityType_FOREIGN_EXCHANGE_CONTRACT))
            relatedSym.setField(quickfix.Symbol(subscription.symbol))
            message.addGroup(relatedSym)

            # Request BOTH sides of the book.  The original code added the BID
            # entry type twice; the second group must be OFFER, matching the
            # BID/OFFER handling in onMarketDataSnapshotFullRefresh.
            group = self.fixVersion.MarketDataRequest.NoMDEntryTypes()
            group.setField(quickfix.MDEntryType(quickfix.MDEntryType_BID))
            message.addGroup(group)
            group.setField(quickfix.MDEntryType(quickfix.MDEntryType_OFFER))
            message.addGroup(group)

            self.sendToTarget(message, self.marketSession)

    def onMarketDataSnapshotFullRefresh(self, message, sessionID):
        """Parse a full snapshot into a Snapshot and maybe place an order."""
        # Randomly skip a fraction of snapshots so traffic is less regular.
        skip_chance = random.choice(range(1, 101))
        if self.skipSnapshotChance > skip_chance:
            self.logger.info("FIXSIM-CLIENT onMarketDataSnapshotFullRefresh skip making trade with random choice %d", skip_chance)
            return

        fix_symbol = quickfix.Symbol()
        message.getField(fix_symbol)
        symbol = fix_symbol.getValue()

        snapshot = Snapshot(symbol)
        group = self.fixVersion.MarketDataSnapshotFullRefresh.NoMDEntries()
        fix_no_entries = quickfix.NoMDEntries()
        message.getField(fix_no_entries)
        no_entries = fix_no_entries.getValue()

        # QuickFIX repeating-group indices are 1-based.
        for i in range(1, no_entries + 1):
            message.getGroup(i, group)
            price = quickfix.MDEntryPx()
            size = quickfix.MDEntrySize()
            currency = quickfix.Currency()
            quote_id = quickfix.QuoteEntryID()

            group.getField(quote_id)
            group.getField(currency)
            group.getField(price)
            group.getField(size)

            quote = Quote()
            quote.price = price.getValue()
            quote.size = size.getValue()
            quote.currency = currency.getValue()
            quote.id = quote_id.getValue()

            fix_entry_type = quickfix.MDEntryType()
            group.getField(fix_entry_type)
            entry_type = fix_entry_type.getValue()
            if entry_type == quickfix.MDEntryType_BID:
                snapshot.addBid(quote)
            elif entry_type == quickfix.MDEntryType_OFFER:
                snapshot.addAsk(quote)
            else:
                raise RuntimeError("Unknown entry type %s" % str(entry_type))

        self.makeOrder(snapshot)

    def makeOrder(self, snapshot):
        """Send a previously-quoted IOC NewOrderSingle for a random quote."""
        self.logger.info("FIXSIM-CLIENT Snapshot received %s", str(snapshot))
        quote = snapshot.getRandomQuote()
        self.logger.info("FIXSIM-CLIENT make order for quote %s", str(quote))

        order = self.fixVersion.NewOrderSingle()
        order.setField(quickfix.HandlInst(quickfix.HandlInst_AUTOMATED_EXECUTION_ORDER_PUBLIC_BROKER_INTERVENTION_OK))
        order.setField(quickfix.SecurityType(quickfix.SecurityType_FOREIGN_EXCHANGE_CONTRACT))
        order.setField(quickfix.OrdType(quickfix.OrdType_PREVIOUSLY_QUOTED))
        order.setField(quickfix.ClOrdID(self.idGen.orderID()))
        order.setField(quickfix.QuoteID(quote.id))
        order.setField(quickfix.SecurityDesc("SPOT"))
        order.setField(quickfix.Symbol(snapshot.symbol))
        order.setField(quickfix.Currency(quote.currency))
        order.setField(quickfix.Side(quote.side))
        order.setField(quickfix.OrderQty(quote.size))
        order.setField(quickfix.FutSettDate("SP"))
        order.setField(quickfix.Price(quote.price))
        order.setField(quickfix.TransactTime())
        order.setField(quickfix.TimeInForce(quickfix.TimeInForce_IMMEDIATE_OR_CANCEL))

        self.sendToTarget(order, self.orderSession)

    def onExecutionReport(self, message, sessionID):
        self.logger.info("FIXSIM-CLIENT EXECUTION REPORT %s", str(message))

    def dispatchFromApp(self, msgType, message, beginString, sessionID):
        # MsgType 8 = ExecutionReport, W = MarketDataSnapshotFullRefresh.
        if msgType == '8':
            self.onExecutionReport(message, sessionID)
        elif msgType == 'W':
            self.onMarketDataSnapshotFullRefresh(message, sessionID)
| {
"content_hash": "4ed8cfb8a798a0804b9a4e35eb96209a",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 130,
"avg_line_length": 33.3728813559322,
"alnum_prop": 0.6419502285424074,
"repo_name": "gloryofrobots/fixsim",
"id": "470cfc4b924ded93b987b886c5b5086ea0255b22",
"size": "9845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixsim/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28602"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
# TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
# os.environ["TF_MATMUL_AUTOTUNE_ENABLE"] = "1" to enable it.
class MatVecTest(test_lib.TestCase):
    """Simple test for matvec, which is sugar on top of matmul."""

    def testTwoByTwoCase(self):
        matrix = np.array([[1, 2], [3, 4]])
        vector = np.array([5, 6])
        product = math_ops.matvec(matrix, vector)
        # matvec of a (2, 2) with a (2,) yields a rank-1 result.
        self.assertAllEqual((2,), product.shape)
        self.assertAllEqual([5 + 2 * 6, 3 * 5 + 4 * 6], product)
def _AddTest(test, op_name, testcase_name, fn):
    """Attach fn to the test class under a generated test_* method name."""
    test_name = "test_%s_%s" % (op_name, testcase_name)
    if hasattr(test, test_name):
        raise RuntimeError("Test %s defined more than once" % test_name)
    # Wrap so the body runs in graph mode only.
    setattr(test, test_name, test_util.deprecated_graph_mode_only(fn))
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
class MatMulTest(test_lib.TestCase):
    # Test methods are attached dynamically via _AddTest in the
    # __main__ block at the bottom of the file.
    pass  # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
    """Build a test method comparing tf.matmul against NumPy.

    Args:
      a_np_: left operand, a 2-D numpy array.
      b_np_: right operand, a 2-D numpy array.
      use_static_shape_: if True feed constants; otherwise use placeholders.
      **kwargs_: transpose_a/transpose_b/adjoint_a/adjoint_b flags forwarded
        to math_ops.matmul.

    Returns:
      A test function suitable for _AddTest.
    """

    def Test(self):
        # np.matrix is deprecated; np.matmul is equivalent for 2-D inputs.
        np_val = np.matmul(a_np_, b_np_)

        use_gpu = True
        if a_np_.dtype is np.float16 and (
                not test_util.GpuSupportsHalfMatMulAndConv()):
            use_gpu = False
            print("Built without fp16 matmul support for Cuda, running test on CPU.")

        # Transpose and possibly conjugate a_np_ and b_np_ according to the
        # attributes such that tf.matmul(effective_a_np, effective_b_np,
        # **kwargs) results in a valid matrix multiplication and produces the
        # same result as np.matmul(a_np_, b_np_).
        effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
        effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
        with self.cached_session() as sess, test_util.device(use_gpu):
            if use_static_shape_:
                a = constant_op.constant(effective_a_np)
                b = constant_op.constant(effective_b_np)
                res = math_ops.matmul(a, b, **kwargs_)
                tf_val = self.evaluate(res)
            else:
                a = array_ops.placeholder(a_np_.dtype)
                b = array_ops.placeholder(b_np_.dtype)
                res = math_ops.matmul(a, b, **kwargs_)
                tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
            # Loose fp16 tolerances: half-precision matmul accumulates error.
            self.assertAllCloseAccordingToType(
                tf_val,
                np_val,
                float_rtol=3e-5,
                float_atol=3e-5,
                half_rtol=0.2,
                half_atol=0.2)

    return Test
class MatMulGradientTest(test_lib.TestCase):
    # Gradient-check methods are attached dynamically via _AddTest in the
    # __main__ block at the bottom of the file.
    pass  # Will be filled in below.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
    """Build a gradient-check test for math_ops.matmul.

    Compares theoretical vs. numerical Jacobians w.r.t. each operand.
    """

    def Test(self):
        # Placeholders and int/half dtypes make gradient checking infeasible.
        if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
            self.skipTest("Skipping infeasible gradient test.")

        # Transpose and possibly conjugate a_np_ and b_np_ according to the
        # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
        # results in a valid matrix multiplication and produces the same result as
        # np.matrix(a_np_) * np.matrix(b_np_)
        effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
        effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)

        # Finite-difference step and tolerance scale with machine epsilon.
        epsilon = np.finfo(a_np_.dtype).eps
        delta = epsilon**(1.0 / 3.0)
        tol = 20 * delta
        with self.session():
            # Gradient w.r.t. the left operand.
            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
                [effective_a_np],
                delta=delta)
            self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
            # Gradient w.r.t. the right operand.
            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
                [effective_b_np],
                delta=delta)
            self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)

    return Test
class MatMulStatsTest(test_lib.TestCase):
    """Checks the flops statistics registered for MatMul graph nodes."""

    @test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
    def testSimpleStatistics(self):
        a = variables.Variable(random_ops.random_normal([25, 16]))
        b = variables.Variable(random_ops.random_normal([16, 9]))
        math_ops.matmul(a, b)
        g = ops.get_default_graph()
        for op in g.get_operations():
            flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
            if op.name == "MatMul":
                # 7200 == 2 * 25 * 16 * 9 (multiply-add per output element).
                self.assertEqual(7200, flops)

    @test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
    def testTransposedStatistics(self):
        # Same effective shapes as above once transpose_a is applied.
        a = variables.Variable(random_ops.random_normal([16, 25]))
        b = variables.Variable(random_ops.random_normal([16, 9]))
        math_ops.matmul(a, b, transpose_a=True)
        g = ops.get_default_graph()
        for op in g.get_operations():
            flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
            if op.name == "MatMul":
                self.assertEqual(7200, flops)
try:
    # @ operator supported since python 3.5.
    infix_matmul = operator.matmul
except AttributeError:
    # For earlier versions of python, emulate regular behavior.
    # Useful to build and test for 3.5+ on earlier versions.
    def infix_matmul(x, y):  # pylint: disable=invalid-name
        result = NotImplemented
        try:
            result = type(x).__matmul__(x, y)
        except AttributeError:
            pass
        if result is NotImplemented and type(x) is not type(y):
            try:
                result = type(y).__rmatmul__(y, x)
            except AttributeError:
                pass
        if result is NotImplemented:
            raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
                            .format(type(x).__name__, type(y).__name__))
        return result
class MatMulInfixOperatorTest(test_lib.TestCase):
    """Tests that the @ operator on tensors behaves like tf.matmul."""

    def testMismatchedShape(self):
        # Rank-1 @ rank-2 is rejected by matmul's shape checks.
        with self.assertRaisesRegexp(
            Exception, "(Shape must be rank 2 but is rank 1|is not a matrix)"):
            infix_matmul(
                ops.convert_to_tensor([10.0, 20.0, 30.0]),
                ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))

    def testMismatchedDimensions(self):
        # Inner dimensions (3 vs 2) do not agree.
        with self.assertRaisesRegexp(
            Exception, "(Dimensions must be equal|Matrix size-incompatible)"):
            infix_matmul(
                ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
                ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))

    @test_util.run_v1_only("Tensor.op is generally not applicable in TF 2")
    def testInfixMatmulIsTfMatmul(self):
        a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
        b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
        c = infix_matmul(a, b)
        self.assertEqual(c.op.type, "MatMul")

    def testInfixMatmulDoesDotProduct(self):
        # The @ operator and an explicit matmul must agree numerically.
        a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
        b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
        c = infix_matmul(a, b)
        d = math_ops.matmul(a, b)
        self.assertAllEqual(c, d)
if __name__ == "__main__":
    # Generate a test method per combination of shape, dtype, static-shape
    # mode, and transpose/adjoint flags, then attach them to the test classes.
    sizes = [1, 3, 5]
    trans_options = [[False, False], [True, False], [False, True]]
    # TF2 does not support placeholders under eager so we skip it
    for use_static_shape in set([True, tf2.enabled()]):
        for dtype in (np.int32, np.int64, np.float16, np.float32, np.float64,
                      np.complex64, np.complex128):
            if not use_static_shape and (dtype == np.int32 or dtype == np.int64):
                # TODO(rmlarsen): Re-enable this test when we have fixed the underlying
                # bug in Windows (b/35935459).
                continue
            for m in sizes:
                for n in sizes:
                    for k in sizes:
                        # Construct compatible random matrices a_np of size [m, k] and b_np
                        # of size [k, n].
                        a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
                        if dtype in (np.complex64, np.complex128):
                            a_np.imag = np.random.normal(-5, 5,
                                                         m * k).astype(dtype).reshape([m, k])
                        b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
                        if dtype in (np.complex64, np.complex128):
                            b_np.imag = np.random.normal(-5, 5,
                                                         k * n).astype(dtype).reshape([k, n])
                        for adjoint_a, transpose_a in trans_options:
                            for adjoint_b, transpose_b in trans_options:
                                # Encode the full configuration in the name so
                                # failures identify the exact combination.
                                name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
                                    use_static_shape, dtype.__name__, m, n, k, adjoint_a,
                                    transpose_a, adjoint_b, transpose_b)
                                _AddTest(MatMulTest, "MatMulTest", name,
                                         _GetMatMulTest(
                                             a_np,
                                             b_np,
                                             use_static_shape,
                                             adjoint_a=adjoint_a,
                                             transpose_a=transpose_a,
                                             adjoint_b=adjoint_b,
                                             transpose_b=transpose_b))
                                _AddTest(MatMulGradientTest, "MatMulGradientTest", name,
                                         _GetMatMulGradientTest(
                                             a_np,
                                             b_np,
                                             use_static_shape,
                                             adjoint_a=adjoint_a,
                                             transpose_a=transpose_a,
                                             adjoint_b=adjoint_b,
                                             transpose_b=transpose_b))
    test_lib.main()
| {
"content_hash": "97c5f8da10bfc99742ef8838d88cf14d",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 80,
"avg_line_length": 38.011583011583014,
"alnum_prop": 0.6052818689690198,
"repo_name": "ghchinoy/tensorflow",
"id": "a3dd7dbf2af3e9cecafc6a88dad7e916c8a51cfe",
"size": "10534",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/matmul_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.sites.models import SiteManager, Site, RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.utils.functional import SimpleLazyObject
from . import get_current_request, set_thread_variable
def get_site(request):
    """Resolve the Site for a request, caching it on the request object.

    The first path segment is tried as a Site domain; falls back to
    settings.SITE_ID and finally to a RequestSite.
    """
    if hasattr(request, '_cached_site'):
        return request._cached_site
    candidate = request.path.split('/')[1]
    try:
        resolved = Site.objects.get(domain=candidate)
    except (Site.DoesNotExist, Site.MultipleObjectsReturned):
        try:
            resolved = Site.objects.get(pk=settings.SITE_ID)
        except Site.DoesNotExist:
            resolved = RequestSite(request)
    request._cached_site = resolved
    return request._cached_site
class RequestSiteMiddleware(object):
    # Middleware that makes Site.objects.get_current() request-aware by
    # monkeypatching SiteManager.get_current at construction time.

    def __init__(self):
        '''
        Override SiteManager.get_current to check for the request object first.
        '''
        def get_current_site(self, request=None):
            # `self` here is the SiteManager the function is patched onto,
            # not the middleware instance.
            if (request and request.site):
                # NOTE(review): assumes process_request already set
                # request.site; a request without it would raise -- confirm.
                return request.site
            elif request:
                return SimpleLazyObject(lambda: get_site(request))
            # No request supplied: fall back to the thread-local request.
            request = get_current_request()
            return SimpleLazyObject(lambda: get_site(request))
        # Global patch; re-instantiating the middleware just re-applies it.
        SiteManager.get_current = get_current_site

    def process_request(self, request):
        # Stash the request in a thread-local and lazily attach its site.
        set_thread_variable('request', request)
        request.site = SimpleLazyObject(lambda: get_site(request))
| {
"content_hash": "b2d13470faa80d83cf2ab20521d24e7e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 83,
"avg_line_length": 35.46341463414634,
"alnum_prop": 0.6471801925722146,
"repo_name": "maxicecilia/simple_classroom",
"id": "e0e174e13ddcc8ced06c3146474594591639f07a",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_classroom/apps/core/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48156"
},
{
"name": "HTML",
"bytes": "30114"
},
{
"name": "JavaScript",
"bytes": "1896"
},
{
"name": "Python",
"bytes": "84195"
}
],
"symlink_target": ""
} |
"""Command for listing subnetworks."""
from googlecloudsdk.api_lib.compute import base_classes
class List(base_classes.RegionalLister):
    """List subnetworks."""

    @staticmethod
    def Args(parser):
        base_classes.RegionalLister.Args(parser)
        parser.add_argument(
            '--network',
            help='Only show subnetworks of a specific network.')

    @property
    def service(self):
        # Compute API service used by the lister machinery.
        return self.compute.subnetworks

    @property
    def resource_type(self):
        return 'subnetworks'

    def Run(self, args):
        # Client-side filter on top of the regional listing.
        # NOTE(review): resource['network'] is compared verbatim against the
        # flag value; presumably it holds a full resource URL -- confirm
        # whether users are expected to pass the URL or just a network name.
        for resource in super(List, self).Run(args):
            if args.network is None or resource.get('network', None) == args.network:
                yield resource


# Standard regional-lister help text for this resource type.
List.detailed_help = base_classes.GetRegionalListerHelp('subnetworks')
| {
"content_hash": "8ed165584fb2abed231ac8720ffa6346",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 24.6,
"alnum_prop": 0.7005420054200542,
"repo_name": "KaranToor/MA450",
"id": "cf91eaedc75475a89f2098bc8426f4147fac5abb",
"size": "1334",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/compute/networks/subnets/list.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
"""Copyright (C) 2021 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Run tests on build"""
import re
import os
import sys
import subprocess
import shlex
import argparse
import pathlib
import platform
from genericpath import exists
from fnmatch import fnmatchcase
from xml.dom import minidom
import multiprocessing
import time
# Parsed command-line arguments (populated in main()).
args = {}
# OS / hardware facts gathered by os_detect(): ID, NUM_PROC, VRAM.
OS_info = {}
# Legacy globals used by time_stop(); the TimerProcess path supersedes them.
timeout = False
test_proc = None
stop = 0
# Command template executed by batch(); %IDIR% / %XML% are substituted there.
test_script = [ 'cd %IDIR%', '%XML%' ]
def parse_args():
    """Parse command-line arguments for the test runner."""
    arg_parser = argparse.ArgumentParser(description="""
Checks build arguments
""")
    arg_parser.add_argument('-t', '--test', required=True,
                            help='Test set to run from rtest.xml (required, e.g. osdb)')
    arg_parser.add_argument('-g', '--debug', required=False, default=False, action='store_true',
                            help='Test Debug build (optional, default: false)')
    arg_parser.add_argument('-o', '--output', type=str, required=False, default="xml",
                            help='Test output file (optional, default: test_detail.xml)')
    arg_parser.add_argument('--install_dir', type=str, required=False, default="build",
                            help='Installation directory where build or release folders are (optional, default: build)')
    arg_parser.add_argument('--fail_test', default=False, required=False, action='store_true',
                            help='Return as if test failed (optional, default: false)')
    return arg_parser.parse_args()
def vram_detect():
    """Detect GPU VRAM and record it in OS_info["VRAM"] (0 if unknown)."""
    global OS_info
    OS_info["VRAM"] = 0
    if os.name == "nt":
        # Windows: parse hipinfo.exe output.
        cmd = "hipinfo.exe"
        process = subprocess.run([cmd], stdout=subprocess.PIPE)
        for line_in in process.stdout.decode().splitlines():
            if 'totalGlobalMem' in line_in:
                # presumably hipinfo reports GB here -- TODO confirm units
                OS_info["VRAM"] = float(line_in.split()[1])
                break
    else:
        # Linux: parse rocminfo; pool sizes are printed in KB, convert to GB.
        cmd = "rocminfo"
        process = subprocess.run([cmd], stdout=subprocess.PIPE)
        for line_in in process.stdout.decode().splitlines():
            match = re.search(r'.*Size:.*([0-9]+)\(.*\).*KB', line_in, re.IGNORECASE)
            if match:
                OS_info["VRAM"] = float(match.group(1))/(1024*1024)
                break
def os_detect():
    """Populate OS_info with OS identification, CPU count and VRAM size."""
    global OS_info
    if os.name == "nt":
        OS_info["ID"] = platform.system()
    else:
        # Read /etc/os-release key=value pairs (ID, VERSION_ID, ...).
        inf_file = "/etc/os-release"
        if os.path.exists(inf_file):
            with open(inf_file) as f:
                for line in f:
                    if "=" in line:
                        k,v = line.strip().split("=")
                        OS_info[k] = v.replace('"','')
    OS_info["NUM_PROC"] = os.cpu_count()
    vram_detect()
    print(OS_info)
def create_dir(dir_path):
    """Create dir_path (relative paths resolve against CWD), parents included."""
    target = pathlib.Path(dir_path)
    if not target.is_absolute():
        target = pathlib.Path(os.getcwd()) / dir_path
    return target.mkdir(parents=True, exist_ok=True)
def delete_dir(dir_path):
    """Recursively delete dir_path via the platform shell; no-op if absent."""
    if not os.path.exists(dir_path):
        return
    if os.name == "nt":
        # Bug fix: the flags were previously passed as run_cmd's `test`
        # parameter (run_cmd(cmd, test, time_limit)), so the command ran
        # without its arguments; run_cmd takes one full command line.
        return run_cmd(f"RMDIR /S /Q {dir_path}")
    linux_path = pathlib.Path(dir_path).absolute()
    return run_cmd(f"rm -rf {linux_path}")
class TimerProcess(multiprocessing.Process):
    """Watchdog process that kills a PID once a wall-clock budget expires.

    Args:
        start:    monotonic timestamp when the watched command began.
        stop:     time budget in seconds (0 disables the watchdog).
        kill_pid: PID of the process (tree, on Windows) to terminate.
    """

    def __init__(self, start, stop, kill_pid):
        multiprocessing.Process.__init__(self)
        self.quit = multiprocessing.Event()
        self.timed_out = multiprocessing.Event()
        self.start_time = start
        self.max_time = stop
        self.kill_pid = kill_pid

    def run(self):
        while not self.quit.is_set():
            if (self.max_time == 0):
                return
            t = time.monotonic()
            if (t - self.start_time > self.max_time):
                print( f'killing {self.kill_pid} t {t}')
                if os.name == "nt":
                    cmd = ['TASKKILL', '/F', '/T', '/PID', str(self.kill_pid)]
                    proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
                else:
                    # Bug fix: the module never imported `signal`, so this
                    # branch raised NameError instead of killing the process.
                    import signal
                    os.kill(self.kill_pid, signal.SIGKILL)
                self.timed_out.set()
                self.stop()

    def stop(self):
        """Ask the watchdog loop to exit."""
        self.quit.set()

    def stopped(self):
        """True if the watchdog fired (the watched process was killed)."""
        return self.timed_out.is_set()
def time_stop(start, pid):
    """Legacy watchdog loop (superseded by TimerProcess).

    Polls the module-level `stop` budget and kills `pid` when exceeded;
    communicates via the `timeout`/`stop` globals.  `test_proc` must be
    the Popen object being watched on non-Windows platforms.
    """
    global timeout, stop
    while (True):
        print( f'time_stop {start} limit {stop}')
        t = time.monotonic()
        if (stop == 0):
            return
        if ( (stop > 0) and (t - start > stop) ):
            print( f'killing {pid} t {t}')
            if os.name == "nt":
                cmd = ['TASKKILL', '/F', '/T', '/PID', str(pid)]
                proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
            else:
                # NOTE(review): kills the global test_proc, not `pid` --
                # confirm both always refer to the same process.
                test_proc.kill()
            timeout = True
            stop = 0
        time.sleep(0)
def run_cmd(cmd, test = False, time_limit = 0):
    """Run a shell command line; return an exit-like status code.

    Status codes: the process's returncode on the plain path; for test
    commands 1 = error text seen in the output, 2 = timed out, 3 = a
    Python exception occurred while running.

    'cd ' and 'mkdir ' prefixes are handled in-process instead of spawning
    a shell.
    """
    global args
    global test_proc, timer_thread
    global stop
    if (cmd.startswith('cd ')):
        return os.chdir(cmd[3:])
    if (cmd.startswith('mkdir ')):
        return create_dir(cmd[6:])
    cmdline = f"{cmd}"
    print(cmdline)
    try:
        if not test:
            proc = subprocess.run(cmdline, check=True, stderr=subprocess.STDOUT, shell=True)
            status = proc.returncode
        else:
            error = False
            # NOTE(review): this local shadows the module-level `timeout`.
            timeout = False
            # NOTE(review): shlex.split combined with shell=True means only
            # the first token reaches the shell as the command -- confirm
            # this is intended for the test command lines used here.
            test_proc = subprocess.Popen(shlex.split(cmdline), text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
            if time_limit > 0:
                start = time.monotonic()
                #p = multiprocessing.Process(target=time_stop, args=(start, test_proc.pid))
                p = TimerProcess(start, time_limit, test_proc.pid)
                p.start()
            # Stream output line by line, scanning for error markers.
            while True:
                output = test_proc.stdout.readline()
                if output == '' and test_proc.poll() is not None:
                    break
                elif output:
                    outstring = output.strip()
                    print (outstring)
                    error = error or re.search(r'error|fail', outstring, re.IGNORECASE)
            status = test_proc.poll()
            if time_limit > 0:
                p.stop()
                p.join()
                timeout = p.stopped()
                print(f"timeout {timeout}")
            if error:
                status = 1
            elif timeout:
                status = 2
            else:
                status = test_proc.returncode
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        import traceback
        exc = traceback.format_exc()
        print( "Python Exception: {0}".format(exc) )
        status = 3
    return status
def batch(script, xml):
    """Execute a command script, expanding %IDIR%/%ODIR%/%XML% tokens.

    `%XML%` expands into running every <test> in rtest.xml whose `sets`
    attribute contains args.test.  Returns 0 on success, 1 on any failure.
    """
    global OS_info
    global args
    #
    cwd = pathlib.os.curdir
    rtest_cwd_path = os.path.abspath( os.path.join( cwd, 'rtest.xml') )
    if os.path.isfile(rtest_cwd_path) and os.path.dirname(rtest_cwd_path).endswith( "staging" ):
        # if in a staging directory then test locally
        test_dir = cwd
    else:
        if args.debug: build_type = "debug"
        else: build_type = "release"
        test_dir = f"{args.install_dir}//{build_type}//clients//staging"
    fail = False
    for i in range(len(script)):
        cmdline = script[i]
        xcmd = cmdline.replace('%IDIR%', test_dir)
        cmd = xcmd.replace('%ODIR%', args.output)
        if cmd.startswith('tdir '):
            if pathlib.Path(cmd[5:]).exists():
                return 0 # all further cmds skipped
            else:
                continue
        error = False
        if cmd.startswith('%XML%'):
            # run the matching tests listed in the xml test file
            # First collect <var name=... value=...> substitutions.
            var_subs = {}
            for var in xml.getElementsByTagName('var'):
                name = var.getAttribute('name')
                val = var.getAttribute('value')
                var_subs[name] = val
            for test in xml.getElementsByTagName('test'):
                sets = test.getAttribute('sets')
                runset = sets.split(',')
                if args.test in runset:
                    for run in test.getElementsByTagName('run'):
                        name = run.getAttribute('name')
                        # Skip runs whose VRAM requirement exceeds this host.
                        vram_limit = run.getAttribute('vram_min')
                        if vram_limit:
                            if OS_info["VRAM"] < float(vram_limit):
                                print( f'***\n*** Skipped: {name} due to VRAM req.\n***')
                                continue
                        if name:
                            print( f'***\n*** Running: {name}\n***')
                        time_limit = run.getAttribute('time_max')
                        if time_limit:
                            timeout = float(time_limit)
                        else:
                            timeout = 0
                        # The run element's text is the command template.
                        raw_cmd = run.firstChild.data
                        var_cmd = raw_cmd.format_map(var_subs)
                        error = run_cmd(var_cmd, True, timeout)
                        if (error == 2):
                            print( f'***\n*** Timed out when running: {name}\n***')
        else:
            error = run_cmd(cmd)
        fail = fail or error
    if (fail):
        if (cmd == "%XML%"):
            print(f"FAILED xml test suite!")
        else:
            print(f"ERROR running: {cmd}")
        if (os.curdir != cwd):
            os.chdir( cwd )
        return 1
    if (os.curdir != cwd):
        os.chdir( cwd )
    return 0
def run_tests():
    """Parse rtest.xml from the current directory and run the test script.

    Returns 0 on success, 1 when any script fails; always restores the
    starting working directory before returning.
    """
    global test_script
    global xmlDoc

    cwd = os.curdir
    xmlDoc = minidom.parse(os.path.join(cwd, 'rtest.xml'))

    for script in [test_script]:
        if batch(script, xmlDoc):
            if os.curdir != cwd:
                os.chdir(cwd)
            return 1
    if os.curdir != cwd:
        os.chdir(cwd)
    return 0
def main():
    """Entry point: detect the platform, parse args, run tests, exit on failure."""
    global args
    global timer_thread

    os_detect()
    args = parse_args()

    status = run_tests()
    if args.fail_test:
        status = 1
    if status:
        sys.exit(status)


if __name__ == '__main__':
    main()
| {
"content_hash": "b0809ed043a271cc000eebf0ad08e19b",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 135,
"avg_line_length": 35.07012195121951,
"alnum_prop": 0.5406415717638877,
"repo_name": "amcamd/hipBLAS",
"id": "4651bc55634d37ddd5b7954f709d4957a8940768",
"size": "11522",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "rtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Asymptote",
"bytes": "9128"
},
{
"name": "C",
"bytes": "4837"
},
{
"name": "C++",
"bytes": "7258840"
},
{
"name": "CMake",
"bytes": "50978"
},
{
"name": "Fortran",
"bytes": "1173244"
},
{
"name": "Groovy",
"bytes": "14613"
},
{
"name": "Python",
"bytes": "258424"
},
{
"name": "Shell",
"bytes": "36950"
}
],
"symlink_target": ""
} |
from tailorscad.process import render_scad
| {
"content_hash": "6e2aeeaeeb0afd1ffef2c5cd90826357",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 42,
"avg_line_length": 43,
"alnum_prop": 0.8604651162790697,
"repo_name": "savorywatt/tailorSCAD",
"id": "6caa00cbbee90baa44c698915e3d50717c29ddf2",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tailorscad/api/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "290"
},
{
"name": "OpenSCAD",
"bytes": "291"
},
{
"name": "Python",
"bytes": "16795"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that we only scan generated .h files once.
This originated as a real-life bug report submitted by Scott Lystig
Fritchie. It's been left as-is, rather than stripped down to bare
minimum, partly because it wasn't completely clear what combination of
factors triggered the bug Scott saw, and partly because the real-world
complexity is valuable in its own right.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir('reftree',
['reftree', 'include'],
'src',
['src', 'lib_geng'])
# SConstruct written into the scratch tree.  The string is program data fed
# to scons and is reproduced verbatim (including the original QQQ remarks);
# %s is substituted with the absolute workspace path.
test.write('SConstruct', """\
###
### QQQ !@#$!@#$! I need to move the SConstruct file to be "above"
### both the source and install dirs, or the install dependencies
### don't seem to work well! ARRGH!!!!
###
experimenttop = r"%s"
import os
import Mylib
BStaticLibMerge = Builder(generator = Mylib.Gen_StaticLibMerge)
builders = Environment().Dictionary('BUILDERS')
builders["StaticLibMerge"] = BStaticLibMerge
env = Environment(BUILDERS = builders)
e = env.Dictionary() # Slightly easier to type
global_env = env
e["GlobalEnv"] = global_env
e["REF_INCLUDE"] = os.path.join(experimenttop, "reftree", "include")
e["REF_LIB"] = os.path.join(experimenttop, "reftree", "lib")
e["EXPORT_INCLUDE"] = os.path.join(experimenttop, "export", "include")
e["EXPORT_LIB"] = os.path.join(experimenttop, "export", "lib")
e["INSTALL_BIN"] = os.path.join(experimenttop, "install", "bin")
variant_dir = os.path.join(experimenttop, "tmp-bld-dir")
src_dir = os.path.join(experimenttop, "src")
env.Append(CPPPATH = [e["EXPORT_INCLUDE"]])
env.Append(CPPPATH = [e["REF_INCLUDE"]])
Mylib.AddLibDirs(env, "/via/Mylib.AddLibPath")
env.Append(LIBPATH = [e["EXPORT_LIB"]])
env.Append(LIBPATH = [e["REF_LIB"]])
Mylib.Subdirs(env, "src")
""" % test.workpath())
test.write('Mylib.py', """\
import os
import re
def Subdirs(env, dirlist):
for file in _subconf_list(dirlist):
env.SConscript(file, "env")
def _subconf_list(dirlist):
return [os.path.join(x, "SConscript") for x in dirlist.split()]
def StaticLibMergeMembers(local_env, libname, hackpath, files):
for file in files.split():
# QQQ Fix limits in grok'ed regexp
tmp = re.sub(".c$", ".o", file)
objname = re.sub(".cpp", ".o", tmp)
local_env.Object(target = objname, source = file)
e = 'local_env["GlobalEnv"].Append(%s = ["%s"])' % (libname, os.path.join(hackpath, objname))
exec(e)
def CreateMergedStaticLibrary(env, libname):
objpaths = env["GlobalEnv"][libname]
libname = "lib%s.a" % (libname)
env.StaticLibMerge(target = libname, source = objpaths)
# I put the main body of the generator code here to avoid
# namespace problems
def Gen_StaticLibMerge(source, target, env, for_signature):
target_string = ""
for t in target:
target_string = str(t)
subdir = os.path.dirname(target_string)
srclist = []
for src in source:
srclist.append(src)
return [["ar", "cq"] + target + srclist, ["ranlib"] + target]
def StaticLibrary(env, target, source):
env.StaticLibrary(target, source.split())
def SharedLibrary(env, target, source):
env.SharedLibrary(target, source.split())
def ExportHeader(env, headers):
env.Install(dir = env["EXPORT_INCLUDE"], source = headers.split())
def ExportLib(env, libs):
env.Install(dir = env["EXPORT_LIB"], source = libs.split())
def InstallBin(env, bins):
env.Install(dir = env["INSTALL_BIN"], source = bins.split())
def Program(env, target, source):
env.Program(target, source.split())
def AddCFlags(env, str):
env.Append(CPPFLAGS = " " + str)
# QQQ Synonym needed?
#def AddCFLAGS(env, str):
# AddCFlags(env, str)
def AddIncludeDirs(env, str):
env.Append(CPPPATH = str.split())
def AddLibs(env, str):
env.Append(LIBS = str.split())
def AddLibDirs(env, str):
env.Append(LIBPATH = str.split())
""")
test.write(['reftree', 'include', 'lib_a.h'], """\
char *a_letter(void);
""")
test.write(['reftree', 'include', 'lib_b.h'], """\
char *b_letter(void);
""")
test.write(['reftree', 'include', 'lib_ja.h'], """\
char *j_letter_a(void);
""")
test.write(['reftree', 'include', 'lib_jb.h.intentionally-moved'], """\
char *j_letter_b(void);
""")
test.write(['src', 'SConscript'], """\
# --- Begin SConscript boilerplate ---
import Mylib
Import("env")
#env = env.Clone() # Yes, clobber intentionally
#Make environment changes, such as: Mylib.AddCFlags(env, "-g -D_TEST")
#Mylib.Subdirs(env, "lib_a lib_b lib_mergej prog_x")
Mylib.Subdirs(env, "lib_geng")
env = env.Clone() # Yes, clobber intentionally
# --- End SConscript boilerplate ---
""")
test.write(['src', 'lib_geng', 'SConscript'], """\
# --- Begin SConscript boilerplate ---
import sys
import Mylib
Import("env")
#env = env.Clone() # Yes, clobber intentionally
#Make environment changes, such as: Mylib.AddCFlags(env, "-g -D_TEST")
#Mylib.Subdirs(env, "foo_dir")
env = env.Clone() # Yes, clobber intentionally
# --- End SConscript boilerplate ---
Mylib.AddCFlags(env, "-DGOOFY_DEMO")
Mylib.AddIncludeDirs(env, ".")
# Not part of Scott Lystig Fritchies's original stuff:
# On Windows, it's import to use the original test environment
# when we invoke SCons recursively.
import os
recurse_env = env.Clone()
recurse_env["ENV"] = os.environ
# Icky code to set up process environment for "make"
# I really ought to drop this into Mylib....
fromdict = env.Dictionary()
todict = env["ENV"]
import SCons.Util
import re
for k in fromdict.keys():
if k != "ENV" and k != "SCANNERS" and k != "CFLAGS" and k != "CXXFLAGS" \
and not SCons.Util.is_Dict(fromdict[k]):
# The next line can fail on some systems because it would try to
# do env.subst on:
# $RMIC $RMICFLAGS -d ${TARGET.attributes.java_lookupdir} ...
# When $TARGET is None, so $TARGET.attributes would throw an
# exception, which SCons would turn into a UserError. They're
# not important for this test, so just catch 'em.
f = fromdict[k]
try:
todict[k] = env.subst(f)
except SCons.Errors.UserError:
pass
todict["CFLAGS"] = fromdict["CPPFLAGS"] + " " + \
' '.join(["-I" + x for x in env["CPPPATH"]]) + " " + \
' '.join(["-L" + x for x in env["LIBPATH"]])
todict["CXXFLAGS"] = todict["CFLAGS"]
generated_hdrs = "libg_gx.h libg_gy.h libg_gz.h"
static_hdrs = "libg_w.h"
#exported_hdrs = generated_hdrs + " " + static_hdrs
exported_hdrs = static_hdrs
lib_name = "g"
lib_fullname = env.subst("${LIBPREFIX}g${LIBSUFFIX}")
lib_srcs = "libg_1.c libg_2.c libg_3.c".split()
import re
lib_objs = [re.sub("\.c$", ".o", x) for x in lib_srcs]
Mylib.ExportHeader(env, exported_hdrs)
Mylib.ExportLib(env, lib_fullname)
# The following were the original commands from Scott Lystic Fritchie,
# making use of a shell script and a Makefile to build the library.
# These have been preserved, commented out below, but in order to make
# this test portable, we've replaced them with a Python script and a
# recursive invocation of SCons (!).
#cmd_both = "cd %s ; make generated ; make" % Dir(".")
#cmd_generated = "cd %s ; sh MAKE-HEADER.sh" % Dir(".")
#cmd_justlib = "cd %s ; make" % Dir(".")
_ws = re.compile('\s')
def escape(s):
if _ws.search(s):
s = '"' + s + '"'
return s
cmd_generated = "%s $SOURCE" % escape(sys.executable)
cmd_justlib = "%s %s -C ${SOURCES[0].dir}" % (escape(sys.executable),
escape(sys.argv[0]))
##### Deps appear correct ... but wacky scanning?
# Why?
#
# SCons bug??
env.Command(generated_hdrs.split(),
["MAKE-HEADER.py"],
cmd_generated)
recurse_env.Command([lib_fullname] + lib_objs,
lib_srcs + (generated_hdrs + " " + static_hdrs).split(),
cmd_justlib)
""")
test.write(['src', 'lib_geng', 'MAKE-HEADER.py'], """\
#!/usr/bin/env python
import os
import os.path
import sys
# chdir to the directory in which this script lives
os.chdir(os.path.split(sys.argv[0])[0])
for h in ['libg_gx.h', 'libg_gy.h', 'libg_gz.h']:
open(h, 'w').write('')
""")
test.write(['src', 'lib_geng', 'SConstruct'], """\
import os
Scanned = {}
def write_out(file, dict):
f = open(file, 'wb')
for k in sorted(dict.keys()):
file = os.path.split(k)[1]
f.write(file + ": " + str(dict[k]) + "\\n")
f.close()
# A hand-coded new-style class proxy to wrap the underlying C Scanner
# with a method that counts the calls.
#
# This is more complicated than it used to be with old-style classes
# because the .__*__() methods in new-style classes are not looked
# up on the instance, but resolve to the actual wrapped class methods,
# so we have to handle those directly.
class CScannerCounter(object):
def __init__(self, original_CScanner, *args, **kw):
self.original_CScanner = original_CScanner
def __cmp__(self, *args, **kw):
return self.original_CScanner.__cmp__(*args, **kw)
def __hash__(self, *args, **kw):
return self.original_CScanner.__hash__(*args, **kw)
def __str__(self, *args, **kw):
return self.original_CScanner.__str__(*args, **kw)
def __getattr__(self, *args, **kw):
return self.original_CScanner.__getattribute__(*args, **kw)
def __call__(self, node, *args, **kw):
global Scanned
n = str(node)
try:
Scanned[n] = Scanned[n] + 1
except KeyError:
Scanned[n] = 1
write_out(r'%s', Scanned)
return self.original_CScanner(node, *args, **kw)
import SCons.Tool
MyCScanner = CScannerCounter(SCons.Script.CScanner)
SCons.Tool.SourceFileScanner.add_scanner('.c', MyCScanner)
env = Environment(CPPPATH = ".")
l = env.StaticLibrary("g", Split("libg_1.c libg_2.c libg_3.c"))
Default(l)
""" % test.workpath('MyCScan.out'))
# These were the original shell script and Makefile from SLF's original
# bug report. We're not using them--in order to make this script as
# portable as possible, we're using a Python script and a recursive
# invocation of SCons--but we're preserving them here for history.
#test.write(['src', 'lib_geng', 'MAKE-HEADER.sh'], """\
##!/bin/sh
#
#exec touch $*
#""")
#
#test.write(['src', 'lib_geng', 'Makefile'], """\
#all: libg.a
#
#GEN_HDRS = libg_gx.h libg_gy.h libg_gz.h
#STATIC_HDRS = libg_w.h
#
#$(GEN_HDRS): generated
#
#generated: MAKE-HEADER.sh
# sh ./MAKE-HEADER.sh $(GEN_HDRS)
#
#libg.a: libg_1.o libg_2.o libg_3.o
# ar r libg.a libg_1.o libg_2.o libg_3.o
#
#libg_1.c: $(STATIC_HDRS) $(GEN_HDRS)
#libg_2.c: $(STATIC_HDRS) $(GEN_HDRS)
#libg_3.c: $(STATIC_HDRS) $(GEN_HDRS)
#
#clean:
# -rm -f $(GEN_HDRS)
# -rm -f libg.a *.o core core.*
#""")
test.write(['src', 'lib_geng', 'libg_w.h'], """\
""")
test.write(['src', 'lib_geng', 'libg_1.c'], """\
#include <libg_w.h>
#include <libg_gx.h>
int g_1()
{
return 1;
}
""")
test.write(['src', 'lib_geng', 'libg_2.c'], """\
#include <libg_w.h>
#include <libg_gx.h>
#include <libg_gy.h>
#include <libg_gz.h>
int g_2()
{
return 2;
}
""")
test.write(['src', 'lib_geng', 'libg_3.c'], """\
#include <libg_w.h>
#include <libg_gx.h>
int g_3()
{
return 3;
}
""")
test.run(stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
# Note that the generated .h files still get scanned twice,
# but that's really once each as a child of libg_1.o and libg_2.o.
#
# TODO(sgk): can the duplication be eliminated safely? Batch build
# support "eliminated" the duplication before in a way that broke a
# use case that ended up in test/Depends/no-Builder.py (issue 2647).
test.must_match("MyCScan.out", """\
libg_1.c: 1
libg_2.c: 1
libg_3.c: 1
libg_gx.h: 2
libg_gy.h: 1
libg_gz.h: 1
libg_w.h: 2
""")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "9fc267dbd442a991ffdb0c33ec2b6efc",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 101,
"avg_line_length": 28.52757793764988,
"alnum_prop": 0.6369367854741089,
"repo_name": "andrewyoung1991/scons",
"id": "845111ce9f501599e7ee3e1a1418903cccd87236",
"size": "12998",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/Scanner/generated.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "746"
},
{
"name": "C++",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "44714"
},
{
"name": "Python",
"bytes": "7385906"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52194"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
'''
Created on Jul 9, 2013
@author: nshearer
'''
from py_wizard.WizardUserInterface import WizardUserInterface
from ConsoleQuestionPresenter import ConsoleQuestionPresenter
from py_wizard.questions.SimpleQuestion import SimpleQuestion
from ConsoleSimpleQuestion import ConsoleSimpleQuestion
from py_wizard.questions.NameQuestion import NameQuestion
from ConsoleNameQuestion import ConsoleNameQuestion
from py_wizard.questions.YesNoQuestion import YesNoQuestion
from ConsoleYesNoQuestion import ConsoleYesNoQuestion
from py_wizard.questions.IntQuestion import IntQuestion
from ConsoleIntQuestion import ConsoleIntQuestion
from py_wizard.questions.CurrencyQuestion import CurrencyQuestion
from ConsoleCurrencyQuestion import ConsoleCurrencyQuestion
from py_wizard.questions.DateQuestion import DateQuestion
from ConsoleDateQuestion import ConsoleDateQuestion
from py_wizard.questions.ActionPrompt import ActionPrompt
from ConsoleActionPrompt import ConsoleActionPrompt
from py_wizard.questions.ParagraphQuestion import ParagraphQuestion
from ConsoleParagraphQuestion import ConsoleParagraphQuestion
from py_wizard.questions.ListQuestion import ListQuestion
from ConsoleListQuestion import ConsoleListQuestion
from py_wizard.questions.SelectQuestion import SelectQuestion
from ConsoleSelectQuestion import ConsoleSelectQuestion
class ConsoleInterface(WizardUserInterface):
'''Interface optimized for interacting via the console'''
def __init__(self):
super(ConsoleInterface, self).__init__()
def build_standard_q_presenter(self, question):
'''Wrap a question in a question presenter for this interface'''
if isinstance(question, NameQuestion):
return ConsoleNameQuestion(question)
if isinstance(question, ActionPrompt):
return ConsoleActionPrompt(question)
if isinstance(question, YesNoQuestion):
return ConsoleYesNoQuestion(question)
if isinstance(question, IntQuestion):
return ConsoleIntQuestion(question)
if isinstance(question, CurrencyQuestion):
return ConsoleCurrencyQuestion(question)
if isinstance(question, DateQuestion):
return ConsoleDateQuestion(question)
if isinstance(question, ParagraphQuestion):
return ConsoleParagraphQuestion(question)
if isinstance(question, ListQuestion):
return ConsoleListQuestion(question)
if isinstance(question, SelectQuestion):
return ConsoleSelectQuestion(question)
if isinstance(question, SimpleQuestion):
return ConsoleSimpleQuestion(question)
return None
def _validate_currect_presenter_class(self, presenter_class):
'''Make sure the presenter class is appropriate for this interface'''
if not isinstance(presenter_class, ConsoleQuestionPresenter):
msg = "Question presenter %s needs to be inherited from %s"
msg % (presenter_class.__name__, 'ConsoleQuestionPresenter')
raise Exception(msg)
def present_question(self, wrapper):
'''Present question (wrapped in a question presenter class) to user'''
wrapper.ask()
def inform_user(self, description):
'''Inform the user of anything.
Typically this will be akin to a print'''
print description
def inform_user_of_action(self, description):
'''Inform the user of an action being performed'''
print description
| {
"content_hash": "dc19a7cd2205a28cc74fc99531d44321",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 35.28971962616822,
"alnum_prop": 0.6957097457627118,
"repo_name": "shearern/PyWizard",
"id": "2527d23930ade34cb0f82746f9fb205de3f9afe0",
"size": "3776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/py_wizard/console_wiz_iface/ConsoleInterface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76176"
}
],
"symlink_target": ""
} |
from reporter.core import SqlReport, Schedule
from reporter.uhl_reports.civicrm import get_contact_link
from reporter.emailing import RECIPIENT_IT_DQ
class CivicrmPracticeDuplicateCode(SqlReport):
    """Daily DQ report listing GP practices whose practice code is
    shared by more than one live (non-deleted) CiviCRM contact."""

    def __init__(self):
        # Inner query finds practice codes carried by 2+ GP surgery
        # contacts (HAVING COUNT(*) > 1); the outer query then lists
        # every non-deleted contact holding one of those codes.
        query = '''
            SELECT
                x.practice_code,
                con.id,
                con.display_name
            FROM (
                SELECT
                    RTRIM(LTRIM(COALESCE(gp.practice_code_7, ''))) [practice_code]
                FROM STG_CiviCRM.dbo.civicrm_contact con
                LEFT JOIN STG_CiviCRM.dbo.civicrm_value_gp_surgery_data_3 gp
                    ON gp.entity_id = con.id
                WHERE con.contact_type = 'Organization'
                    AND con.contact_sub_type LIKE '%GP_Surgery%'
                    AND con.is_deleted = 0
                    AND LEN(RTRIM(LTRIM(COALESCE(gp.practice_code_7, '')))) > 0
                GROUP BY RTRIM(LTRIM(COALESCE(gp.practice_code_7, '')))
                HAVING COUNT(*) > 1
            ) x
            JOIN STG_CiviCRM.dbo.civicrm_value_gp_surgery_data_3 gp
                ON gp.practice_code_7 = x.practice_code
            JOIN STG_CiviCRM.dbo.civicrm_contact con
                ON con.id = gp.entity_id
                AND con.is_deleted = 0
            ;
        '''

        super().__init__(
            # Bug fix: the original text said the practices "do not have
            # a duplicate code", contradicting the query, which selects
            # practice codes used by MORE than one contact.
            introduction=("The following GP Practices have "
                          "a duplicate code in CiviCRM"),
            recipients=[RECIPIENT_IT_DQ],
            sql=query,
            schedule=Schedule.daily
        )

    def get_report_line(self, row):
        """Render one report bullet: '- <practice code>: <contact link>'."""
        contact_link = get_contact_link(row['display_name'], row['id'])
        return '- {}: {}\r\n'.format(row['practice_code'], contact_link)
| {
"content_hash": "f41e50d089e2e837a6ca5a47a23d8418",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 37.82222222222222,
"alnum_prop": 0.5264394829612221,
"repo_name": "LCBRU/reporter",
"id": "8307fdb74ea1fcdc565961c3319d2ab4a05398dc",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporter/uhl_reports/civicrm/practice_duplicate_code.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "386"
},
{
"name": "HTML",
"bytes": "3199"
},
{
"name": "Python",
"bytes": "600192"
}
],
"symlink_target": ""
} |
def title():
    """Page title for this page; intentionally empty."""
    return ""
def content():
    """Placeholder body text until the statistics plots are implemented."""
    placeholder = (
        'This is a placeholder for plots displaying statistics '
        'for phase 1 proposals.'
    )
    return placeholder
| {
"content_hash": "53dd1a33cdb035a426557014a06ed5f3",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 89,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7014925373134329,
"repo_name": "saltastro/salt-data-quality-site",
"id": "b1f96182958e1ee5c21ef78b01f53f5d5a1d00ae",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/pages/proposal/phase1/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1596"
},
{
"name": "Gherkin",
"bytes": "144"
},
{
"name": "HTML",
"bytes": "9794"
},
{
"name": "JavaScript",
"bytes": "309228"
},
{
"name": "Python",
"bytes": "198027"
},
{
"name": "Shell",
"bytes": "441"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import pytest
from pandas.compat import lrange, string_types
from pandas import DataFrame, Series
import pandas.util.testing as tm
# Each parametrized ``subset`` contains at least one label ('a', 'B')
# that does not exist in the frame, whose columns are 'A'/'B'/'C'.
@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])
def test_duplicated_with_misspelled_column_name(subset):
    # GH 19730
    # Both duplicated() and drop_duplicates() must raise KeyError for a
    # missing column label rather than silently ignoring it.
    df = DataFrame({'A': [0, 0, 1],
                    'B': [0, 0, 1],
                    'C': [0, 0, 1]})

    with pytest.raises(KeyError):
        df.duplicated(subset)

    with pytest.raises(KeyError):
        df.drop_duplicates(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
    # gh-21524
    # Given the wide dataframe with a lot of columns
    # with different (important!) values
    # (100 columns x 30000 random rows, transposed into a
    # 100-row x 30000-column frame)
    data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000)
            for i in range(100)}
    df = DataFrame(data).T
    result = df.duplicated()

    # Then duplicates produce the bool Series as a result and don't fail during
    # calculation. Actual values doesn't matter here, though usually it's all
    # False in this case
    assert isinstance(result, Series)
    assert result.dtype == np.bool
# Rows 0/4 and 1/2 form the two duplicate pairs in the frame below;
# ``keep`` controls which member of each pair is flagged as a duplicate
# ('first' flags the later one, 'last' the earlier one, False flags both).
@pytest.mark.parametrize('keep, expected', [
    ('first', Series([False, False, True, False, True])),
    ('last', Series([True, True, False, False, False])),
    (False, Series([True, True, True, False, True]))
])
def test_duplicated_keep(keep, expected):
    df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']})

    result = df.duplicated(keep=keep)
    tm.assert_series_equal(result, expected)
# Expected to fail until GH#21720 is resolved: duplicated() treats
# np.nan and None as equal values in an object column, so the
# expectations below (which distinguish them) do not hold yet.
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
@pytest.mark.parametrize('keep, expected', [
    ('first', Series([False, False, True, False, True])),
    ('last', Series([True, True, False, False, False])),
    (False, Series([True, True, True, False, True]))
])
def test_duplicated_nan_none(keep, expected):
    df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object)

    result = df.duplicated(keep=keep)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A'])
def test_duplicated_subset(subset, keep):
    # Cross-check: duplicated(subset=...) on the full frame must agree
    # with duplicated() on the frame restricted to those columns.
    df = DataFrame({'A': [0, 1, 1, 2, 0],
                    'B': ['a', 'b', 'b', 'c', 'a'],
                    'C': [np.nan, 3, 3, None, np.nan]})

    # Normalize the parametrized subset into a list of column labels.
    if subset is None:
        subset = list(df.columns)
    elif isinstance(subset, string_types):
        # need to have a DataFrame, not a Series
        # -> select columns with singleton list, not string
        subset = [subset]

    expected = df[subset].duplicated(keep=keep)
    result = df.duplicated(keep=keep, subset=subset)
    tm.assert_series_equal(result, expected)
def test_drop_duplicates():
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.loc[[]]
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.loc[[0, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.loc[[0]]
tm.assert_frame_equal(result, expected)
# consider everything
df2 = df.loc[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
tm.assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
# GH 11376
df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
tm.assert_frame_equal(df.drop_duplicates(), expected)
df = DataFrame([[1, 0], [0, 2]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = DataFrame([[-2, 0], [0, -4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = DataFrame([[-x, x], [0, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = DataFrame([[-x, x], [x, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert df.duplicated(keep=keep).sum() == 0
def test_drop_duplicates_with_duplicate_column_names():
    # GH17836
    # Two columns share the label 'a'; drop_duplicates must not break
    # on the non-unique column index.
    df = DataFrame([
        [1, 2, 5],
        [3, 4, 6],
        [3, 4, 7]
    ], columns=['a', 'a', 'b'])

    # No row is a full duplicate, so the default call keeps everything.
    result0 = df.drop_duplicates()
    tm.assert_frame_equal(result0, df)

    # Selecting 'a' uses both 'a' columns; rows 1 and 2 match on them,
    # so only the first two rows remain.
    result1 = df.drop_duplicates('a')
    expected1 = df[:2]
    tm.assert_frame_equal(result1, expected1)
def test_drop_duplicates_for_take_all():
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
tm.assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple():
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.loc[[]] # empty df
assert len(result) == 0
tm.assert_frame_equal(result, expected)
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('df', [
DataFrame(),
DataFrame(columns=[]),
DataFrame(columns=['A', 'B', 'C']),
DataFrame(index=[]),
DataFrame(index=['A', 'B', 'C'])
])
def test_drop_duplicates_empty(df):
# GH 20516
result = df.drop_duplicates()
tm.assert_frame_equal(result, df)
result = df.copy()
result.drop_duplicates(inplace=True)
tm.assert_frame_equal(result, df)
def test_drop_duplicates_NA():
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.loc[[0, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.loc[[1, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.loc[[0, 2, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.loc[[1, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.loc[[6]]
tm.assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.loc[[3, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.loc[[0, 1, 2, 4]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.loc[[1, 3, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.loc[[1]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all():
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
tm.assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace():
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.loc[[6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.loc[[]]
result = df
tm.assert_frame_equal(result, expected)
assert len(df) == 0
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.loc[[0, 1, 2, 3]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.loc[[0, 5, 6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.loc[[0]]
result = df
tm.assert_frame_equal(result, expected)
# consider everything
orig2 = orig.loc[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
tm.assert_frame_equal(result, expected)
| {
"content_hash": "c193b1671f7a7b0445b4dd7538ed3cb6",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 79,
"avg_line_length": 31.6401766004415,
"alnum_prop": 0.5578734389171841,
"repo_name": "GuessWhoSamFoo/pandas",
"id": "f61dbbdb989e4b0a912f383417ee6726ba60657c",
"size": "14358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/frame/test_duplicates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "556"
},
{
"name": "Python",
"bytes": "14926624"
},
{
"name": "Shell",
"bytes": "29351"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
import os

# Absolute path of the directory containing this config file; used to
# anchor the SQLite database and migration repository paths below.
basedir = os.path.abspath(os.path.dirname(__file__))

# Work factor for bcrypt password hashing (higher = slower, stronger).
BCRYPT_LOG_ROUNDS = 12

# Form
# NOTE(review): SECRET_KEY looks like a Flask session/CSRF signing key --
# a hard-coded value is fine for dev config only; confirm it is not used
# in production.
SECRET_KEY = 'I cannot forget may eighth'
UPLOAD_FOLDER = 'hjlog/static/image/photo'  # destination for uploaded photos
ALLOWED_EXTENSIONS = set(['gif', 'jpg', 'jpeg', 'png'])  # accepted image types

# SQLAlchemy
# Use the DATABASE_URL environment variable when present; otherwise fall
# back to a local SQLite file next to this config.
if os.environ.get('DATABASE_URL') is None:
    SQLALCHEMY_DATABASE_URI = (
        'sqlite:///' + os.path.join(basedir, 'hjlog.db') +
        '?check_same_thread=False')
else:
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']

SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_RECORD_QUERIES = True

# MAX file upload size
MAX_CONTENT_LENGTH = 16 * 1024 * 1024  # 16 MiB request-body cap
| {
"content_hash": "ce46ca3e7e6c9a59da088f5ca6909ca2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 25.5,
"alnum_prop": 0.6636500754147813,
"repo_name": "heejongahn/hjlog",
"id": "08d38d458a1dba0b2680cc268d7c981548bb39c1",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9657"
},
{
"name": "HTML",
"bytes": "16405"
},
{
"name": "JavaScript",
"bytes": "7114"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "28156"
},
{
"name": "Shell",
"bytes": "806"
}
],
"symlink_target": ""
} |
"""
Classes for managing in-memory trajectories.
"""
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
class Trajectory(pd.DataFrame):
    """
    A single trajectory is a DF of points, attributes, and computed values.
    Also a linked-list with next_traj to allow a series of computations transforming from trajectory to trajectory (smoothing, etc.)

    Columns follow a naming convention: ``attr_*`` for raw attributes and
    ``comp_*`` for computed values. Instances carry two extra attributes set
    by their creator: ``id`` and ``geo_cols``.
    """
    def pointwise_compute(self, func, name):
        """
        Compute a value for each point in the trajectory, and add it as a new column.
        :param func: Trajectory->column
        :param name: what should we call this new column?
        :return: self
        """
        self["comp_{}".format(name)] = func(self)
        return self
    def trajectory_compute(self, func, name):
        """
        Compute a new trajectory based on this one
        :param func: the function to apply to this trajectory
        :param name: the name of the resulting trajectory
        :return: tuple (name, trajectory) representing the resulting trajectory
        """
        # The derived trajectory is chained onto this one (linked list).
        self.next_traj = (name, func(self))
        return self.next_traj
    def get_next(self, depth):
        """
        get next_traj (recursive)
        :param depth: how deep is the trajectory to return?
        :return: Trajectory
        """
        if depth == 0:
            return self
        else:
            # Walk the linked list: next_traj is a (name, Trajectory) pair.
            return self.next_traj[1].get_next(depth-1)
    def get_attr(self, attr):
        """Return the raw attribute column named ``attr_<attr>``."""
        return self["attr_{}".format(attr)]
    def get_comp(self, comp):
        """Return the computed column named ``comp_<comp>``."""
        return self["comp_{}".format(comp)]
    def copy(self):
        """Copy the frame and carry over the ``id``/``geo_cols`` metadata."""
        # TODO: a constructor for this...
        new_traj = Trajectory(super(Trajectory, self).copy())
        new_traj.id = self.id
        new_traj.geo_cols = self.geo_cols
        return new_traj
    def limit_first(self, k):
        """Keep only the first ``k`` rows, dropping the rest in place."""
        to_drop = self.index.values[range(k, len(self))]
        self.drop(to_drop, axis=0, inplace=True)
        return self
    def combine(self, other, combine_f, name):
        """
        Merge rows of ``self`` and ``other`` into a new chained trajectory.

        :param other: the second Trajectory to combine with
        :param combine_f: f(self, other) -> array of 0/1 per row; 0 picks the
            row from ``self``, 1 picks it from ``other``
        :param name: name for the resulting chained trajectory
        :return: tuple (name, Trajectory), also stored as ``self.next_traj``
        """
        # combine the data
        combine_col = combine_f(self, other)
        # NOTE(review): both selections index by *other*'s index values; for
        # the 0-rows one might expect self.index -- verify against callers.
        # NOTE(review): DataFrame.ix was removed in modern pandas; this code
        # requires an old pandas version.
        data1 = self.ix[other.index.values[np.where(combine_col == 0)[0]]]
        data2 = other.ix[other.index.values[np.where(combine_col == 1)[0]]]
        data = pd.concat([data1, data2])
        data.sort_index(inplace=True)
        # build new Trajectory
        t = Trajectory(data)
        t.id = self.id
        t.geo_cols = self.geo_cols
        self.next_traj = (name, t)
        return self.next_traj
class TrajectorySet(object):
    """
    Represent a set of Trajectory objects, with associated metadata.

    Trajectories are stored in a private dict keyed by id. ``depth`` records
    how many derived trajectories have been chained onto each raw trajectory
    via :meth:`trajectory_compute`.
    """
    def __init__(self):
        self.__trajectories = dict()  # id -> raw Trajectory
        self.depth = 0

    def copy(self):
        """
        :return: Shallow copy
        """
        # NOTE(review): ``depth`` is not copied and resets to 0 on the copy --
        # confirm this is intended before copying computed sets.
        cp = TrajectorySet()
        cp.__trajectories = self.__trajectories.copy()
        return cp

    def filter(self, func):
        """
        Discard some of the trajectories
        :param func: function f:Trajectory->bool ; selects which Trajectory to keep
        :return: self
        """
        # items() instead of the Python-2-only iteritems(): behaves the same
        # on Python 2 and keeps the class usable on Python 3.
        self.__trajectories = {key: val for key, val in self.__trajectories.items()
                               if func(val)}
        return self

    def apply(self, func):
        """
        Replace trajectories by applying a function
        :param func: f: Trajectory->Trajectory
        :return: self
        """
        self.__trajectories = {key: func(val) for key, val in self.__trajectories.items()}
        return self

    def load_frame(self, data, id_col="TAG", time_col="TIME", geo_cols=("X", "Y"),
                   attr_cols=("NBS", "VARX", "VARY", "COVXY"), sort_time=True):
        """
        Build one Trajectory per unique value of ``id_col`` in ``data``.
        :param data: source pandas DataFrame
        :param id_col: this is the column that identifies a trajectory in the db (ex. animal_id)
        :param time_col: doesn't have to be DateTime
        :param geo_cols: the position information
        :param attr_cols: the attributes of localizations to keep in the Trajectory object
        :param sort_time: sort the rows of each trajectory by time? (Default: True)
        :return: self
        """
        # Keep only the columns requested, and give indicative names
        cols = [time_col] + list(geo_cols) + list(attr_cols)
        col_names = ["TIME"] + list(geo_cols) + ["attr_{}".format(attr) for attr in attr_cols]
        # Make Trajectory objects
        for id_ in data[id_col].unique():
            self.__trajectories[id_] = Trajectory(data.loc[data[id_col] == id_, cols])
            self.__trajectories[id_].id = id_
            self.__trajectories[id_].geo_cols = geo_cols
            self.__trajectories[id_].columns = col_names
            self.__trajectories[id_].set_index("TIME", inplace=True)
            if sort_time:
                self.__trajectories[id_].sort_index(inplace=True)
        return self

    def load_sqlite(self, path, id_col="TAG", time_col="TIME", geo_cols=("X", "Y"),
                    attr_cols=("NBS", "VARX", "VARY", "COVXY"), sort_time=True):
        """Load the LOCALIZATIONS table of a SQLite file; see :meth:`load_frame`."""
        # Sqlite connect
        conn = create_engine('sqlite:///{}'.format(path))
        data = pd.read_sql_table("LOCALIZATIONS", conn)
        # Use the regular DataFrame loader
        return self.load_frame(data, id_col, time_col, geo_cols, attr_cols, sort_time)

    def get(self, id_):
        """Return the raw (depth-0) trajectory for ``id_``."""
        return self.__trajectories[id_]

    def get_final(self, id_):
        """
        Get the last trajectory in the computation chain
        :param id_: the id of the trajectory to retrieve
        :return: Trajectory
        """
        return self.get(id_).get_next(self.depth)

    def iter(self):
        """Yield every raw trajectory in the set."""
        for id_ in self.__trajectories:
            yield self.__trajectories[id_]

    def ids(self):
        """Return the ids of all trajectories in the set."""
        return self.__trajectories.keys()

    def pointwise_compute(self, func, name):
        """Add a computed column ``name`` to the tip of every chain."""
        for t in self.iter():
            t.get_next(self.depth).pointwise_compute(func, name)
        return self

    def trajectory_compute(self, func, name, depth=None):
        """
        Append a derived trajectory to every chain.
        :param depth: chain position to derive from (default: the current tip)
        """
        # Bug fix: the previous ``depth or self.depth`` silently ignored an
        # explicit depth of 0; test against None instead.
        depth = self.depth if depth is None else depth
        self.depth += 1
        for t in self.iter():
            t.get_next(depth).trajectory_compute(func, name)
        return self

    def combine(self, other, depth, other_depth, combine_f, name):
        """Combine each trajectory (at ``depth``) with its counterpart in ``other``."""
        trajs = self.ids()
        for id_ in trajs:
            self.get(id_).get_next(depth).combine(other.get(id_).get_next(other_depth), combine_f, name)
        return self

    def __repr__(self):
        # One row per trajectory id: raw size plus "<name> (<size>)" for each
        # derived trajectory in its chain.
        ans = {}
        for t in self.iter():
            id = t.id
            d = ["raw ({})".format(t.shape[0])]
            for i in range(self.depth):
                name, next = t.get_next(i).next_traj
                d.append("{} ({})".format(name, next.shape[0]))
            ans[id] = d
        return pd.DataFrame(ans).T.__repr__()
if __name__ == "__main__":
    # Intentionally empty: the module only defines classes and has no CLI.
    pass
"content_hash": "009fb20db7b1ab169a45ea150e89ce9c",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 132,
"avg_line_length": 33.54679802955665,
"alnum_prop": 0.5737151248164464,
"repo_name": "Hezi-Resheff/trajectory-aa-move-ecol",
"id": "7f5676c0a3064aee5263736fb1baca477e18fc38",
"size": "6812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metraj/trajectory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40206"
}
],
"symlink_target": ""
} |
"""
Small module for use with the wake on lan protocol.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import socket
import struct
# Default UDP broadcast target for magic packets.
BROADCAST_IP = '255.255.255.255'
# Port 9 (the "discard" port) is the conventional Wake-on-LAN destination.
DEFAULT_PORT = 9
def create_magic_packet(macaddress):
    """
    Build a Wake-on-LAN magic packet for the given MAC address.

    The packet is a synchronization stream of six 0xFF bytes followed by
    repetitions of the MAC address.

    Keyword arguments:
    :arg macaddress: MAC address, either 12 hex digits or 17 characters
        with a single-character separator (e.g. ``aa:bb:cc:dd:ee:ff``).
    """
    if len(macaddress) == 17:
        # Strip the separator character (':', '-', ...).
        macaddress = macaddress.replace(macaddress[2], '')
    elif len(macaddress) != 12:
        raise ValueError('Incorrect MAC address format')
    # Synchronization stream plus the repeated MAC, still as hex text.
    payload = b'FFFFFFFFFFFF' + (macaddress * 20).encode()
    # Convert each pair of hex digits into one byte of the packet.
    return b''.join(struct.pack(b'B', int(payload[i:i + 2], 16))
                    for i in range(0, len(payload), 2))
def send_magic_packet(*macs, **kwargs):
    """
    Wakes the computer with the given mac address if wake on lan is
    enabled on that host.

    Keyword arguments:
    :arguments macs: One or more macaddresses of machines to wake.
    :key ip_address: the ip address of the host to send the magic packet
                     to (default "255.255.255.255")
    :key port: the port of the host to send the magic packet to
               (default 9)
    :raises TypeError: if an unknown keyword argument is supplied.
    """
    ip = kwargs.pop('ip_address', BROADCAST_IP)
    port = kwargs.pop('port', DEFAULT_PORT)
    for k in kwargs:
        raise TypeError('send_magic_packet() got an unexpected keyword ' 'argument {!r}'.format(k))
    # Build every packet up front so an invalid MAC fails before any I/O.
    packets = [create_magic_packet(mac) for mac in macs]
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.connect((ip, port))
        for packet in packets:
            sock.send(packet)
    finally:
        # Bug fix: always release the socket, even when connect()/send()
        # raises (previously the descriptor leaked on error).
        sock.close()
| {
"content_hash": "9c5d5b6b9ea96dc5af4f1ad1ac9f8360",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 99,
"avg_line_length": 27.930555555555557,
"alnum_prop": 0.634012928891099,
"repo_name": "ywangd/stash",
"id": "48c1a51a97dcdf2e6982889462ee001d9659dd58",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/wakeonlan/wol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "939583"
},
{
"name": "Shell",
"bytes": "1648"
}
],
"symlink_target": ""
} |
"""this example illustrates how to replace SQLAlchemy's class descriptors with
a user-defined system.
This sort of thing is appropriate for integration with frameworks that
redefine class behaviors in their own way, such that SQLA's default
instrumentation is not compatible.
The example illustrates redefinition of instrumentation at the class level as
well as the collection level, and redefines the storage of the class to store
state within "instance._goofy_dict" instead of "instance.__dict__". Note that
the default collection implementations can be used with a custom attribute
system as well.
"""
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, Text,\
ForeignKey
from sqlalchemy.orm import mapper, relationship, Session,\
InstrumentationManager
from sqlalchemy.orm.attributes import set_attribute, get_attribute, \
del_attribute
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.orm.collections import collection_adapter
class MyClassState(InstrumentationManager):
    """Alternative instrumentation manager: ORM state is kept in a side
    table keyed by ``id(instance)`` and the instance dict is
    ``instance._goofy_dict``."""

    def __init__(self, cls):
        # Maps id(instance) -> SQLAlchemy instance state.
        self.states = {}

    def instrument_attribute(self, class_, key, attr):
        pass  # no per-attribute setup needed

    def install_descriptor(self, class_, key, attr):
        pass  # attribute access is routed through MyClass dunders instead

    def uninstall_descriptor(self, class_, key, attr):
        pass

    def instrument_collection_class(self, class_, key, collection_class):
        # Every relationship collection uses the custom MyCollection.
        return MyCollection

    def get_instance_dict(self, class_, instance):
        return instance._goofy_dict

    def initialize_instance_dict(self, class_, instance):
        instance.__dict__['_goofy_dict'] = {}

    def initialize_collection(self, key, state, factory):
        collection = factory()
        adapter = MyCollectionAdapter(key, state, collection)
        return adapter, collection

    def install_state(self, class_, instance, state):
        self.states[id(instance)] = state

    def state_getter(self, class_):
        # Return a callable that resolves an instance to its stored state.
        return lambda instance: self.states[id(instance)]
class MyClass(object):
    """Base class whose persisted state lives in ``self._goofy_dict``.

    Instrumented attributes are routed through SQLAlchemy's attribute API;
    everything else falls back to the private dict.
    """
    __sa_instrumentation_manager__ = MyClassState

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __getattr__(self, key):
        if is_instrumented(self, key):
            return get_attribute(self, key)
        try:
            return self._goofy_dict[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        if not is_instrumented(self, key):
            self._goofy_dict[key] = value
        else:
            set_attribute(self, key, value)

    def __delattr__(self, key):
        if not is_instrumented(self, key):
            del self._goofy_dict[key]
        else:
            del_attribute(self, key)
class MyCollectionAdapter(object):
    """A wholly alternative collection-instrumentation implementation.

    Bridges a MyCollection to SQLAlchemy's attribute event system: mutation
    methods either fire events (``*_with_event``) or stay silent
    (``*_without_event``).
    """
    def __init__(self, key, state, collection):
        self.key = key
        self.state = state
        self.collection = collection
        # The collection finds its adapter back through this attribute.
        setattr(collection, '_sa_adapter', self)
    def unlink(self, data):
        # Detach the adapter from a collection that is being replaced.
        setattr(data, '_sa_adapter', None)
    def adapt_like_to_iterable(self, obj):
        return iter(obj)
    def append_with_event(self, item, initiator=None):
        self.collection.add(item, emit=initiator)
    def append_multiple_without_event(self, items):
        # Bulk load: bypass the collection's add() and its events entirely.
        self.collection.members.extend(items)
    def append_without_event(self, item):
        self.collection.add(item, emit=False)
    def remove_with_event(self, item, initiator=None):
        self.collection.remove(item, emit=initiator)
    def remove_without_event(self, item):
        self.collection.remove(item, emit=False)
    def clear_with_event(self, initiator=None):
        for item in list(self):
            self.remove_with_event(item, initiator)
    def clear_without_event(self):
        for item in list(self):
            self.remove_without_event(item)
    def __iter__(self):
        return iter(self.collection)
    def fire_append_event(self, item, initiator=None):
        # initiator=False means "suppress the event" (see *_without_event).
        if initiator is not False and item is not None:
            self.state.get_impl(self.key).\
                fire_append_event(self.state, self.state.dict, item,
                                    initiator)
    def fire_remove_event(self, item, initiator=None):
        if initiator is not False and item is not None:
            self.state.get_impl(self.key).\
                fire_remove_event(self.state, self.state.dict, item,
                                    initiator)
    def fire_pre_remove_event(self, initiator=None):
        self.state.get_impl(self.key).\
            fire_pre_remove_event(self.state, self.state.dict,
                                    initiator)
class MyCollection(object):
    """List-backed collection that fires adapter events on mutation.

    Returned by MyClassState.instrument_collection_class, so every mapped
    relationship collection is one of these.
    """
    def __init__(self):
        self.members = list()
    def add(self, object, emit=None):
        # NOTE(review): parameter ``object`` shadows the builtin; renaming it
        # would change the keyword-call interface, so it is left as-is.
        self.members.append(object)
        collection_adapter(self).fire_append_event(object, emit)
    def remove(self, object, emit=None):
        # NOTE(review): fire_pre_remove_event's parameter is ``initiator``,
        # yet the removed object is passed here -- verify intent.
        collection_adapter(self).fire_pre_remove_event(object)
        self.members.remove(object)
        collection_adapter(self).fire_remove_event(object, emit)
    def __getitem__(self, index):
        return self.members[index]
    def __iter__(self):
        return iter(self.members)
    def __len__(self):
        return len(self.members)
if __name__ == '__main__':
    # Demo: map two classes onto an in-memory SQLite database and check that
    # the custom instrumentation behaves like SQLAlchemy's default one.
    meta = MetaData(create_engine('sqlite://'))
    table1 = Table(
        'table1', meta,
        Column('id', Integer, primary_key=True),
        Column('name', Text))
    table2 = Table(
        'table2', meta,
        Column('id', Integer, primary_key=True),
        Column('name', Text),
        Column('t1id', Integer, ForeignKey('table1.id')))
    meta.create_all()

    class A(MyClass):
        pass

    class B(MyClass):
        pass

    mapper(A, table1, properties={'bs': relationship(B)})
    mapper(B, table2)

    # Attribute and collection access work before any flush...
    a1 = A(name='a1', bs=[B(name='b1'), B(name='b2')])
    assert a1.name == 'a1'
    assert a1.bs[0].name == 'b1'
    assert isinstance(a1.bs, MyCollection)

    sess = Session()
    sess.add(a1)
    sess.commit()

    # ... and after a round-trip through the database.
    a1 = sess.query(A).get(a1.id)
    assert a1.name == 'a1'
    assert a1.bs[0].name == 'b1'
    assert isinstance(a1.bs, MyCollection)

    # Collection removal events propagate to the unit of work.
    a1.bs.remove(a1.bs[0])
    sess.commit()
    a1 = sess.query(A).get(a1.id)
    assert len(a1.bs) == 1
| {
"content_hash": "cdfbd88a58b06eb5fc0895035c1c84ea",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 78,
"avg_line_length": 31.36231884057971,
"alnum_prop": 0.6115218730745533,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "ebd18a6fa016627036a683df833541c3cc4e4e84",
"size": "6492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/sqlalchemy/examples/custom_attributes/custom_management.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
} |
import io
import os
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
    """Check that every test module in this tree registers itself with
    Chainer's test runner."""

    def test_runnable(self):
        root = os.path.dirname(__file__)
        for dirpath, _dirnames, filenames in os.walk(root):
            # Only real test modules count; skip package markers.
            candidates = [name for name in filenames
                          if name.endswith('.py') and '__init__' not in name]
            for name in candidates:
                path = os.path.join(dirpath, name)
                with io.open(path, encoding='utf-8') as source_file:
                    source = source_file.read()
                self.assertIn(
                    'testing.run_module(__name__, __file__)', source,
                    '''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
# Register this module with Chainer's test runner (the very convention the
# test above enforces).
testing.run_module(__name__, __file__)
| {
"content_hash": "45c0e05ad8c336f90d71f582a608e7ee",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 31.96,
"alnum_prop": 0.5369211514392991,
"repo_name": "tkerola/chainer",
"id": "2fe3ca5aaf9d31f81d7b31a1830c6647c578d1ed",
"size": "799",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/test_runnable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from tagging.registry import register
from geopy import distance
from ..machinetags.models import MachineTaggedItem, add_machinetag
# Usernames that may never be registered: they collide with site URLs and
# endpoints, or are abuse-prone. Stored as a set for fast membership tests.
RESERVED_USERNAMES = set((
    # Trailing spaces are essential in these strings, or split() will be buggy
    # (adjacent implicitly-concatenated fragments would fuse boundary words).
    'feed www help security porn manage smtp fuck pop manager api owner shit '
    'secure ftp discussion blog features test mail email administrator '
    'xmlrpc web xxx pop3 abuse atom complaints news information imap cunt rss '
    'info pr0n about forum admin weblog team feeds root about info news blog '
    'forum features discussion email abuse complaints map skills tags ajax '
    'comet poll polling thereyet filter search zoom machinetags search django '
    'people profiles profile person navigate nav browse manage static css img '
    'javascript js code flags flag country countries region place places '
    'photos owner maps upload geocode geocoding login logout openid openids '
    'recover lost signup reports report flickr upcoming mashups recent irc '
    'group groups bulletin bulletins messages message newsfeed events company '
    'companies active'
).split())
class CountryManager(models.Manager):
    """Manager adding convenience queries for Country."""

    def top_countries(self):
        """Return countries ordered by their denormalised people count,
        most populated first."""
        queryset = self.get_queryset()
        return queryset.order_by('-num_people')
class Country(models.Model):
    """Geonames-derived country record, plus a denormalised people count."""
    # Longest len('South Georgia and the South Sandwich Islands') = 44
    name = models.CharField(_('Name'), max_length=50)
    iso_code = models.CharField(_('ISO code'), max_length=2, unique=True)
    iso_numeric = models.CharField(_('ISO numeric code'), max_length=3,
                                   unique=True)
    iso_alpha3 = models.CharField(_('ISO alpha-3'), max_length=3, unique=True)
    fips_code = models.CharField(_('FIPS code'), max_length=2, unique=True)
    continent = models.CharField(_('Continent'), max_length=2)
    # Longest len('Grand Turk (Cockburn Town)') = 26
    capital = models.CharField(_('Capital'), max_length=30, blank=True)
    area_in_sq_km = models.FloatField(_('Area in square kilometers'))
    population = models.IntegerField(_('Population'))
    currency_code = models.CharField(_('Currency code'), max_length=3)
    # len('en-IN,hi,bn,te,mr,ta,ur,gu,ml,kn,or,pa,as,ks,sd,sa,ur-IN') = 56
    languages = models.CharField(_('Languages'), max_length=60)
    geoname_id = models.IntegerField(_('Geonames ID'))
    # Bounding boxes
    bbox_west = models.FloatField()
    bbox_north = models.FloatField()
    bbox_east = models.FloatField()
    bbox_south = models.FloatField()
    # De-normalised; refreshed by DjangoPerson.save()
    num_people = models.IntegerField(_('Number of people'), default=0)
    objects = CountryManager()
    def top_regions(self):
        """Return this country's regions ordered by population, descending."""
        # Returns populated regions in order of population
        regions = self.region_set.order_by('-num_people')
        return regions.select_related('country')
    class Meta:
        ordering = ('name',)
        verbose_name = _('Country')
        verbose_name_plural = _('Countries')
    def __str__(self):
        return '%s' % self.name
    @property
    def flag_url(self):
        """Static-file path of this country's SVG flag."""
        return 'djangopeople/img/flags/%s.svg' % self.iso_code.lower()
class Region(models.Model):
    """Sub-national region (state/province) belonging to a Country."""
    code = models.CharField(_('Code'), max_length=20)
    name = models.CharField(_('Name'), max_length=50)
    country = models.ForeignKey(Country, models.CASCADE, verbose_name=_('Country'))
    flag = models.CharField(_('Flag'), max_length=100, blank=True)
    bbox_west = models.FloatField()
    bbox_north = models.FloatField()
    bbox_east = models.FloatField()
    bbox_south = models.FloatField()
    # De-normalised; refreshed by DjangoPerson.save()
    num_people = models.IntegerField(_('Number of people'), default=0)
    def get_absolute_url(self):
        """URL of the region page, keyed by country ISO code and region code."""
        return reverse('country_region', args=[self.country.iso_code.lower(),
                                               self.code.lower()])
    def __str__(self):
        return '%s' % self.name
    class Meta:
        ordering = ('name',)
        verbose_name = _('Region')
        verbose_name_plural = _('Regions')
    @property
    def flag_url(self):
        # NOTE(review): yields just 'djangopeople/' when ``flag`` is blank --
        # presumably callers guard on ``flag`` being set; confirm.
        return 'djangopeople/%s' % self.flag
    @property
    def small_flag_url(self):
        """Static-file path of the small PNG flag for this region."""
        return 'djangopeople/img/regions/%s/%s.png' % (
            self.country.iso_code.lower(),
            self.code.lower(),
        )
class DjangoPerson(models.Model):
    """Profile attached one-to-one to an auth User: location, machine tags,
    OpenID delegation, and denormalised statistics."""
    user = models.OneToOneField(User, models.CASCADE, verbose_name=_('User'))
    bio = models.TextField(_('Bio'), blank=True)
    # Location stuff - all location fields are required
    country = models.ForeignKey(Country, models.CASCADE, verbose_name=_('Country'))
    region = models.ForeignKey(
        Region,
        models.CASCADE,
        verbose_name=_('Region'),
        blank=True,
        null=True,
    )
    latitude = models.FloatField(_('Latitude'))
    longitude = models.FloatField(_('Longitude'))
    location_description = models.CharField(_('Location'), max_length=50)
    # Stats
    profile_views = models.IntegerField(_('Profile views'), default=0)
    # Machine tags
    machinetags = GenericRelation(MachineTaggedItem)
    add_machinetag = add_machinetag
    # OpenID delegation
    openid_server = models.URLField(_('OpenID server'), max_length=255,
                                    blank=True)
    openid_delegate = models.URLField(_('OpenID delegate'), max_length=255,
                                      blank=True)
    # Last active on IRC
    last_active_on_irc = models.DateTimeField(_('Last active on IRC'),
                                              blank=True, null=True)
    @property
    def latitude_str(self):
        """Latitude as a string (convenient for templates)."""
        return str(self.latitude)
    @property
    def longitude_str(self):
        """Longitude as a string (convenient for templates)."""
        return str(self.longitude)
    def irc_nick(self):
        """Return the im:django machine tag value, or '<none>' if unset."""
        try:
            return self.machinetags.filter(namespace='im',
                                           predicate='django')[0].value
        except IndexError:
            return _('<none>')
    def get_nearest(self, num=5):
        "Returns the nearest X people, but only within the same continent"
        # TODO: Add caching
        people = list(self.country.djangoperson_set.select_related().exclude(
            pk=self.id,
        ))
        if len(people) <= num:
            # Not enough in country
            # use people from the same continent instead
            people = list(DjangoPerson.objects.filter(
                country__continent=self.country.continent,
            ).exclude(pk=self.id).select_related())
        # Sort and annotate people by distance
        for person in people:
            person.distance_in_miles = int(distance.distance(
                (self.latitude, self.longitude),
                (person.latitude, person.longitude)
            ).miles)
        # Return the nearest X
        people.sort(key=lambda x: x.distance_in_miles)
        return people[:num]
    def location_description_html(self):
        """Location description with the trailing region rendered as a link
        (falls back to the plain description when no region is set)."""
        region = ''
        if self.region:
            region = '<a href="%s">%s</a>' % (
                self.region.get_absolute_url(), self.region.name
            )
            bits = self.location_description.split(', ')
            # Replace the last component if it names the region; otherwise
            # append the region link.
            if len(bits) > 1 and bits[-1] == self.region.name:
                bits[-1] = region
            else:
                bits.append(region)
            bits[:-1] = map(escape, bits[:-1])
            return mark_safe(', '.join(bits))
        else:
            return self.location_description
    def __str__(self):
        return '%s' % self.user.get_full_name()
    def get_absolute_url(self):
        return reverse('user_profile', args=[self.user.username])
    # TODO: Put in transaction
    def save(self, force_insert=False, force_update=False, **kwargs):
        """Save the profile, then refresh the denormalised people counters
        on the related country and region."""
        # Bug fix: forward the caller's force flags; they were previously
        # hard-coded to False and silently ignored.
        super().save(force_insert=force_insert, force_update=force_update,
                     **kwargs)
        self.country.num_people = self.country.djangoperson_set.count()
        self.country.save()
        if self.region:
            self.region.num_people = self.region.djangoperson_set.count()
            self.region.save()
    class Meta:
        verbose_name = _('Django person')
        verbose_name_plural = _('Django people')
    def irc_tracking_allowed(self):
        """True unless the person set the privacy:irctrack=private tag."""
        return not self.machinetags.filter(
            namespace='privacy', predicate='irctrack', value='private',
        ).count()
# django-tagging: exposes DjangoPerson.skilltags and the skilltagged manager.
register(DjangoPerson, tag_descriptor_attr='skilltags', tagged_item_manager_attr='skilltagged')
class PortfolioSite(models.Model):
    """A website contributed by (and attributed to) a DjangoPerson."""
    title = models.CharField(_('Title'), max_length=100)
    url = models.URLField(_('URL'), max_length=255)
    contributor = models.ForeignKey(DjangoPerson, models.CASCADE, verbose_name=_('Contributor'))
    def __str__(self):
        return '%s <%s>' % (self.title, self.url)
    class Meta:
        verbose_name = _('Portfolio site')
        verbose_name_plural = _('Portfolio sites')
class CountrySite(models.Model):
    "Community sites for various countries"
    title = models.CharField(_('Title'), max_length=100)
    url = models.URLField(_('URL'), max_length=255)
    country = models.ForeignKey(Country, models.CASCADE, verbose_name=_('Country'))
    def __str__(self):
        return '%s <%s>' % (self.title, self.url)
    class Meta:
        verbose_name = _('Country site')
        verbose_name_plural = _('Country sites')
| {
"content_hash": "75d0ee67e0e76a6a5ecf0923d108fc17",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 96,
"avg_line_length": 36.21132075471698,
"alnum_prop": 0.6281784076698624,
"repo_name": "brutasse/djangopeople",
"id": "ba2892c2e03cb512c76c798cd279151418ff5d5a",
"size": "9596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangopeople/djangopeople/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "42452"
},
{
"name": "CSS",
"bytes": "36679"
},
{
"name": "HTML",
"bytes": "53446"
},
{
"name": "JavaScript",
"bytes": "14651"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "182699"
}
],
"symlink_target": ""
} |
"""
admin: modelos de administración del systema de establecimientos
@author Camilo Ramírez
@contact camilolinchis@gmail.com
camilortte@hotmail.com
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@satus Pre-Alpha
@version= 0..215
"""
from datetime import datetime
#Django
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages, admin
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db import IntegrityError
#External apps
from notifications import notify
#Models
from .models import (
Categoria, Establecimiento,
SubCategoria, Imagen ,EstablecimientoTemporal,
Solicitud, TiposSolicitud,Comentario)
#Forms
from .forms import EstablecimientoAdminForm
class CommentAdmin(admin.ModelAdmin):
    u"""
    Admin configuration for presenting comments (Comentario).
    """
    # Configuration kept commented out from a previous iteration:
    # raw_id_fields = ('post', )
    # list_display= ('author', 'post', 'body', 'is_public',)
    # list_filter = ('is_public',)
    # search_fields = ('post', )
    # ordering = ('author',)
    # NOTE(review): ModelAdmin does not read an inner Meta class; this looks
    # like a leftover with no effect -- the model is bound at registration.
    class Meta:
        model = Comentario
class ImagenInline(admin.StackedInline):
    """
    Inline editor so images (Imagen) can be managed from the parent
    establishment's admin page.
    """
    model = Imagen
class SolicitudAdmin(admin.ModelAdmin):
    u"""
    Admin presentation and approval workflow for requests (solicitudes).
    """
    list_display = ('usuarios','establecimientos','tipo_solicitudes','aprobar')
    # Custom change form that renders the related establishment data.
    change_form_template = 'establishment/admin.html'
    fieldsets = (
        (None, {
            'fields': ('usuarios','tipo_solicitudes', 'establecimientos', 'contenido' ,'establecimientos_temporales')
        }),
        ('Advanced options', {
            'classes': ('grp-collapse grp-closed',),
            'fields': ('fecha_creada','aprobar')
        }),
    )
    list_filter = ('tipo_solicitudes','aprobar',)
    search_fields = ('usuarios', 'establecimientos', )
    ordering = ('fecha_creada',)
def change_view(self, request, object_id, form_url='', extra_context=None):
"""
Se agregan al contexto los ddatos de la solicitud
"""
extra_context = extra_context or {}
try:
obj=Solicitud.objects.get(id=object_id)
id_establecimiento=obj.establecimientos.id
except Exception:
id_establecimiento=False
try:
id_establecimiento_temporal = obj.establecimientos_temporales.id
except Exception:
id_establecimiento_temporal =False
#Query de establecimiento
if id_establecimiento:
for establecimiento in Establecimiento.objects.filter(id=id_establecimiento):
establecimiento.fields = dict((field.name, field.value_to_string(establecimiento))
for field in establecimiento._meta.fields)
else:
establecimiento=False
#Query de establecimiento temporal
if id_establecimiento_temporal:
for establecimiento_temporal in EstablecimientoTemporal.objects.filter(id=id_establecimiento_temporal):
establecimiento_temporal.fields = dict((field.name, field.value_to_string(establecimiento_temporal))
for field in establecimiento_temporal._meta.fields)
else:
establecimiento_temporal=False
if establecimiento:
extra_context['establecimiento'] = establecimiento
if establecimiento_temporal:
extra_context['establecimiento_temporal'] = establecimiento_temporal
return super(SolicitudAdmin, self).change_view(request, object_id, form_url, extra_context=extra_context)
    class Media:
        # Extra assets for the change form: jQuery plus the script that
        # loads the related request data shown by change_view.
        js = (
            'http://code.jquery.com/jquery-2.1.1.min.js', # jquery
            'js/related_solicitud_admin.js', # project static folder
        )
    # NOTE(review): ModelAdmin does not read an inner Meta; likely a leftover.
    class Meta:
        model = Solicitud
def get_readonly_fields(self, request, obj=None):
"""
Si la solicitud se aprobo ya no puede editarse
"""
if obj is not None :
if obj.aprobar:
return self.fields or [f.name for f in self.model._meta.fields]
return super(SolicitudAdmin, self).get_readonly_fields(request, obj)
def save_model(self, request, obj, form, change):
"""
Almacena la solicitud dependiendo tel tipo de solicitud al que
corresponde
"""
print obj.aprobar
if obj.aprobar:
try:
if form.cleaned_data['aprobar'] == True:
if form.cleaned_data['tipo_solicitudes'].nombre=='eliminacion':
if self.aprobar_eliminacion(request,form,obj):
obj.save()
saludo=u"Hola "+ obj.usuarios.first_name+", tu Solicitud de "\
+form.cleaned_data['tipo_solicitudes'].tag+" fue aprobada.".decode('utf-8')
notify.send(
request.user,
recipient= obj.usuarios,
verb="Solicitud aprobada",
description=saludo,
timestamp=datetime.now()
)
print "Solicitud de aliminacion Aprobada"
elif form.cleaned_data['tipo_solicitudes'].nombre=='modificacion':
if self.aprobar_modificacion(request,form,obj):
obj.save()
saludo=u"Hola "+ obj.usuarios.first_name+", tu Solicitud de "\
+form.cleaned_data['tipo_solicitudes'].tag+" fue aprobada.".decode('utf-8')
notify.send(
request.user,
recipient= obj.usuarios,
verb="Solicitud aprobada",
description=saludo,
timestamp=datetime.now()
)
print "Solicitud de modificacion Aprobada"
elif form.cleaned_data['tipo_solicitudes'].nombre=='administracion':
if self.aprobar_administracion(request,form,obj):
obj.save()
saludo=u"Hola "+ obj.usuarios.first_name+", tu Solicitud de "\
+form.cleaned_data['tipo_solicitudes'].tag+" fue aprobada.".decode('utf-8')
notify.send(
request.user,
recipient= obj.usuarios,
verb="Solicitud aprobada",
description=saludo,
timestamp=datetime.now()
)
print "Solicitud de administracion Aprobada"
elif form.cleaned_data['tipo_solicitudes'].nombre=='desactivacion':
if self.aprobar_desactivacon(request,form,obj):
obj.save()
saludo=u"Hola "+ obj.usuarios.first_name+", tu Solicitud de "\
+form.cleaned_data['tipo_solicitudes'].tag+" fue aprobada.".decode('utf-8')
notify.send(
request.user,
recipient= obj.usuarios,
verb="Solicitud aprobada",
description=saludo,
timestamp=datetime.now()
)
print "Solicitud de desactivacion Aprobada"
except Exception, e:
print "No se puede editar: ",e
#raise e
else:
obj.save()
def aprobar_desactivacon(self,request,form,obj):
"""
Aprueba desactivar un establecimeinto
"""
try:
obj.establecimientos.visible=False
obj.establecimientos.save()
self.message_user(request,
_(("Se aprobó la desactivacion de eliminacion del establecimiento"+str(obj.establecimientos)).decode("utf-8")),
level=messages.INFO, extra_tags='', fail_silently=False)
return True
except Exception, e:
print "ERROR: ", e
self.message_user(request,
_(("No Se aprobó la solicitud de eliminacion del establecimiento"+str(obj.establecimientos)).decode("utf-8")),
level=messages.ERROR, extra_tags='', fail_silently=False)
return False
    #Method invoked when a deletion request is approved
    def aprobar_eliminacion(self,request,form,obj):
        """
        Approve a deletion request: delete the establishment, then delete
        the request itself via the admin delete view.
        Returns True on success, False on failure.
        """
        try:
            object_id=str(obj.id)
            # Delete the establishment itself.
            obj.establecimientos.delete()
            # Then remove the (now-fulfilled) request through the admin view.
            super(SolicitudAdmin, self).delete_view(request, object_id, extra_context=None)
            # NOTE(review): str(obj.establecimientos) is evaluated after the
            # delete -- verify it still renders as expected here.
            self.message_user(request,
                _(("Se aprobó la solicitud de eliminacion del establecimiento"+str(obj.establecimientos)).decode("utf-8")),
                level=messages.INFO, extra_tags='', fail_silently=False)
            return True
        except Exception, e:
            print "ERROR: ", e
            self.message_user(request,
                _(("No Se aprobó la solicitud de eliminacion del establecimiento"+str(obj.establecimientos)).decode("utf-8")),
                level=messages.ERROR, extra_tags='', fail_silently=False)
            return False
#Metodo cuando se aprueba solicitud de modificacion
def aprobar_modificacion(self,request,form,obj):
"""
Aprueba modificar un establecimiento
"""
#establecimientos_temporales=EstablecimientoTemporal.objects.filter(solicitudes=obj.id)
if(obj.establecimientos_temporales):
# print self.__dict__
# print "\n", obj.__dict__
#print self.establecimientos_temporales
# self.establecimiento=self.establecimientos_temporales
establecimiento=obj.establecimientos
establecimiento_temp=EstablecimientoTemporal.objects.get(id=obj.establecimientos_temporales.id)
#establecimiento_temp=EstablecimientoTemporal.objects.filter(solicitudes=obj.id)
if (establecimiento_temp.nombre):
establecimiento.nombre = establecimiento_temp.nombre
if (establecimiento_temp.email):
establecimiento.email = establecimiento_temp.email
if (establecimiento_temp.web_page):
establecimiento.web_page = establecimiento_temp.web_page
if (establecimiento_temp.address):
establecimiento.address = establecimiento_temp.address
if (establecimiento_temp.description):
establecimiento.description = establecimiento_temp.description
if (establecimiento_temp.position):
establecimiento.position= establecimiento_temp.position
if (establecimiento_temp.sub_categorias):
establecimiento.sub_categorias= establecimiento_temp.sub_categorias
establecimiento.save()
self.message_user(request,
_(("Se aprobó la solicitud de modificacion del establecimiento"+str(obj.establecimientos)).decode("utf-8")),
level=messages.INFO, extra_tags='', fail_silently=False)
return True
else:
print "No se puede aprobar la modificacion."
self.message_user(request,
_(("No Se aprobó la solicitud, el modificación para el establecimiento"+str(obj.establecimientos)).decode('utf-8')),
level=messages.INFO, extra_tags='', fail_silently=False)
return False
    #Metodo cuandop se aprueba solicitud de administracion
    def aprobar_administracion(self,request,form,obj):
        """
        Approve an administration request: grant the requesting user
        admin rights over the establishment, promoting the account to an
        organizational user if needed. Returns True on success.
        """
        if(obj.usuarios):
            establecimientos=obj.establecimientos
            # Debug output: current admin relation for this user/establishment.
            print Establecimiento.objects.filter(administradores=obj.usuarios.id,id=establecimientos.id)
            print "USUARIO ID ", obj.usuarios.id
            # Only add the user if they are not already an administrator.
            if not (Establecimiento.objects.filter(administradores=obj.usuarios.id,id=establecimientos.id)):
                establecimientos.administradores.add(obj.usuarios)
                print obj.usuarios
                self.message_user(request,
                    _(("Se aprobó la solicitud, el usuario "+str(obj.usuarios)+" ya es administrador de "+str(obj.establecimientos)).decode("utf-8")),
                    level=messages.INFO, extra_tags='', fail_silently=False)
                # Promote regular accounts to "organizational" so they can
                # manage establishments; superusers are left untouched.
                if not obj.usuarios.is_organizacional() and not obj.usuarios.is_superuser:
                    obj.usuarios.change_to_organizational()
                    obj.usuarios.save()
                    print "El usuario",obj.usuarios,"Se cambia a usuario organizacional"
                else:
                    print "Mierda"
                return True
            else:
                # User already administers this establishment -> reject.
                self.message_user(request,
                    ("No Se aprobó la solicitud, el usuario "+str(obj.usuarios)+" es ahora administrador de "+str(obj.establecimientos)).decode("utf-8"),
                    level=messages.ERROR, extra_tags='', fail_silently=False)
                return False
        else:
            # Request has no user attached: nothing to approve.
            return False
def response_delete(request, obj_display):
"""
Redirige cuando eliminan establecimiento
"""
return HttpResponseRedirect(reverse('admin:establishment_system'))
class ImagenAdmin(admin.ModelAdmin):
    u"""
    Admin configuration for establishment images (Imagen).
    """
    # 'imagen_thumbnail' is rendered by a helper on the model/admin.
    list_display = ('id','imagen_thumbnail','establecimientos','date_uploaded','usuarios')
    list_filter = ('usuarios', 'establecimientos', )
    # NOTE(review): search_fields on bare relation names usually needs a
    # lookup path such as 'usuarios__username' -- confirm this works.
    search_fields = ('usuarios', 'establecimientos', )
    ordering = ('date_uploaded',)
    # NOTE(review): ModelAdmin ignores an inner Meta.model; the binding to
    # Imagen actually comes from the register() call.
    class Meta:
        model=Imagen
class CategoriasAdmin(admin.ModelAdmin):
    u"""
    Admin configuration for top-level categories (Categoria).
    """
    list_display = ('id','tag',)
    search_fields = ('tag', )
    # NOTE(review): ModelAdmin ignores an inner Meta.model; harmless.
    class Meta:
        model=Categoria
class SubCategoriasAdmin(admin.ModelAdmin):
    u"""
    Admin configuration for sub-categories (SubCategoria).
    """
    list_display = ('id','tag','categorias',)
    list_filter = ('categorias', )
    search_fields = ('nombre', 'tag', )
    # NOTE(review): ModelAdmin ignores an inner Meta.model; harmless.
    class Meta:
        model=SubCategoria
class TipoSolicitudAdmin(admin.ModelAdmin):
    u"""
    Admin configuration for request types (TiposSolicitud).
    """
    list_display = ('id','tag',)
    list_filter = ('tag', )
    search_fields = ('nombre', 'tag', )
    # NOTE(review): ModelAdmin ignores an inner Meta.model; harmless.
    class Meta:
        model=TiposSolicitud
# Hook the establishment-system models up to the default admin site.
admin.site.register(Categoria, CategoriasAdmin)
admin.site.register(SubCategoria, SubCategoriasAdmin)
admin.site.register(Imagen,ImagenAdmin)
admin.site.register(EstablecimientoTemporal)  # default ModelAdmin
admin.site.register(Solicitud, SolicitudAdmin)
admin.site.register(TiposSolicitud,TipoSolicitudAdmin)
admin.site.register(Comentario, CommentAdmin)
"""
Modificación del mapa de GeoDjango
"""
from django.contrib.gis import admin
from django.contrib.gis.geos import GEOSGeometry
class EstablecimientoAdmin(admin.OSMGeoAdmin):
    """
    Geo-enabled admin for Establecimiento; the map widget is rendered on
    a Google Maps base via a custom template.
    """
    g = GEOSGeometry('POINT (-74.157175 4.578896)') # Set map center
    g.set_srid(4326)  # WGS84 lon/lat
    #g.transform(900913)
    default_lon = int(g.x)
    default_lat = int(g.y)
    default_zoom = 11
    # Google Maps JS API loaded on top of the OSMGeoAdmin widget.
    extra_js = ["http://maps.google.com/maps/api/js?key=AIzaSyCvfyKIBeaLLGXbF5HS73ZcfmDhPtM05rA&sensor=true"]
    map_template = 'admin/gmgdav3.html'
    list_display = ('nombre','id','email','web_page','address','visible','sub_categorias')
    filter_horizontal=('administradores',)
    form=EstablecimientoAdminForm
    list_select_related = ('imagen',)
    inlines = [ ImagenInline ]
    list_filter = ('visible','sub_categorias' )
    search_fields = ('nombre','web_page','address', )
    class Media:
        js = (
            'http://code.jquery.com/jquery-2.1.1.min.js', # jquery
            'js/update_categoria_admin.js', # project static folder
        )
admin.site.register(Establecimiento, EstablecimientoAdmin) | {
"content_hash": "7dd4641bfdf38491930cfdde91dbb136",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 157,
"avg_line_length": 41.23419203747073,
"alnum_prop": 0.5556880786051003,
"repo_name": "camilortte/RecomendadorUD",
"id": "3e13485672cb1ee4753e9998f462915620c9ade4",
"size": "17646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/establishment_system/admin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "169155"
},
{
"name": "Go",
"bytes": "7075"
},
{
"name": "HTML",
"bytes": "267644"
},
{
"name": "JavaScript",
"bytes": "1055584"
},
{
"name": "PHP",
"bytes": "52919"
},
{
"name": "Python",
"bytes": "400602"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
} |
import Axon
import struct
import random
from Kamaelia.Util.Backplane import subscribeTo
from KPI.Crypto import xtea
class Authenticator(Axon.Component.component):
    """
    Kamaelia component implementing the server side of the KPI
    challenge/response user authentication; after a successful handshake
    it relays traffic from the "DataManagement" backplane to its outbox.
    """
    Inboxes = {"inbox" : "authentication and data packets"}
    Outboxes = {"outbox" : "authentication",
                "notifyuser" : "user notification"}
    def __init__(self, kpidb):
        super(Authenticator,self).__init__()
        self.kpidb = kpidb  # key/user database used for the handshake
    def main(self):
        kpidb = self.kpidb
        # Step 1: wait for the client's hello, encrypted with the root key.
        while not self.dataReady("inbox"):
            yield 1
        data = self.recv("inbox")
        # Packet layout: two 32-bit big-endian words (padding, userid).
        padding,userid = struct.unpack('!2L',
                            xtea.xtea_decrypt(kpidb.getRootKey(),data))
        print "Authenticator received userid:", userid
        if kpidb.isValidUser(userid) == False:
            print "Invalid UserID" # todo shutdown
            return
        # Step 2: send a random challenge encrypted with the user's key.
        # NOTE(review): random.getrandbits is not a cryptographically
        # secure source for an authentication challenge.
        challenge = random.getrandbits(32)
        temp = struct.pack('!2L',0, challenge)
        userkey = kpidb.getKPIKeys().getKey(userid)
        data = xtea.xtea_encrypt(userkey, temp)
        print data, challenge, userkey
        self.send(data, "outbox")
        yield 1
        # Step 3: client must answer with challenge + 1 under the same key.
        while not self.dataReady("inbox"):
            yield 1
        data = self.recv("inbox")
        temp = xtea.xtea_decrypt(userkey,data)
        padding, response = struct.unpack('!2L', temp)
        print data, response
        if response == challenge + 1:
            self.send("SUCCESS", "outbox")
            yield 1
        else:
            print "authenication failure"
            return # shutdown
        #new user added
        self.send(userid, "notifyuser")
        #subscribe to data Management back plane
        subscriber = subscribeTo("DataManagement")
        # passthrough=2 forwards the subscriber's outbox straight through
        # this component's outbox.
        self.link( (subscriber, "outbox"), (self, "outbox"), passthrough=2)
        subscriber.activate()
        yield 1
        # Keep the component alive so linked data keeps flowing.
        while 1:
            yield 1
| {
"content_hash": "92995be9697f6bcbd6bb66a295b26622",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 31.966666666666665,
"alnum_prop": 0.5735140771637122,
"repo_name": "sparkslabs/kamaelia",
"id": "aa66cad7dc2d4e12dca2d5926b7011b671aabf7c",
"size": "2741",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/AM/KPIFramework/KPI/Server/Authenticator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "M4",
"bytes": "12224"
},
{
"name": "Makefile",
"bytes": "150947"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "OCaml",
"bytes": "643"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Python",
"bytes": "18900785"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707588"
}
],
"symlink_target": ""
} |
__author__ = 'olav'
from aerial import * | {
"content_hash": "3697f10e66cc68fada473a154fa02b21",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 20,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.6341463414634146,
"repo_name": "olavvatne/CNN",
"id": "64fd4e5633f092ab8d932a46d4a2097968bd275f",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/visualize/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88387"
},
{
"name": "Shell",
"bytes": "822"
}
],
"symlink_target": ""
} |
import os
from termcolor import colored
from launchpadlib import launchpad
from bugsquasher.utils import jsonutils
class LP(object):
local_file = ".launchpad.json"
@staticmethod
def lp(config, section):
cache = section.get("cache_dir",
"~/.bugsquasher/cache")
return launchpad.Launchpad.login_anonymously('bugsquasher',
'production',
cache)
@classmethod
def on_take(cls, config, section, bug):
lp = cls.lp(config, section)
bug = lp.bugs[bug]
summary = {
"title": bug.title,
"description": bug.description
}
for msg in bug.messages:
comment = {
"author": msg.owner.name,
"content": msg.content,
"time": msg.date_created.isoformat(),
}
summary.setdefault("comments", []).append(comment)
with open(cls.local_file, 'wab') as f:
f.write(jsonutils.dumps(summary, ensure_ascii=True, indent=4))
@classmethod
def on_show(cls, config, section, **kwargs):
if os.path.exists(cls.local_file):
with open(cls.local_file, "rb") as f:
summary = jsonutils.loads(f.read())
print colored("Bug Title: ", 'green') + summary["title"]
#print "\tStatus: %s" % summary["status"]
print "\t%s comments" % len(summary["comments"])
if config.verbose:
for idx, comment in enumerate(summary["comments"]):
print colored("\tComment: %s" % (idx + 1), 'yellow')
header = "\tAuthor: %s, Date %s" % \
(comment["author"], comment["time"])
print colored(header, 'yellow')
print "\t%s" % comment["content"].replace("\n", "\n\t")
print "\t"
on_list = on_show
| {
"content_hash": "ea8b71b7eeea2f0482f39ff4f7302639",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 32.435483870967744,
"alnum_prop": 0.4987568373943312,
"repo_name": "FlaPer87/bugsquasher",
"id": "d2218286b73c39b9fd6603352455b8cfd7e5e046",
"size": "2011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bugsquasher/plugins/trackers/lp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35620"
}
],
"symlink_target": ""
} |
import logging
from abc import ABCMeta
from hvac.api.vault_api_base import VaultApiBase
logger = logging.getLogger(__name__)
class SystemBackendMixin(VaultApiBase):
    """Base class for System Backend API endpoints."""
    # Python 2 style abstract-class marker; on Python 3 this attribute is
    # inert (the metaclass would have to be passed as metaclass=ABCMeta).
    __metaclass__ = ABCMeta
| {
"content_hash": "cc23ddaede2ba5ccd6734482f909e382",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 54,
"avg_line_length": 21.083333333333332,
"alnum_prop": 0.7430830039525692,
"repo_name": "ianunruh/hvac",
"id": "5ab06493d557e9bcbfce2e962b97fcb8adc63725",
"size": "299",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hvac/api/system_backend/system_backend_mixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "194"
},
{
"name": "Makefile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "224553"
},
{
"name": "Shell",
"bytes": "1347"
}
],
"symlink_target": ""
} |
from shortuuid.main import decode
from shortuuid.main import encode
from shortuuid.main import get_alphabet
from shortuuid.main import random
from shortuuid.main import set_alphabet
from shortuuid.main import ShortUUID
from shortuuid.main import uuid
__version__ = "1.0.11"
# Public API re-exported from shortuuid.main (imported above).
__all__ = [
    "decode",
    "encode",
    "get_alphabet",
    "random",
    "set_alphabet",
    "ShortUUID",
    "uuid",
]
| {
"content_hash": "4f154446b566e466f75e29505a4ea963",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 39,
"avg_line_length": 22.22222222222222,
"alnum_prop": 0.7025,
"repo_name": "skorokithakis/shortuuid",
"id": "58582eec9904769ec2555a20017a360e78013a35",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shortuuid/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15160"
}
],
"symlink_target": ""
} |
from django.template import RequestContext
from django.http import HttpResponseNotFound, HttpResponse
def hash_link(request, key):
    """
    Dispatch view for every hash-link URL.

    Verifies ``key`` via HashLink and invokes whichever action function
    was registered for it; the callee communicates its outcome back
    through ``return_dict`` ('verified', optional 'return_obj', 'action').
    """
    from models import HashLink
    if key:
        return_dict = {}
        HashLink.verify_and_call_action_function(request,key, return_dict) #may call your registered function or the global default_action function
        # NOTE(review): 'verified' is never used below; a missing key here
        # raises KeyError -- presumably the call above always sets it.
        verified = return_dict['verified']
        if 'return_obj' in return_dict:
            return return_dict['return_obj'] #registered function should return render_to_response or HttpResponse
        elif 'action' not in return_dict:
            return HttpResponse('Invalid action.') #maybe verified or not, check ['verified'] but no registered function
        else:
            return HttpResponse('Not verified.')
    return HttpResponse('Invalid link.')
def default_action_on_error(request, has_error, error_code, hash_link, content_obj):
    # Fallback handler: any verification error simply yields a 404.
    return HttpResponseNotFound("Permission denied.")
def test_success(request, has_error, error_code, hash_link, content_obj):
    """
    Example action handler to register with the hashphrase_register
    decorator; runs when a user follows an emailed hash link.

    Always check ``has_error`` first: when verification failed (or the
    link/content object is missing) the error code is mapped to a
    human-readable message, otherwise success is reported.
    """
    if has_error or not hash_link or not content_obj:
        from hashphrase.models import HashLink
        error_messages = {
            HashLink.ERR_EXPIRED: "Link expired.",
            HashLink.ERR_INVALID_USER: "Needs to login.",
            HashLink.ERR_INVALID_LINK: "Invalid link.",
        }
        return HttpResponse(error_messages.get(error_code, "Invalid email link."))
    return HttpResponse("Successful.")
def hash_link_test(request):
    """
    DEBUG-only helper view: generates a hash link for user id 1 and
    renders it as an anchor so the round trip can be exercised manually.
    """
    from django.conf import settings
    # Refuse to run outside DEBUG deployments.
    if not hasattr(settings, 'DEBUG') or not settings.DEBUG:
        return HttpResponseNotFound()
    from models import HashLink
    from django.contrib.auth.models import User
    user = User.objects.get(id=1)
    from . import hashphrase_functions
    cur_datetime = hashphrase_functions.current_datetime_function()
    action = 'default_action2'
    hash_phrase = HashLink.gen_key(request.user, user, cur_datetime, action=action)
    from django.template import Template
    template = Template("""{{ verified }}<a href="/hl/{{ hash_phrase }}">test hash link {{ hash_phrase }}</a>""")
    c = RequestContext(request, {'hash_phrase': hash_phrase,'verified':''})
    rendered = template.render(c)
    return HttpResponse(rendered)
| {
"content_hash": "92ef9eeee1ce2086a2926f98b2fcdb93",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 147,
"avg_line_length": 38.97014925373134,
"alnum_prop": 0.6813481424741479,
"repo_name": "peiwei/django-hashphrase",
"id": "a8cc9b3c8cb61e08f3c0e1913bed7df7aa8ac627",
"size": "2611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hashphrase/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15300"
}
],
"symlink_target": ""
} |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def plot_residuals(dataframe, predictions):
    '''
    Plot a histogram of the residuals (original ENTRIESn_hourly values
    minus the predicted values) and return the pyplot module so the
    caller can save or show the figure.

    Bug fix: the original body referenced the global ``turnstile_weather``
    -- a name never defined in this module -- instead of the
    ``dataframe`` parameter, so calling it raised NameError.

    Background on residual plots:
    http://www.itl.nist.gov/div898/handbook/pri/section2/pri24.htm
    '''
    plt.figure()
    (dataframe['ENTRIESn_hourly'] - predictions).hist()
    return plt
def normalize_features(array):
    """
    Z-score the features: subtract the mean and divide by the standard
    deviation. Returns (normalized, mean, std) so the identical scaling
    can later be applied to new data.
    """
    mu = array.mean()
    sigma = array.std()
    return (array - mu) / sigma, mu, sigma
def compute_cost(features, values, theta):
    """
    Mean-squared-error cost J(theta) = sum((X.theta - y)^2) / (2m) for a
    linear model.
    """
    m = len(values)
    residuals = np.dot(features, theta) - values
    return np.square(residuals).sum() / (2 * m)

def gradient_descent(features, values, theta, alpha, num_iterations):
    """
    Batch gradient descent for linear regression.

    Returns the final theta and a pandas Series with the cost recorded
    after each update.

    Bug fix: the original returned ``pandas.Series(cost_history)`` but
    this module imports the library as ``pd``, so every call raised
    NameError. It now uses ``pd.Series``.
    """
    m = len(values)
    cost_history = []
    for _ in range(num_iterations):
        predicted_values = np.dot(features, theta)
        theta = theta - alpha / m * np.dot(predicted_values - values, features)
        cost_history.append(compute_cost(features, values, theta))
    return theta, pd.Series(cost_history)
def predictions(dataframe):
    """
    Predict hourly subway entries (ENTRIESn_hourly) with a linear model
    trained by gradient descent.

    Features: rain, precipi, Hour, meantempi plus one-hot encoded UNIT
    dummies; all features are z-scored before a bias column is appended.

    Bug fix: the original called ``pandas.get_dummies`` but this module
    imports the library as ``pd``, so the function raised NameError.
    """
    dummy_units = pd.get_dummies(dataframe['UNIT'], prefix='unit')
    features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']].join(dummy_units)
    values = dataframe[['ENTRIESn_hourly']]
    m = len(values)
    features, mu, sigma = normalize_features(features)
    features['ones'] = np.ones(m)  # bias column (added after scaling)
    features_array = np.array(features)
    values_array = np.array(values).flatten()
    # Hyperparameters for gradient descent.
    alpha = 0.1
    num_iterations = 75
    # Initialize theta, perform gradient descent.
    theta_gradient_descent = np.zeros(len(features.columns))
    theta_gradient_descent, cost_history = gradient_descent(features_array, values_array,
                                                            theta_gradient_descent,
                                                            alpha, num_iterations)
    return np.dot(features_array, theta_gradient_descent)
if __name__ == "__main__":
    # Train on the CSV dump, predict, and save a residual histogram.
    input_filename = "turnstile_data_master_with_weather.csv"
    turnstile_master = pd.read_csv(input_filename)
    prediction_values = predictions(turnstile_master)
    image = "plot.png"
    # plot_residuals returns the pyplot module; rebinding ``plt`` here
    # shadows the module-level import on purpose.
    plt = plot_residuals(turnstile_master, prediction_values)
    plt.savefig(image)
| {
"content_hash": "f7b1a597864fb5678322ab3a0fffdfee",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 113,
"avg_line_length": 31.659574468085108,
"alnum_prop": 0.6676747311827957,
"repo_name": "napjon/moocs_solution",
"id": "efb163b700d4875774512ed1aa596756056c68d8",
"size": "2976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Data_Science/project_3/plot_residuals/prediction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4046"
},
{
"name": "Jupyter Notebook",
"bytes": "9892144"
},
{
"name": "Matlab",
"bytes": "300578"
},
{
"name": "Python",
"bytes": "441602"
},
{
"name": "R",
"bytes": "7797"
},
{
"name": "Shell",
"bytes": "681"
}
],
"symlink_target": ""
} |
'''
largely adapts: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py
goal is to show linear dependency between number of variables in SELECT statement and execution time
'''
from __future__ import print_function
from bio_select_variables import *
import tensorflow as tf
import numpy
import sys, re
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50  # logging cadence (epochs)
# Training Data
train_X = numpy.array([])  # filled in by load_data()
train_Y = numpy.array([])  # filled in by load_data()
# # Testing example, as requested (Issue #2)
# test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
# test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
n_samples = 0
# tf Graph Input
# NOTE(review): tf.placeholder / tf.Session below are TensorFlow 1.x-era
# APIs; this script will not run on TF 2.x without compat shims.
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
def load_data():
    """
    Read tab-separated (x, y) pairs from the BioPortal feature file into
    the module-level training arrays, keeping only the first 251 rows,
    and return the raw dataset.
    """
    global train_X
    global train_Y
    db = readout_feature()
    for row_index, raw_line in enumerate(db):
        # Same cutoff the original generator expression applied; the
        # whole dataset is still iterated, rows past 250 are ignored.
        if row_index <= 250:
            fields = re.split(r'[\t|\n]', raw_line)
            train_X = numpy.append(train_X, float(fields[0]))
            train_Y = numpy.append(train_Y, float(fields[1]))
    return db
def linear_model():
    """Return the graph op for the linear model W*X + b.

    NOTE(review): ``tf.mul`` only exists in TensorFlow < 1.0 (it was
    renamed to ``tf.multiply``).
    """
    # Construct a linear model
    return tf.add(tf.mul(X, W), b)
def train_linear_model(data):
    """
    Fit the linear model to the module-level train_X/train_Y arrays with
    SGD (one sample per step) and show a scatter plot with the fitted
    line. ``data`` is only used for its length in the cost denominator.
    """
    pred = linear_model()
    # Mean squared error
    cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*len(data))
    # Gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Launch the graph
    with tf.Session() as sess:
        # Initializing the variables
        init = tf.global_variables_initializer()
        sess.run(init)
        # Fit all training data
        for epoch in range(training_epochs):
            # One optimizer step per (x, y) sample -> stochastic descent.
            for (x, y) in zip(train_X, train_Y):
                sess.run(optimizer, feed_dict={X: x, Y: y})
            # Display logs per epoch step
            if (epoch+1) % display_step == 0:
                c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
                # print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                #     "W=", sess.run(W), "b=", sess.run(b))
        print("Optimization Finished!")
        training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
        # print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
        # Graphic display
        plt.plot(train_X, train_Y, 'ro', label='Original data')
        plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
        plt.legend()
        plt.show()
        # print("Testing... (Mean square loss Comparison)")
        # testing_cost = sess.run(
        #     tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
        #     feed_dict={X: test_X, Y: test_Y})  # same function as cost above
        # print("Testing cost=", testing_cost)
        # print("Absolute mean square loss difference:", abs(
        #     training_cost - testing_cost))
        # plt.plot(test_X, test_Y, 'bo', label='Testing data')
        # plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
        # plt.legend()
        # plt.show()
def main():
    """Load the BioPortal timing data, then train and plot the model."""
    data = load_data()
    train_linear_model(data)
if __name__ == '__main__':
main() | {
"content_hash": "3ce51ca1d257a44243188fd464423dfe",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 124,
"avg_line_length": 27.189655172413794,
"alnum_prop": 0.6506024096385542,
"repo_name": "derdav3/tf-sparql",
"id": "f1ddf38a93344a8ca26c81f1cb356652a1d617cd",
"size": "3154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/BioPortal/older try/linear_regression_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "1854"
},
{
"name": "Python",
"bytes": "323046"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
} |
import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetrunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date() method')
def datetime_cast_time_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to time value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunk_sql() method')
def time_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that truncates the given time field field_name to a time object with
only the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
# RemovedInDjango20Warning
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
def to_unicode(s):
return force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
    def process_clob(self, value):
        """
        Returns the value of a CLOB column, for backends that return a locator
        object that requires additional processing.
        """
        # Most drivers already hand back a plain string; pass it through.
        return value
    def return_insert_id(self):
        """
        For backends that support returning the last insert ID as part
        of an insert query, this method returns the SQL and params to
        append to the INSERT query. The returned fragment should
        contain a format string to hold the appropriate column.
        """
        # Returning None (implicitly) disables the RETURNING-style clause.
        pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
    def quote_name(self, name):
        """
        Returns a quoted version of the given table, index or column name. Does
        not quote the given name if it's already been quoted.
        """
        # Intentionally abstract: quoting characters differ per SQL dialect.
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
    def random_function_sql(self):
        """
        Returns an SQL expression that returns a random value.
        """
        # Backends override this when their dialect names the function
        # differently.
        return 'RANDOM()'
    def regex_lookup(self, lookup_type):
        """
        Returns the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). The resulting string should
        contain a '%s' placeholder for the column being searched against.
        If the feature is not supported (or part of it is not supported), a
        NotImplementedError exception can be raised.
        """
        # Intentionally abstract: regex operator syntax is backend-specific.
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
    def savepoint_create_sql(self, sid):
        """
        Returns the SQL for starting a new savepoint. Only required if the
        "uses_savepoints" feature is True. The "sid" parameter is a string
        for the savepoint id.
        """
        # Savepoint ids are quoted like any other identifier.
        return "SAVEPOINT %s" % self.quote_name(sid)
    def savepoint_commit_sql(self, sid):
        """
        Returns the SQL for committing (releasing) the given savepoint.
        """
        return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
    def savepoint_rollback_sql(self, sid):
        """
        Returns the SQL for rolling back the given savepoint.
        """
        return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
    def set_time_zone_sql(self):
        """
        Returns the SQL that will set the connection's time zone.
        Returns '' if the backend doesn't support time zones.
        """
        # Empty string == time zones unsupported by default.
        return ''
    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """
        Returns a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves).
        The returned value also includes SQL statements required to reset DB
        sequences passed in :param sequences:.
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing the tables being truncated.
        PostgreSQL requires a cascade even if these tables are empty.
        """
        # Intentionally abstract: TRUNCATE/DELETE semantics are per-backend.
        raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
    def sequence_reset_by_name_sql(self, style, sequences):
        """
        Returns a list of the SQL statements required to reset sequences
        passed in :param sequences:.
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        # No-op by default: backends without resettable sequences emit no SQL.
        return []
    def sequence_reset_sql(self, style, model_list):
        """
        Returns a list of the SQL statements required to reset sequences for
        the given models.
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return []  # No sequence reset required by default.
    def start_transaction_sql(self):
        """
        Returns the SQL statement required to start a transaction.
        """
        return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
    def tablespace_sql(self, tablespace, inline=False):
        """
        Returns the SQL that will be used in a query to define the tablespace.
        Returns '' if the backend doesn't support tablespaces.
        If inline is True, the SQL is appended to a row; otherwise it's appended
        to the entire CREATE TABLE or CREATE INDEX statement.
        """
        # Empty string == tablespaces unsupported by default.
        return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
    def validate_autopk_value(self, value):
        """
        Certain backends do not accept some values for "serial" fields
        (for example zero in MySQL). This method will raise a ValueError
        if the value is invalid, otherwise returns validated value.
        """
        # Accept everything by default.
        return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
    def adapt_datefield_value(self, value):
        """
        Transforms a date value to an object compatible with what is expected
        by the backend driver for date columns.
        """
        if value is None:
            return None
        # str() of a date yields the ISO text form, which is driver-agnostic.
        return six.text_type(value)
    def adapt_datetimefield_value(self, value):
        """
        Transforms a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.
        """
        if value is None:
            return None
        # str() of a datetime yields the ISO text form.
        return six.text_type(value)
    def adapt_timefield_value(self, value):
        """
        Transforms a time value to an object compatible with what is expected
        by the backend driver for time columns.

        Raises:
            ValueError: if the time is timezone-aware (unsupported).
        """
        if value is None:
            return None
        # A bare time has no date to anchor a UTC offset to, so aware times
        # are rejected outright.
        if timezone.is_aware(value):
            raise ValueError("Django does not support timezone-aware times.")
        return six.text_type(value)
    def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
        """
        Transforms a decimal.Decimal value to an object compatible with what is
        expected by the backend driver for decimal (numeric) columns.
        """
        # Precision/scale handling is delegated to utils.format_number.
        return utils.format_number(value, max_digits, decimal_places)
    def adapt_ipaddressfield_value(self, value):
        """
        Transforms a string representation of an IP address into the expected
        type for the backend driver.
        """
        # Normalize empty strings to None (SQL NULL).
        return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
    def year_lookup_bounds_for_datetime_field(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateTimeField value using a year
        lookup.
        `value` is an int, containing the looked-up year.
        """
        first = datetime.datetime(value, 1, 1)
        # End of year at microsecond precision so BETWEEN is inclusive.
        second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
        if settings.USE_TZ:
            # With time-zone support on, the bounds must be interpreted in
            # the current time zone before being adapted.
            tz = timezone.get_current_timezone()
            first = timezone.make_aware(first, tz)
            second = timezone.make_aware(second, tz)
        first = self.adapt_datetimefield_value(first)
        second = self.adapt_datetimefield_value(second)
        return [first, second]
    def get_db_converters(self, expression):
        """
        Get a list of functions needed to convert field data.
        Some field types on some backends do not provide data in the correct
        format, this is the hook for converter functions.
        """
        # No conversion needed by default.
        return []
    def convert_durationfield_value(self, value, expression, connection, context):
        # The stored value appears to be an integer number of microseconds
        # (divided by 1e6 before parsing) — scale to seconds and let
        # parse_duration build the duration object.
        if value is not None:
            value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
            value = parse_duration(value)
        return value
    def check_aggregate_support(self, aggregate_func):
        # Deprecated shim kept for backwards compatibility; warns and
        # delegates to check_expression_support().
        warnings.warn(
            "check_aggregate_support has been deprecated. Use "
            "check_expression_support instead.",
            RemovedInDjango20Warning, stacklevel=2)
        return self.check_expression_support(aggregate_func)
    def check_expression_support(self, expression):
        """
        Check that the backend supports the provided expression.
        This is used on specific backends to rule out known expressions
        that have problematic or nonexistent implementations. If the
        expression has a known problem, the backend should raise
        NotImplementedError.
        """
        # Everything is supported by default.
        pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
    def combine_duration_expression(self, connector, sub_expressions):
        # Duration arithmetic combines like any other expression by default;
        # backends with special interval syntax override this.
        return self.combine_expression(connector, sub_expressions)
    def binary_placeholder_sql(self, value):
        """
        Some backends require special syntax to insert binary content (MySQL
        for example uses '_binary %s').
        """
        # Plain placeholder by default.
        return '%s'
    def modify_insert_params(self, placeholder, params):
        """Allow modification of insert parameters. Needed for Oracle Spatial
        backend due to #10888.
        """
        # Pass-through by default.
        return params
    def integer_field_range(self, internal_type):
        """
        Given an integer field internal type (e.g. 'PositiveIntegerField'),
        returns a tuple of the (min_value, max_value) form representing the
        range of the column type bound to the field.
        """
        # integer_field_ranges is presumably a class-level mapping of field
        # type name -> (min, max) — defined outside this view; confirm there.
        return self.integer_field_ranges[internal_type]
    def subtract_temporals(self, internal_type, lhs, rhs):
        """
        Return (sql, params) computing lhs - rhs for temporal values, or raise
        NotImplementedError if the backend cannot subtract them natively.
        `lhs` and `rhs` are (sql, params) pairs.
        """
        if self.connection.features.supports_temporal_subtraction:
            lhs_sql, lhs_params = lhs
            rhs_sql, rhs_params = rhs
            return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
        raise NotImplementedError("This backend does not support %s subtraction." % internal_type)
| {
"content_hash": "f1578a86187b8b6a7bdfd225c006262e",
"timestamp": "",
"source": "github",
"line_count": 621,
"max_line_length": 119,
"avg_line_length": 38.371980676328505,
"alnum_prop": 0.636787108145537,
"repo_name": "sgzsh269/django",
"id": "bf9af0149a0f0a762fe88e42c98038c77edf3d6d",
"size": "23829",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/db/backends/base/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52508"
},
{
"name": "HTML",
"bytes": "173554"
},
{
"name": "JavaScript",
"bytes": "451012"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11983612"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Lists all firewall rules for an IP address."""
from baseCmd import *
from baseResponse import *
class listFirewallRulesCmd (baseCmd):
    """Auto-generated CloudStack API command binding for listFirewallRules.

    Each request parameter is exposed as an instance attribute (None/[] until
    set by the caller); typeInfo maps each parameter name to the wire type
    the API expects. The bare string literals before each attribute are the
    generator's parameter descriptions.
    """
    typeInfo = {}

    def __init__(self):
        # Synchronous API call (the generator emits this as a string flag).
        self.isAsync = "false"
        """list resources by account. Must be used with the domainId parameter."""
        self.account = None
        self.typeInfo['account'] = 'string'
        """list only resources belonging to the domain specified"""
        self.domainid = None
        self.typeInfo['domainid'] = 'uuid'
        """list resources by display flag; only ROOT admin is eligible to pass this parameter"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        """Lists rule with the specified ID."""
        self.id = None
        self.typeInfo['id'] = 'uuid'
        """the ID of IP address of the firewall services"""
        self.ipaddressid = None
        self.typeInfo['ipaddressid'] = 'uuid'
        """defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves."""
        self.isrecursive = None
        self.typeInfo['isrecursive'] = 'boolean'
        """List by keyword"""
        self.keyword = None
        self.typeInfo['keyword'] = 'string'
        """If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false"""
        self.listall = None
        self.typeInfo['listall'] = 'boolean'
        """list firewall rules for certain network"""
        self.networkid = None
        self.typeInfo['networkid'] = 'uuid'
        """"""
        self.page = None
        self.typeInfo['page'] = 'integer'
        """"""
        self.pagesize = None
        self.typeInfo['pagesize'] = 'integer'
        """list objects by project"""
        self.projectid = None
        self.typeInfo['projectid'] = 'uuid'
        """List resources by tags (key/value pairs)"""
        self.tags = []
        self.typeInfo['tags'] = 'map'
        # Names of parameters the API requires (none for this call).
        self.required = []
class listFirewallRulesResponse (baseResponse):
    """Auto-generated CloudStack API response binding for listFirewallRules.

    Each response field is exposed as an instance attribute; typeInfo maps
    each field name to its wire type. The nested ``tags`` class describes
    one entry of the resource-tag list.
    """
    typeInfo = {}

    def __init__(self):
        """the ID of the firewall rule"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """the cidr list to forward traffic from"""
        self.cidrlist = None
        self.typeInfo['cidrlist'] = 'string'
        """the ending port of firewall rule's port range"""
        self.endport = None
        self.typeInfo['endport'] = 'integer'
        """is rule for display to the regular user"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        """error code for this icmp message"""
        self.icmpcode = None
        self.typeInfo['icmpcode'] = 'integer'
        """type of the icmp message being sent"""
        self.icmptype = None
        self.typeInfo['icmptype'] = 'integer'
        """the public ip address for the firewall rule"""
        self.ipaddress = None
        self.typeInfo['ipaddress'] = 'string'
        """the public ip address id for the firewall rule"""
        self.ipaddressid = None
        self.typeInfo['ipaddressid'] = 'string'
        """the network id of the firewall rule"""
        self.networkid = None
        self.typeInfo['networkid'] = 'string'
        """the protocol of the firewall rule"""
        self.protocol = None
        self.typeInfo['protocol'] = 'string'
        """the starting port of firewall rule's port range"""
        self.startport = None
        self.typeInfo['startport'] = 'integer'
        """the state of the rule"""
        self.state = None
        self.typeInfo['state'] = 'string'
        """the list of resource tags associated with the rule"""
        self.tags = []

    class tags:
        """One resource tag (key/value pair) attached to a firewall rule."""
        def __init__(self):
            """"the account associated with the tag"""
            self.account = None
            """"customer associated with the tag"""
            self.customer = None
            """"the domain associated with the tag"""
            self.domain = None
            """"the ID of the domain associated with the tag"""
            self.domainid = None
            """"tag key name"""
            self.key = None
            """"the project name where tag belongs to"""
            self.project = None
            """"the project id the tag belongs to"""
            self.projectid = None
            """"id of the resource"""
            self.resourceid = None
            """"resource type"""
            self.resourcetype = None
            """"tag value"""
            self.value = None
| {
"content_hash": "4744aebac59ec034f84c08c13c84d206",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 178,
"avg_line_length": 37.97457627118644,
"alnum_prop": 0.5811202856505244,
"repo_name": "MissionCriticalCloud/marvin",
"id": "f1d0781f42e27c99a807b2b5dca3a5e7a5610329",
"size": "4481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marvin/cloudstackAPI/listFirewallRules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2573421"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class DimensiondefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for the ``splom.dimensiondefaults`` compound
    property (maps onto the ``Dimension`` data class)."""

    def __init__(self, plotly_name="dimensiondefaults", parent_name="splom", **kwargs):
        super(DimensiondefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Dimension"),
            data_docs=kwargs.pop(
                "data_docs",
                """
""",
            ),
            **kwargs,
        )
| {
"content_hash": "22a247c9a404838b688dd6b1d858bfe3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 87,
"avg_line_length": 34.0625,
"alnum_prop": 0.5779816513761468,
"repo_name": "plotly/plotly.py",
"id": "fa1e8792ab8bd12c7576a864277f85f7e534c8d4",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/splom/_dimensiondefaults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815DescribeRegionsRequest(RestApi):
    """Auto-generated binding for the Aliyun RDS ``DescribeRegions`` API,
    version 2014-08-15."""

    def __init__(self,domain='rds.aliyuncs.com',port=80):
        # Explicit base-class call kept for Python 2 old-style compatibility.
        RestApi.__init__(self,domain, port)

    def getapiname(self):
        # Fully-qualified API name: endpoint, action, and API version.
        return 'rds.aliyuncs.com.DescribeRegions.2014-08-15'
| {
"content_hash": "4432065bd7131239103854d7cfc6410f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 54,
"avg_line_length": 30,
"alnum_prop": 0.7433333333333333,
"repo_name": "francisar/rds_manager",
"id": "1d0063e37a23f85a67438d9ca672ac2496221004",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliyun/api/rest/Rds20140815DescribeRegionsRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259509"
},
{
"name": "Shell",
"bytes": "1481"
}
],
"symlink_target": ""
} |
'''
if you are using multicore machine (and i can bet that you do) this script demonstrates how to use all
cores, please beware to set line 127 to a number which divides number of columns without reminder
(for ex. if you have x step equal to 0.5 you will have 10/0.5 = 20 columns hence you can use 2, 4, 5, 10 or 20 parallel processes)
'''
__author__ = 'Valeriu Radu: https://www.kaggle.com/valeriur'
import multiprocessing
from multiprocessing import Process
from multiprocessing import Manager
import math
import xgboost
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
import datetime
import time
import os
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from ml_metrics import mapk
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
def prepare_data(df):
    """
    Feature engineering: derive periodic time features (hour, weekday,
    month, year) from the raw minute counter and rescale them — plus the
    accuracy column — so the KNN distance metric weights them sensibly.
    """
    minute = df.time % 60
    hours_total = df['time'] // 60      # elapsed hours since the data's epoch
    days_total = hours_total // 24      # elapsed days
    months_total = days_total // 30     # elapsed (approximate) months
    #df.drop(['time'], axis=1, inplace=True)
    # Columns are inserted in the original order (hour, weekday, month,
    # year) so downstream feature matrices line up identically.
    df['hour'] = ((hours_total % 24 + 1) + minute / 60.0) * 4.0
    df['weekday'] = (days_total % 7 + 1) * 3.0
    df['month'] = (months_total % 12 + 1) * 2.0
    df['year'] = (days_total // 365 + 1) * 10.0
    df['accuracy'] = np.log10(df['accuracy']) * 10.0
    return df
# Load the raw Kaggle data and apply the shared feature engineering to both
# the train and test sets (prepare_data mutates and returns each frame).
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = prepare_data(train)
test = prepare_data(test)
print(train.shape)
print(test.shape)
def calculate_distance(distances):
    # Inverse-square weighting for KNN votes — closer neighbours count much
    # more. Used as the `weights` callable of KNeighborsClassifier below.
    return distances ** -2
def process_column(x_min, x_max, y_ranges, x_end, y_end, train, test, preds_total):
    """
    Worker routine: predict top-3 place_ids for every test point in one
    vertical strip [x_min, x_max) of the map, one y-cell at a time, and
    store the strip's predictions under preds_total[x_min] (a Manager dict
    shared with the parent process).
    """
    start_time_column = time.time()
    preds_total[x_min] = pd.DataFrame();
    for y_min, y_max in y_ranges:
        start_time_cell = time.time()
        # Widen the last row/column slightly so boundary points are included.
        if x_max == x_end:
            x_max = x_max + 0.001
        if y_max == y_end:
            y_max = y_max + 0.001
        # Train on a margin around the cell so neighbours just outside it
        # still contribute.
        train_cell = train[(train['x'] >= x_min - 0.03) &
                           (train['x'] < x_max + 0.03) &
                           (train['y'] >= y_min - 0.015) &
                           (train['y'] < y_max + 0.015)]
        add_data = train_cell[train_cell.hour<10]# add data for periodic time that hit the boundary
        add_data.hour = add_data.hour+96
        train_cell = train_cell.append(add_data)
        add_data = train_cell[train_cell.hour>90]
        add_data.hour = add_data.hour-96
        train_cell = train_cell.append(add_data)
        del add_data
        train_cell = train_cell.drop(['time'], axis=1)
        # Drop rare places: fewer than 8 check-ins isn't enough signal.
        train_cell = train_cell.groupby("place_id").filter(lambda x: len(x) >= 8)
        test_cell = test[(test['x'] >= x_min) &
                         (test['x'] < x_max) &
                         (test['y'] >= y_min) &
                         (test['y'] < y_max)]
        row_ids = test_cell['row_id'].reset_index(drop=True);
        test_cell = test_cell.drop(['row_id', 'time'], axis=1)
        #Feature engineering on x and y: y is weighted ~2x as heavily as x.
        train_cell.loc[:,'x'] *= 490.0
        train_cell.loc[:,'y'] *= 980.0
        test_cell.loc[:,'x'] *= 490.0
        test_cell.loc[:,'y'] *= 980.0
        le = LabelEncoder()
        y = le.fit_transform(train_cell.place_id.values)
        X = train_cell.drop(['row_id', 'place_id'], axis=1)
        #Applying the classifier; k scales with sqrt of the cell's sample count.
        clf = KNeighborsClassifier(n_neighbors=np.floor(np.sqrt(y.size)/5.2632).astype(int),
                                   weights=calculate_distance, metric='manhattan',n_jobs=-1)
        clf.fit(X, y)
        y_pred = clf.predict_proba(test_cell.values)
        # Keep the 3 most probable places per test point (MAP@3 metric).
        preds = le.inverse_transform(np.argsort(y_pred, axis=1)[:,::-1][:,:3])
        preds = pd.DataFrame.from_dict(preds)
        preds['row_id'] = row_ids;
        preds = preds.set_index('row_id')
        preds.index.name = 'row_id';
        preds_total[x_min] = pd.concat([preds_total[x_min], preds], axis=0);
    print("Elapsed time column: %s minutes" % ((time.time() - start_time_column)/60))
def model(x_ranges, y_ranges, x_end, y_end, train, test):
    """
    Fan out one process per x-strip (at most 2 at a time), collect each
    strip's predictions through a Manager dict, and return the combined
    predictions sorted by row_id.
    """
    start_time = time.time()
    jobs = []
    mgr = Manager()
    preds_total = mgr.dict();
    for x_min, x_max in x_ranges:
        p = multiprocessing.Process(target=process_column, args=(x_min, x_max, y_ranges, \
                                                                 x_end, y_end, train, test, preds_total))
        jobs.append(p)
        p.start()
        # Throttle to 2 concurrent workers: wait for the current batch
        # before starting the next pair of strips.
        if len(jobs) == 2:
            for proc in jobs:
                proc.join();
            jobs = [];
    print("Elapsed time overall: %s minutes" % ((time.time() - start_time)/60))
    preds_total = pd.concat(preds_total.values(), axis=0);
    print(preds_total.shape)
    return preds_total.sort_index();
def xfrange(start, end, step):
    """Like range() for floats: values from start (inclusive) to end
    (exclusive) in increments of step, rounded to 2 decimals to avoid
    float drift."""
    values = []
    current = round(start, 2)
    limit = round(end, 2)
    while current < limit:
        values.append(current)
        current = round(current + step, 2)
    return values
def gen_ranges(start, end, step):
    """Pair each cell's lower bound with its upper bound (lower + step)."""
    lower_bounds = xfrange(start, end, step)
    upper_bounds = xfrange(start + step, end + step, step)
    return zip(lower_bounds, upper_bounds)
# Grid configuration: the 10x10 map is cut into 0.5 x 0.25 cells.
size = 10.0;
x_step = 0.5
y_step = 0.25
x_ranges = gen_ranges(0, size, x_step);
y_ranges = gen_ranges(0, size, y_step);
preds_total = model(x_ranges, y_ranges, size, size, train, test)
# Format the top-3 predictions as the space-separated string Kaggle expects.
preds_total = preds_total.applymap(str)
preds_total.columns = ['l1', 'l2', 'l3'];
print('Writing submission file')
preds_total = preds_total.l1.str.cat([preds_total.l2, preds_total.l3], sep=' ')
preds_total.columns = ['place_id'];
preds_total.to_csv('submission_sample_parallel.csv', index=True, header=True, index_label='row_id')
"content_hash": "9601e5e38e1aa603e33025b7d4930094",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 130,
"avg_line_length": 34.00581395348837,
"alnum_prop": 0.5681313044964952,
"repo_name": "autumn-lake/Facebook-V-Predicting-Check-Ins",
"id": "6dadb4b416e5aa984425b4fc070fb6ddc154e609",
"size": "5865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "valeriu parallel knn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1153873"
},
{
"name": "Python",
"bytes": "15889"
}
],
"symlink_target": ""
} |
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
    """Add USD-denominated counterparts of StartupUpdate's money fields, a
    currency_type selector, and relax verbose names on the original
    currency-agnostic fields. Declarative — keep operations literal."""

    dependencies = [
        ('accelerator', '0103_migrate_old_user_interest'),
    ]

    operations = [
        migrations.AddField(
            model_name='startupupdate',
            name='acquired_valuation_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='active_annualized_revenue_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Annualized revenue (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='active_total_funding_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Total Funding Raised (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='active_valuation_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='currency_type',
            # Currency in which the non-USD fields are denominated.
            field=models.CharField(
                choices=[
                    ('USD', 'USD'), ('GBP', 'GBP'),
                    ('EUR', 'EUR'), ('JPY', 'JPY'),
                    ('AUD', 'AUD'), ('CAD', 'CAD'),
                    ('CHF', 'CHF'), ('NZD', 'NZD'),
                    ('NGN', 'NGN'), ('MXN', 'MXN')],
                default='USD',
                max_length=3,
                verbose_name='Status Currency'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='ipo_valuation_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation (in US dollars)'),
        ),
        migrations.AlterField(
            model_name='startupupdate',
            name='active_annualized_revenue',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Annualized revenue'),
        ),
        migrations.AlterField(
            model_name='startupupdate',
            name='active_total_funding',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Total Funding Raised'),
        ),
        migrations.AlterField(
            model_name='startupupdate',
            name='active_valuation',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation'),
        ),
    ]
| {
"content_hash": "8e80a666af0bfbce352b677c3240b4b2",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 69,
"avg_line_length": 32.495327102803735,
"alnum_prop": 0.45930399769916597,
"repo_name": "masschallenge/django-accelerator",
"id": "18b83b8b77fa95d5b5357a1d35560b54fc13c3c7",
"size": "3527",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "accelerator/migrations/0104_update_startupupdate_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1848"
},
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "996767"
},
{
"name": "Shell",
"bytes": "2453"
}
],
"symlink_target": ""
} |
from joommf.energies.demag import Demag
def test_demag_mif():
    """The Demag term renders the expected MIF block and repr string."""
    energy = Demag()
    assert 'Specify Oxs_Demag {}' in energy.get_mif()
    assert repr(energy) == "This is the energy class of type Demag"
def test_demag_formatting():
    """MIF output starts with 'S' (Specify) and ends with a blank line."""
    mif = Demag().get_mif()
    assert mif.startswith('S')
    assert mif.endswith('\n\n')
| {
"content_hash": "6ffa952b024ddcaf518dbedc23248bde",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 28,
"alnum_prop": 0.6190476190476191,
"repo_name": "fangohr/oommf-python",
"id": "d24e41bd43329d60a1bb273a068d177d8b908cf5",
"size": "420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "joommf/tests/test_demag.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "161"
},
{
"name": "Emacs Lisp",
"bytes": "4312"
},
{
"name": "Jupyter Notebook",
"bytes": "800026"
},
{
"name": "Makefile",
"bytes": "499"
},
{
"name": "Python",
"bytes": "230266"
},
{
"name": "Shell",
"bytes": "3509"
},
{
"name": "TeX",
"bytes": "3423"
}
],
"symlink_target": ""
} |
import json
import mock
from django.http import JsonResponse, HttpResponseRedirect
from django.test import TestCase, RequestFactory
from django.views.generic import View
from django.contrib.sessions.middleware import SessionMiddleware
from my_app.lib.ttam_api.ttam_api.django.views import AuthenticatedMixin
from my_app.lib.ttam_api.ttam_api.django import client
from my_app.lib.ttam_api.tests.fixtures.account import multiprofile_account, single_profile_account
class AuthView(AuthenticatedMixin, View):
    """Minimal view used to exercise AuthenticatedMixin: echoes back what
    the mixin attached to the request."""

    def get(self, request):
        ttam = request.ttam
        payload = {
            'current_profile': getattr(ttam, 'current_profile', None),
            'has_ttam_api': getattr(ttam, 'api', None) is not None,
        }
        return JsonResponse(payload)
class TestAuthenticatedMixinDispatch(TestCase):
    """Dispatch-level tests for AuthenticatedMixin: profile/api attachment
    when tokens are present, redirect when either token is missing. All
    network access is mocked out via the client module."""

    accounts_response = single_profile_account
    multi_profile_accounts_response = multiprofile_account

    def setUp(self):
        # Bare GET request with a real session (the mixin stores tokens there).
        self.req = RequestFactory().get('/')
        SessionMiddleware().process_request(self.req)
        self.req.session.save()
        self.view = AuthView()

    @mock.patch.object(client, 'get')
    def _get_response(self, mock_client_get):
        # Stub the API client so .get() yields the canned accounts payload.
        mock_client_get.return_value = mock.MagicMock(**{'get.return_value': self.accounts_response})
        return self.view.dispatch(self.req)

    @mock.patch.object(client, 'get')
    def _get_multi_profile_response_data(self, mock_client_get):
        mock_client_get.return_value = mock.MagicMock(**{'get.return_value': self.multi_profile_accounts_response})
        res = self.view.dispatch(self.req)
        return json.loads(res.content.decode('utf-8'))

    def _get_response_data(self):
        res = self._get_response()
        return json.loads(res.content.decode('utf-8'))

    @mock.patch.object(client, 'get_tokens')
    def test_attaches_profile(self, mock_client_get_tokens):
        mock_client_get_tokens.return_value = ('access_token', 'refresh_token')
        data = self._get_response_data()
        expected_profile = self.accounts_response['data'][0]['profiles'][0]
        assert data['current_profile'] == expected_profile

    @mock.patch.object(client, 'get_tokens')
    def test_attaches_fisrt_profile_for_multiprofile_account(self, mock_client_get_tokens):
        # Multi-profile accounts default to the first profile in the list.
        mock_client_get_tokens.return_value = ('access_token', 'refresh_token')
        data = self._get_multi_profile_response_data()
        expected_profile = self.multi_profile_accounts_response['data'][0]['profiles'][0]
        assert data['current_profile'] == expected_profile

    @mock.patch.object(client, 'get_tokens')
    def test_attaches_ttam_api(self, mock_client_get_tokens):
        mock_client_get_tokens.return_value = ('access_token', 'refresh_token')
        data = self._get_response_data()
        assert data['has_ttam_api']

    @mock.patch.object(client, 'get_tokens')
    def test_redirects_if_no_access_token(self, mock_client_get_tokens):
        # Missing either token must bounce the user to re-authenticate.
        mock_client_get_tokens.return_value = (None, 'refresh_token')
        res = self._get_response()
        assert res.status_code == 302
        assert isinstance(res, HttpResponseRedirect)

    @mock.patch.object(client, 'get_tokens')
    def test_redirects_if_no_refresh_token(self, mock_client_get_tokens):
        mock_client_get_tokens.return_value = ('access_token', None)
        res = self._get_response()
        assert res.status_code == 302
        assert isinstance(res, HttpResponseRedirect)
| {
"content_hash": "9bcaac1dd51fd45b91ebe77bcb92e18c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 115,
"avg_line_length": 38.130434782608695,
"alnum_prop": 0.6804446978335233,
"repo_name": "ortutay/23andme-phenotypes-hackathon",
"id": "1409e0c08cec40d64acf59e860b0d1dd81058863",
"size": "3508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_app/lib/ttam_api/tests/test_authenticated_mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "868"
},
{
"name": "HTML",
"bytes": "5920"
},
{
"name": "JavaScript",
"bytes": "4429"
},
{
"name": "Makefile",
"bytes": "2551"
},
{
"name": "Python",
"bytes": "66424"
},
{
"name": "Shell",
"bytes": "1800"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import os
import platform
import sys
from pkg_resources import resource_string
from rbtools.commands import Command
class SetupCompletion(Command):
    """Setup auto-completion for rbt.

    By default, the command installs an auto-completion file for the user's
    login shell. The user can optionally specify a shell for which the command
    will install the auto-completion file for.
    """

    name = 'setup-completion'
    author = 'The Review Board Project'
    description = 'Setup auto-completion for bash or zsh.'
    args = '<shell>'

    #: A dictionary of supported shells.
    #:
    #: Each shell contains paths to its completion file and the directory
    #: where the file will be installed.
    SHELLS = {
        'bash': {
            'Linux': {
                'src': 'commands/conf/rbt-bash-completion',
                'dest': '/etc/bash_completion.d',
                'filename': 'rbt',
            },
            'Darwin': {
                'src': 'commands/conf/rbt-bash-completion',
                'dest': '/usr/local/etc/bash_completion.d',
                'filename': 'rbt',
            },
        },
        'zsh': {
            'Linux': {
                'src': 'commands/conf/_rbt-zsh-completion',
                'dest': '/usr/share/zsh/functions/Completion',
                'filename': '_rbt',
            },
            'Darwin': {
                'src': 'commands/conf/_rbt-zsh-completion',
                'dest': '/usr/share/zsh/site-functions',
                'filename': '_rbt',
            },
        }
    }

    def setup(self, shell):
        """Install auto-completions for the appropriate shell.

        Args:
            shell (str):
                Name of the shell for which auto-completions will be
                installed (must be a key of ``SHELLS``).
        """
        system = platform.system()
        shell_info = self.SHELLS[shell][system]

        # resource_string() returns bytes (on both Python 2 and 3), so the
        # destination file must be opened in binary mode; writing bytes to a
        # text-mode file raises TypeError on Python 3.
        script = resource_string('rbtools', shell_info['src'])
        dest = os.path.join(shell_info['dest'], shell_info['filename'])

        try:
            with open(dest, 'wb') as f:
                f.write(script)
        except IOError as e:
            logging.error('I/O Error (%s): %s' % (e.errno, e.strerror))
            sys.exit()

        print('Successfully installed %s auto-completions.' % shell)
        print('Restart the terminal for completions to work.')

    def main(self, shell=None):
        """Run the command.

        Args:
            shell (str):
                Optional name of the shell for which auto-completions will
                be installed. Defaults to the user's login shell ($SHELL).
        """
        if not shell:
            # os.environ requires text (str) keys; the previous b'SHELL'
            # lookup raised TypeError on Python 3.
            shell = os.environ.get('SHELL')

            if shell:
                shell = os.path.basename(shell)
            else:
                logging.error('No login shell found. Re-run the command with '
                              'a shell as an argument or refer to manual '
                              'installation in documentation')
                sys.exit()

        if shell in self.SHELLS:
            system = platform.system()

            if system in self.SHELLS[shell]:
                if os.path.exists(self.SHELLS[shell][system]['dest']):
                    self.setup(shell)
                else:
                    logging.error('Could not locate %s completion directory. '
                                  'Refer to manual installation in '
                                  'documentation',
                                  shell)
            else:
                logging.error('The %s operating system is currently '
                              'unsupported',
                              system)
        else:
            logging.error('The shell "%s" is currently unsupported',
                          shell)
| {
"content_hash": "cbc9b64791ab82e7e8584c6cbac28ccc",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 33.70434782608696,
"alnum_prop": 0.5079979360165119,
"repo_name": "davidt/rbtools",
"id": "262613ea62288cc3c4b7afbe0ef3e960738d7705",
"size": "3876",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rbtools/commands/setup_completion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9410"
},
{
"name": "HTML",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "746695"
},
{
"name": "Shell",
"bytes": "39731"
}
],
"symlink_target": ""
} |
import json
from unittest import mock
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.core.models import Collection, GroupCollectionPermission, Page
from wagtail.documents import models
from wagtail.tests.testapp.models import EventPage, EventPageRelatedLink
from wagtail.tests.utils import WagtailTestUtils
class TestDocumentIndexView(TestCase, WagtailTestUtils):
    """Exercises the admin document listing view (``wagtaildocs:index``)."""

    def setUp(self):
        self.login()

    def make_docs(self):
        # Create enough documents to span several listing pages.
        for n in range(50):
            models.Document.objects.create(title="Test " + str(n))

    def test_simple(self):
        resp = self.client.get(reverse('wagtaildocs:index'))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/documents/index.html')
        self.assertContains(resp, "Add a document")

    def test_search(self):
        resp = self.client.get(reverse('wagtaildocs:index'), {'q': "Hello"})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context['query_string'], "Hello")

    def test_pagination(self):
        self.make_docs()
        resp = self.client.get(reverse('wagtaildocs:index'), {'p': 2})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/documents/index.html')
        # The requested page is the one that was served.
        self.assertEqual(resp.context['documents'].number, 2)

    def test_pagination_invalid(self):
        self.make_docs()
        resp = self.client.get(reverse('wagtaildocs:index'), {'p': 'Hello World!'})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/documents/index.html')
        # A non-numeric page number falls back to page one.
        self.assertEqual(resp.context['documents'].number, 1)

    def test_pagination_out_of_range(self):
        self.make_docs()
        resp = self.client.get(reverse('wagtaildocs:index'), {'p': 99999})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/documents/index.html')
        # An out-of-range page number clamps to the final page.
        paginator = resp.context['documents'].paginator
        self.assertEqual(resp.context['documents'].number, paginator.num_pages)

    def test_ordering(self):
        for ordering in ('title', '-created_at'):
            resp = self.client.get(reverse('wagtaildocs:index'), {'ordering': ordering})
            self.assertEqual(resp.status_code, 200)

    def test_index_without_collections(self):
        # With only the root collection, no Collection column is rendered.
        self.make_docs()
        resp = self.client.get(reverse('wagtaildocs:index'))
        self.assertNotContains(resp, '<th>Collection</th>')
        self.assertNotContains(resp, '<td>Root</td>')

    def test_index_with_collection(self):
        root = Collection.get_first_root_node()
        root.add_child(name="Evil plans")
        root.add_child(name="Good plans")
        self.make_docs()
        resp = self.client.get(reverse('wagtaildocs:index'))
        self.assertContains(resp, '<th>Collection</th>')
        self.assertContains(resp, '<td>Root</td>')
        self.assertEqual(
            [c.name for c in resp.context['collections']],
            ['Root', 'Evil plans', 'Good plans'])
class TestDocumentAddView(TestCase, WagtailTestUtils):
    """Tests for the single-document "add" view in the Wagtail admin."""
    def setUp(self):
        self.login()
    def test_get(self):
        """The add form renders and is capable of file uploads."""
        response = self.client.get(reverse('wagtaildocs:add'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/add.html')
        # as standard, only the root collection exists and so no 'Collection' option
        # is displayed on the form
        self.assertNotContains(response, '<label for="id_collection">')
        # Ensure the form supports file uploads
        self.assertContains(response, 'enctype="multipart/form-data"')
    def test_get_with_collections(self):
        """A collection chooser appears once more than one collection exists."""
        root_collection = Collection.get_first_root_node()
        root_collection.add_child(name="Evil plans")
        response = self.client.get(reverse('wagtaildocs:add'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/add.html')
        self.assertContains(response, '<label for="id_collection">')
        self.assertContains(response, "Evil plans")
    def test_post(self):
        """Uploading with no collection field places the document in the root collection."""
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        # Submit
        post_data = {
            'title': "Test document",
            'file': fake_file,
        }
        response = self.client.post(reverse('wagtaildocs:add'), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        # Document should be created, and be placed in the root collection
        document = models.Document.objects.get(title="Test document")
        root_collection = Collection.get_first_root_node()
        self.assertEqual(
            document.collection,
            root_collection
        )
        # Check that the file_size/hash field was set
        self.assertTrue(document.file_size)
        self.assertTrue(document.file_hash)
    def test_post_with_collections(self):
        """Uploading with an explicit collection places the document there."""
        root_collection = Collection.get_first_root_node()
        evil_plans_collection = root_collection.add_child(name="Evil plans")
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        # Submit
        post_data = {
            'title': "Test document",
            'file': fake_file,
            'collection': evil_plans_collection.id,
        }
        response = self.client.post(reverse('wagtaildocs:add'), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        # Document should be created, and be placed in the Evil Plans collection
        self.assertTrue(models.Document.objects.filter(title="Test document").exists())
        root_collection = Collection.get_first_root_node()
        self.assertEqual(
            models.Document.objects.get(title="Test document").collection,
            evil_plans_collection
        )
class TestDocumentAddViewWithLimitedCollectionPermissions(TestCase, WagtailTestUtils):
    """Add-view tests for a user with add permission on only one collection."""
    def setUp(self):
        """Create a non-superuser with admin access plus add_document on a single
        (non-root) collection, and log the test client in as that user."""
        add_doc_permission = Permission.objects.get(
            content_type__app_label='wagtaildocs', codename='add_document'
        )
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin'
        )
        root_collection = Collection.get_first_root_node()
        self.evil_plans_collection = root_collection.add_child(name="Evil plans")
        conspirators_group = Group.objects.create(name="Evil conspirators")
        conspirators_group.permissions.add(admin_permission)
        # Grant add_document only within the "Evil plans" collection.
        GroupCollectionPermission.objects.create(
            group=conspirators_group,
            collection=self.evil_plans_collection,
            permission=add_doc_permission
        )
        user = get_user_model().objects.create_user(
            username='moriarty',
            email='moriarty@example.com',
            password='password'
        )
        user.groups.add(conspirators_group)
        self.client.login(username='moriarty', password='password')
    def test_get(self):
        """The collection chooser is hidden when only one collection is permitted."""
        response = self.client.get(reverse('wagtaildocs:add'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/add.html')
        # user only has access to one collection, so no 'Collection' option
        # is displayed on the form
        self.assertNotContains(response, '<label for="id_collection">')
    def test_post(self):
        """Uploads land in the single permitted collection automatically."""
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        # Submit
        post_data = {
            'title': "Test document",
            'file': fake_file,
        }
        response = self.client.post(reverse('wagtaildocs:add'), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        # Document should be created in the 'evil plans' collection,
        # despite there being no collection field in the form, because that's the
        # only one the user has access to
        self.assertTrue(models.Document.objects.filter(title="Test document").exists())
        self.assertEqual(
            models.Document.objects.get(title="Test document").collection,
            self.evil_plans_collection
        )
class TestDocumentEditView(TestCase, WagtailTestUtils):
    """Tests for the single-document edit view, including file re-uploads."""
    def setUp(self):
        self.login()
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        # Create a document to edit
        self.document = models.Document.objects.create(title="Test document", file=fake_file)
    def test_simple(self):
        """The edit form renders and is capable of file uploads."""
        response = self.client.get(reverse('wagtaildocs:edit', args=(self.document.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/edit.html')
        # Ensure the form supports file uploads
        self.assertContains(response, 'enctype="multipart/form-data"')
    def test_post(self):
        """Posting a new title saves it and redirects back to the listing."""
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        # Submit title change
        post_data = {
            'title': "Test document changed!",
            'file': fake_file,
        }
        response = self.client.post(reverse('wagtaildocs:edit', args=(self.document.id,)), post_data)
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        # Document title should be changed
        self.assertEqual(models.Document.objects.get(id=self.document.id).title, "Test document changed!")
    def test_with_missing_source_file(self):
        """The edit view still renders (with a notice) when the file is gone from storage."""
        # Build a fake file
        fake_file = ContentFile(b"An ephemeral document")
        fake_file.name = 'to-be-deleted.txt'
        # Create a new document to delete the source for
        document = models.Document.objects.create(title="Test missing source document", file=fake_file)
        document.file.delete(False)
        response = self.client.get(reverse('wagtaildocs:edit', args=(document.id,)), {})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/edit.html')
        self.assertContains(response, 'File not found')
    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_link(self):
        """The usage link and count appear when usage counting is enabled."""
        response = self.client.get(reverse('wagtaildocs:edit', args=(self.document.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/edit.html')
        self.assertContains(response, self.document.usage_url)
        self.assertContains(response, 'Used 0 times')
    def test_reupload_same_name(self):
        """
        Checks that reuploading the document file with the same file name
        changes the file name, to avoid browser cache issues (see #3816).
        """
        old_filename = self.document.file.name
        new_name = self.document.filename
        new_file = SimpleUploadedFile(new_name, b'An updated test content.')
        response = self.client.post(reverse('wagtaildocs:edit', args=(self.document.pk,)), {
            'title': self.document.title, 'file': new_file,
        })
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        self.document.refresh_from_db()
        # The old file is gone, the new one exists under a *different* name.
        self.assertFalse(self.document.file.storage.exists(old_filename))
        self.assertTrue(self.document.file.storage.exists(self.document.file.name))
        self.assertNotEqual(self.document.file.name, 'documents/' + new_name)
        self.assertEqual(self.document.file.read(),
                         b'An updated test content.')
    def test_reupload_different_name(self):
        """
        Checks that reuploading the document file with a different file name
        correctly uses the new file name.
        """
        old_filename = self.document.file.name
        new_name = 'test_reupload_different_name.txt'
        new_file = SimpleUploadedFile(new_name, b'An updated test content.')
        response = self.client.post(reverse('wagtaildocs:edit', args=(self.document.pk,)), {
            'title': self.document.title, 'file': new_file,
        })
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        self.document.refresh_from_db()
        # The old file is gone, the new one exists under exactly the new name.
        self.assertFalse(self.document.file.storage.exists(old_filename))
        self.assertTrue(self.document.file.storage.exists(self.document.file.name))
        self.assertEqual(self.document.file.name, 'documents/' + new_name)
        self.assertEqual(self.document.file.read(),
                         b'An updated test content.')
class TestDocumentDeleteView(TestCase, WagtailTestUtils):
    """Exercises the admin document deletion view (``wagtaildocs:delete``)."""

    def setUp(self):
        self.login()
        self.document = models.Document.objects.create(title="Test document")

    def test_simple(self):
        resp = self.client.get(reverse('wagtaildocs:delete', args=(self.document.id,)))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/documents/confirm_delete.html')

    def test_delete(self):
        resp = self.client.post(reverse('wagtaildocs:delete', args=(self.document.id,)))
        # A successful delete redirects back to the listing ...
        self.assertRedirects(resp, reverse('wagtaildocs:index'))
        # ... and the record itself is gone.
        self.assertFalse(models.Document.objects.filter(id=self.document.id).exists())

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_link(self):
        resp = self.client.get(reverse('wagtaildocs:delete', args=(self.document.id,)))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/documents/confirm_delete.html')
        self.assertContains(resp, self.document.usage_url)
        self.assertContains(resp, 'Used 0 times')
class TestMultipleDocumentUploader(TestCase, WagtailTestUtils):
    """
    This tests the multiple document upload views located in wagtaildocs/views/multiple.py
    """
    # Form payload used by the edit tests (field names get a 'doc-<id>-' prefix).
    edit_post_data = {
        'title': "New title!",
        'tags': "",
    }
    def setUp(self):
        self.login()
        # Create a document for running tests on
        self.doc = models.get_document_model().objects.create(
            title="Test document",
            file=ContentFile(b"Simple text document"),
        )
    def check_doc_after_edit(self):
        """Assert the document reflects edit_post_data; subclasses extend this."""
        self.doc.refresh_from_db()
        self.assertEqual(self.doc.title, "New title!")
        self.assertFalse(self.doc.tags.all())
    def test_add(self):
        """
        This tests that the add view responds correctly on a GET request
        """
        # Send request
        response = self.client.get(reverse('wagtaildocs:add_multiple'))
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/multiple/add.html')
        # no collection chooser when only one collection exists
        self.assertNotContains(response, '<label for="id_adddocument_collection">')
    def test_add_with_collections(self):
        """The add view shows a collection chooser once multiple collections exist."""
        root_collection = Collection.get_first_root_node()
        root_collection.add_child(name="Evil plans")
        # Send request
        response = self.client.get(reverse('wagtaildocs:add_multiple'))
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/multiple/add.html')
        # collection chooser should exist
        self.assertContains(response, '<label for="id_adddocument_collection">')
        self.assertContains(response, 'Evil plans')
    def test_add_post(self):
        """
        This tests that a POST request to the add view saves the document and returns an edit form
        """
        response = self.client.post(reverse('wagtaildocs:add_multiple'), {
            'files[]': SimpleUploadedFile('test.png', b"Simple text document"),
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertTemplateUsed(response, 'wagtaildocs/multiple/edit_form.html')
        # Check document
        self.assertIn('doc', response.context)
        self.assertEqual(response.context['doc'].title, 'test.png')
        self.assertTrue(response.context['doc'].file_size)
        self.assertTrue(response.context['doc'].file_hash)
        # check that it is in the root collection
        doc = models.get_document_model().objects.get(title='test.png')
        root_collection = Collection.get_first_root_node()
        self.assertEqual(doc.collection, root_collection)
        # Check form
        self.assertIn('form', response.context)
        self.assertEqual(
            set(response.context['form'].fields),
            set(models.get_document_model().admin_form_fields) - {'file', 'collection'},
        )
        self.assertEqual(response.context['form'].initial['title'], 'test.png')
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('doc_id', response_json)
        self.assertIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['doc_id'], response.context['doc'].id)
        self.assertTrue(response_json['success'])
        # form should not contain a collection chooser
        self.assertNotIn('Collection', response_json['form'])
    def test_add_post_with_collections(self):
        """
        This tests that a POST request to the add view saves the document
        and returns an edit form, when collections are active
        """
        root_collection = Collection.get_first_root_node()
        evil_plans_collection = root_collection.add_child(name="Evil plans")
        response = self.client.post(reverse('wagtaildocs:add_multiple'), {
            'files[]': SimpleUploadedFile('test.png', b"Simple text document"),
            'collection': evil_plans_collection.id
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertTemplateUsed(response, 'wagtaildocs/multiple/edit_form.html')
        # Check document
        self.assertIn('doc', response.context)
        self.assertEqual(response.context['doc'].title, 'test.png')
        self.assertTrue(response.context['doc'].file_size)
        self.assertTrue(response.context['doc'].file_hash)
        # check that it is in the 'evil plans' collection
        doc = models.get_document_model().objects.get(title='test.png')
        root_collection = Collection.get_first_root_node()
        self.assertEqual(doc.collection, evil_plans_collection)
        # Check form
        self.assertIn('form', response.context)
        self.assertEqual(
            set(response.context['form'].fields),
            set(models.get_document_model().admin_form_fields) - {'file'} | {'collection'},
        )
        self.assertEqual(response.context['form'].initial['title'], 'test.png')
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('doc_id', response_json)
        self.assertIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['doc_id'], response.context['doc'].id)
        self.assertTrue(response_json['success'])
        # form should contain a collection chooser
        self.assertIn('Collection', response_json['form'])
    def test_add_post_noajax(self):
        """
        This tests that only AJAX requests are allowed to POST to the add view
        """
        response = self.client.post(reverse('wagtaildocs:add_multiple'))
        # Check response
        self.assertEqual(response.status_code, 400)
    def test_add_post_nofile(self):
        """
        This tests that the add view checks for a file when a user POSTs to it
        """
        response = self.client.post(reverse('wagtaildocs:add_multiple'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 400)
    def test_edit_get(self):
        """
        This tests that a GET request to the edit view returns a 405 "METHOD NOT ALLOWED" response
        """
        # Send request
        response = self.client.get(reverse('wagtaildocs:edit_multiple', args=(self.doc.id, )))
        # Check response
        self.assertEqual(response.status_code, 405)
    def test_edit_post(self):
        """
        This tests that a POST request to the edit view edits the document
        """
        # Send request
        response = self.client.post(
            reverse('wagtaildocs:edit_multiple', args=(self.doc.id, )),
            {'doc-%d-%s' % (self.doc.id, field): data for field, data in self.edit_post_data.items()},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        )
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('doc_id', response_json)
        self.assertNotIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['doc_id'], self.doc.id)
        self.assertTrue(response_json['success'])
        self.check_doc_after_edit()
    def test_edit_post_noajax(self):
        """
        This tests that a POST request to the edit view without AJAX returns a 400 response
        """
        # Send request
        response = self.client.post(reverse('wagtaildocs:edit_multiple', args=(self.doc.id, )), {
            ('doc-%d-title' % self.doc.id): "New title!",
            ('doc-%d-tags' % self.doc.id): "",
        })
        # Check response
        self.assertEqual(response.status_code, 400)
    def test_edit_post_validation_error(self):
        """
        This tests that a POST request to the edit page returns a json document with "success=False"
        and a form with the validation error indicated
        """
        # Send request
        response = self.client.post(reverse('wagtaildocs:edit_multiple', args=(self.doc.id, )), {
            ('doc-%d-title' % self.doc.id): "",  # Required
            ('doc-%d-tags' % self.doc.id): "",
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertTemplateUsed(response, 'wagtaildocs/multiple/edit_form.html')
        # Check that a form error was raised
        self.assertFormError(response, 'form', 'title', "This field is required.")
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('doc_id', response_json)
        self.assertIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['doc_id'], self.doc.id)
        self.assertFalse(response_json['success'])
    def test_delete_get(self):
        """
        This tests that a GET request to the delete view returns a 405 "METHOD NOT ALLOWED" response
        """
        # Send request
        response = self.client.get(reverse('wagtaildocs:delete_multiple', args=(self.doc.id, )))
        # Check response
        self.assertEqual(response.status_code, 405)
    def test_delete_post(self):
        """
        This tests that a POST request to the delete view deletes the document
        """
        # Send request
        response = self.client.post(reverse('wagtaildocs:delete_multiple', args=(self.doc.id, )), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        # Make sure the document is deleted
        self.assertFalse(models.get_document_model().objects.filter(id=self.doc.id).exists())
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('doc_id', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['doc_id'], self.doc.id)
        self.assertTrue(response_json['success'])
    def test_delete_post_noajax(self):
        """
        This tests that a POST request to the delete view without AJAX returns a 400 response
        """
        # Send request
        response = self.client.post(reverse('wagtaildocs:delete_multiple', args=(self.doc.id, )))
        # Check response
        self.assertEqual(response.status_code, 400)
@override_settings(WAGTAILDOCS_DOCUMENT_MODEL='tests.CustomDocument')
class TestMultipleCustomDocumentUploader(TestMultipleDocumentUploader):
    """Re-runs the multiple-upload tests against a custom document model."""

    # The custom model adds a ``description`` field to the edit form payload.
    edit_post_data = dict(TestMultipleDocumentUploader.edit_post_data,
                          description="New description.")

    def check_doc_after_edit(self):
        super().check_doc_after_edit()
        self.assertEqual(self.doc.description, "New description.")
class TestMultipleCustomDocumentUploaderNoCollection(TestMultipleCustomDocumentUploader):
    """Multiple-upload tests with the 'collection' field patched out of the form."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        Document = models.get_document_model()
        fields = tuple(f for f in Document.admin_form_fields if f != 'collection')
        # NOTE: the double-underscore attribute is name-mangled to this class
        # (_TestMultipleCustomDocumentUploaderNoCollection__patcher), so
        # setUpClass/tearDownClass always address the same patcher object.
        cls.__patcher = mock.patch.object(Document, 'admin_form_fields', fields)
        cls.__patcher.start()
    @classmethod
    def tearDownClass(cls):
        # Undo the class-wide patch before the parent teardown runs.
        cls.__patcher.stop()
        super().tearDownClass()
class TestDocumentChooserView(TestCase, WagtailTestUtils):
    """Tests for the document chooser modal (``wagtaildocs:chooser``)."""
    def setUp(self):
        self.user = self.login()
    def test_simple(self):
        """The chooser returns a JSON payload at the 'chooser' step."""
        response = self.client.get(reverse('wagtaildocs:chooser'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/chooser/chooser.html')
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json['step'], 'chooser')
    def test_search(self):
        response = self.client.get(reverse('wagtaildocs:chooser'), {'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")
    def make_docs(self):
        # Create enough documents to paginate the chooser listing.
        for i in range(50):
            document = models.Document(title="Test " + str(i))
            document.save()
    def test_pagination(self):
        self.make_docs()
        response = self.client.get(reverse('wagtaildocs:chooser'), {'p': 2})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/list.html')
        # Check that we got the correct page
        self.assertEqual(response.context['documents'].number, 2)
    def test_pagination_invalid(self):
        self.make_docs()
        response = self.client.get(reverse('wagtaildocs:chooser'), {'p': 'Hello World!'})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/list.html')
        # Check that we got page one
        self.assertEqual(response.context['documents'].number, 1)
    def test_pagination_out_of_range(self):
        self.make_docs()
        response = self.client.get(reverse('wagtaildocs:chooser'), {'p': 99999})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/list.html')
        # Check that we got the last page
        self.assertEqual(response.context['documents'].number, response.context['documents'].paginator.num_pages)
    def test_construct_queryset_hook_browse(self):
        """The construct_document_chooser_queryset hook filters the browse listing."""
        document = models.Document.objects.create(
            title="Test document shown",
            uploaded_by_user=self.user,
        )
        models.Document.objects.create(
            title="Test document not shown",
        )
        def filter_documents(documents, request):
            # Filter on `uploaded_by_user` because it is
            # the only default FilterField in search_fields
            return documents.filter(uploaded_by_user=self.user)
        with self.register_hook('construct_document_chooser_queryset', filter_documents):
            response = self.client.get(reverse('wagtaildocs:chooser'))
        self.assertEqual(len(response.context['documents']), 1)
        self.assertEqual(response.context['documents'][0], document)
    def test_construct_queryset_hook_search(self):
        """The construct_document_chooser_queryset hook filters search results too."""
        document = models.Document.objects.create(
            title="Test document shown",
            uploaded_by_user=self.user,
        )
        models.Document.objects.create(
            title="Test document not shown",
        )
        def filter_documents(documents, request):
            # Filter on `uploaded_by_user` because it is
            # the only default FilterField in search_fields
            return documents.filter(uploaded_by_user=self.user)
        with self.register_hook('construct_document_chooser_queryset', filter_documents):
            response = self.client.get(reverse('wagtaildocs:chooser'), {'q': 'Test'})
        self.assertEqual(len(response.context['documents']), 1)
        self.assertEqual(response.context['documents'][0], document)
    def test_index_without_collections(self):
        # NOTE(review): this test and the next hit 'wagtaildocs:index' rather
        # than the chooser URL; they appear copy-pasted from
        # TestDocumentIndexView — confirm whether they should target the chooser.
        self.make_docs()
        response = self.client.get(reverse('wagtaildocs:index'))
        self.assertNotContains(response, '<th>Collection</th>')
        self.assertNotContains(response, '<td>Root</td>')
    def test_index_with_collection(self):
        root_collection = Collection.get_first_root_node()
        root_collection.add_child(name="Evil plans")
        self.make_docs()
        response = self.client.get(reverse('wagtaildocs:index'))
        self.assertContains(response, '<th>Collection</th>')
        self.assertContains(response, '<td>Root</td>')
class TestDocumentChooserChosenView(TestCase, WagtailTestUtils):
    """Exercises the chooser's "document chosen" endpoint."""

    def setUp(self):
        self.login()
        self.document = models.Document.objects.create(title="Test document")

    def test_simple(self):
        url = reverse('wagtaildocs:document_chosen', args=(self.document.id,))
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        # The JSON payload tells the chooser modal to close with a selection.
        payload = json.loads(resp.content.decode())
        self.assertEqual(payload['step'], 'document_chosen')
class TestDocumentChooserUploadView(TestCase, WagtailTestUtils):
    """Exercises uploading a document from within the chooser modal."""

    def setUp(self):
        self.login()

    def test_simple(self):
        resp = self.client.get(reverse('wagtaildocs:chooser_upload'))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtaildocs/chooser/chooser.html')
        payload = json.loads(resp.content.decode())
        self.assertEqual(payload['step'], 'chooser')

    def test_post(self):
        upload = ContentFile(b"A boring example document")
        upload.name = 'test.txt'
        resp = self.client.post(reverse('wagtaildocs:chooser_upload'), {
            'document-chooser-upload-title': "Test document",
            'document-chooser-upload-file': upload,
        })
        # A successful upload jumps straight to the "chosen" step ...
        payload = json.loads(resp.content.decode())
        self.assertEqual(payload['step'], 'document_chosen')
        # ... and persists the document.
        self.assertTrue(models.Document.objects.filter(title="Test document").exists())
class TestDocumentChooserUploadViewWithLimitedPermissions(TestCase, WagtailTestUtils):
    """Chooser upload tests for a user with add permission on a single collection."""
    def setUp(self):
        """Create a non-superuser with admin access plus add_document on a single
        (non-root) collection, and log the test client in as that user."""
        add_doc_permission = Permission.objects.get(
            content_type__app_label='wagtaildocs', codename='add_document'
        )
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin'
        )
        root_collection = Collection.get_first_root_node()
        self.evil_plans_collection = root_collection.add_child(name="Evil plans")
        conspirators_group = Group.objects.create(name="Evil conspirators")
        conspirators_group.permissions.add(admin_permission)
        # Grant add_document only within the "Evil plans" collection.
        GroupCollectionPermission.objects.create(
            group=conspirators_group,
            collection=self.evil_plans_collection,
            permission=add_doc_permission
        )
        user = get_user_model().objects.create_user(
            username='moriarty',
            email='moriarty@example.com',
            password='password'
        )
        user.groups.add(conspirators_group)
        self.client.login(username='moriarty', password='password')
    def test_simple(self):
        response = self.client.get(reverse('wagtaildocs:chooser_upload'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/chooser/chooser.html')
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json['step'], 'chooser')
        # user only has access to one collection -> should not see the collections field
        self.assertNotIn('id_collection', response_json['html'])
    def test_chooser_view(self):
        # The main chooser view also includes the form, so need to test there too
        response = self.client.get(reverse('wagtaildocs:chooser'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/chooser/chooser.html')
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json['step'], 'chooser')
        # user only has access to one collection -> should not see the collections field
        self.assertNotIn('id_collection', response_json['html'])
    def test_post(self):
        """Uploads land in the single permitted collection automatically."""
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        # Submit
        post_data = {
            'document-chooser-upload-title': "Test document",
            'document-chooser-upload-file': fake_file,
        }
        response = self.client.post(reverse('wagtaildocs:chooser_upload'), post_data)
        # Check that the response is the 'document_chosen' step
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json['step'], 'document_chosen')
        # Document should be created
        doc = models.Document.objects.filter(title="Test document")
        self.assertTrue(doc.exists())
        # Document should be in the 'evil plans' collection
        self.assertEqual(doc.get().collection, self.evil_plans_collection)
class TestUsageCount(TestCase, WagtailTestUtils):
    """Usage-count display on the document edit view, gated by the
    WAGTAIL_USAGE_COUNT_ENABLED setting."""
    fixtures = ['test.json']

    def setUp(self):
        self.login()

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_unused_document_usage_count(self):
        doc = models.Document.objects.get(id=1)
        self.assertEqual(doc.get_usage().count(), 0)

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_used_document_usage_count(self):
        # Link the document from a page so it has exactly one usage.
        doc = models.Document.objects.get(id=1)
        page = EventPage.objects.get(id=4)
        event_page_related_link = EventPageRelatedLink()
        event_page_related_link.page = page
        event_page_related_link.link_document = doc
        event_page_related_link.save()
        self.assertEqual(doc.get_usage().count(), 1)

    def test_usage_count_does_not_appear(self):
        # Setting not enabled here: the edit view must not show usage info.
        doc = models.Document.objects.get(id=1)
        page = EventPage.objects.get(id=4)
        event_page_related_link = EventPageRelatedLink()
        event_page_related_link.page = page
        event_page_related_link.link_document = doc
        event_page_related_link.save()
        response = self.client.get(reverse('wagtaildocs:edit',
                                           args=(1,)))
        self.assertNotContains(response, 'Used 1 time')

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_count_appears(self):
        doc = models.Document.objects.get(id=1)
        page = EventPage.objects.get(id=4)
        event_page_related_link = EventPageRelatedLink()
        event_page_related_link.page = page
        event_page_related_link.link_document = doc
        event_page_related_link.save()
        response = self.client.get(reverse('wagtaildocs:edit',
                                           args=(1,)))
        self.assertContains(response, 'Used 1 time')

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_count_zero_appears(self):
        # No usage created: the view should still render an explicit zero.
        response = self.client.get(reverse('wagtaildocs:edit',
                                           args=(1,)))
        self.assertContains(response, 'Used 0 times')
class TestGetUsage(TestCase, WagtailTestUtils):
    """Document.get_usage() behaviour and the document usage page,
    gated by WAGTAIL_USAGE_COUNT_ENABLED."""
    fixtures = ['test.json']

    def setUp(self):
        self.login()

    def test_document_get_usage_not_enabled(self):
        # With the setting off, get_usage() reports nothing even if used.
        doc = models.Document.objects.get(id=1)
        self.assertEqual(list(doc.get_usage()), [])

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_unused_document_get_usage(self):
        doc = models.Document.objects.get(id=1)
        self.assertEqual(list(doc.get_usage()), [])

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_used_document_get_usage(self):
        doc = models.Document.objects.get(id=1)
        page = EventPage.objects.get(id=4)
        event_page_related_link = EventPageRelatedLink()
        event_page_related_link.page = page
        event_page_related_link.link_document = doc
        event_page_related_link.save()
        # NOTE(review): issubclass(Page, type(obj)) asserts that Page is a
        # subclass of the returned object's type, i.e. it only passes when
        # get_usage() yields base Page instances. Confirm the argument
        # order is intentional (the reverse check would be more natural).
        self.assertTrue(issubclass(Page, type(doc.get_usage()[0])))

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_page(self):
        doc = models.Document.objects.get(id=1)
        page = EventPage.objects.get(id=4)
        event_page_related_link = EventPageRelatedLink()
        event_page_related_link.page = page
        event_page_related_link.link_document = doc
        event_page_related_link.save()
        response = self.client.get(reverse('wagtaildocs:document_usage',
                                           args=(1,)))
        self.assertContains(response, 'Christmas')

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_usage_page_no_usage(self):
        response = self.client.get(reverse('wagtaildocs:document_usage',
                                           args=(1,)))
        # There's no usage so there should be no table rows
        self.assertRegex(
            response.content.decode('utf-8'),
            r'<tbody>(\s|\n)*</tbody>'
        )
class TestEditOnlyPermissions(TestCase, WagtailTestUtils):
    """Behaviour of the document admin for a user who holds only
    change_document (no add_document) permission."""

    def setUp(self):
        # Build a fake file
        fake_file = ContentFile(b"A boring example document")
        fake_file.name = 'test.txt'
        self.root_collection = Collection.get_first_root_node()
        self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
        self.nice_plans_collection = self.root_collection.add_child(name="Nice plans")
        # Create a document to edit
        self.document = models.Document.objects.create(
            title="Test document", file=fake_file, collection=self.nice_plans_collection
        )
        # Create a user with change_document permission but not add_document
        user = get_user_model().objects.create_user(
            username='changeonly',
            email='changeonly@example.com',
            password='password'
        )
        change_permission = Permission.objects.get(
            content_type__app_label='wagtaildocs', codename='change_document'
        )
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin'
        )
        self.changers_group = Group.objects.create(name='Document changers')
        # Grant change permission on the whole collection tree (root).
        GroupCollectionPermission.objects.create(
            group=self.changers_group, collection=self.root_collection,
            permission=change_permission
        )
        user.groups.add(self.changers_group)
        user.user_permissions.add(admin_permission)
        self.assertTrue(self.client.login(username='changeonly', password='password'))

    def test_get_index(self):
        response = self.client.get(reverse('wagtaildocs:index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/index.html')
        # user should not get an "Add a document" button
        self.assertNotContains(response, "Add a document")
        # user should be able to see documents not owned by them
        self.assertContains(response, "Test document")

    def test_search(self):
        response = self.client.get(reverse('wagtaildocs:index'), {'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    def test_get_add(self):
        response = self.client.get(reverse('wagtaildocs:add'))
        # permission should be denied
        self.assertRedirects(response, reverse('wagtailadmin_home'))

    def test_get_edit(self):
        response = self.client.get(reverse('wagtaildocs:edit', args=(self.document.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/edit.html')
        # documents can only be moved to collections you have add permission for,
        # so the 'collection' field is not available here
        self.assertNotContains(response, '<label for="id_collection">')
        # if the user has add permission on a different collection,
        # they should have option to move the document
        add_permission = Permission.objects.get(
            content_type__app_label='wagtaildocs', codename='add_document'
        )
        GroupCollectionPermission.objects.create(
            group=self.changers_group, collection=self.evil_plans_collection,
            permission=add_permission
        )
        response = self.client.get(reverse('wagtaildocs:edit', args=(self.document.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<label for="id_collection">')
        self.assertContains(response, 'Nice plans')
        self.assertContains(response, 'Evil plans')

    def test_post_edit(self):
        # Submit title change
        response = self.client.post(
            reverse('wagtaildocs:edit', args=(self.document.id,)), {
                'title': "Test document changed!",
                'file': '',
            }
        )
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('wagtaildocs:index'))
        # Document title should be changed
        self.assertEqual(
            models.Document.objects.get(id=self.document.id).title,
            "Test document changed!"
        )
        # collection should be unchanged
        self.assertEqual(
            models.Document.objects.get(id=self.document.id).collection,
            self.nice_plans_collection
        )
        # if the user has add permission on a different collection,
        # they should have option to move the document
        add_permission = Permission.objects.get(
            content_type__app_label='wagtaildocs', codename='add_document'
        )
        GroupCollectionPermission.objects.create(
            group=self.changers_group, collection=self.evil_plans_collection,
            permission=add_permission
        )
        response = self.client.post(
            reverse('wagtaildocs:edit', args=(self.document.id,)), {
                'title': "Test document changed!",
                'collection': self.evil_plans_collection.id,
                'file': '',
            }
        )
        self.assertEqual(
            models.Document.objects.get(id=self.document.id).collection,
            self.evil_plans_collection
        )

    def test_get_delete(self):
        # change permission is sufficient for deleting
        response = self.client.get(reverse('wagtaildocs:delete', args=(self.document.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtaildocs/documents/confirm_delete.html')

    def test_get_add_multiple(self):
        response = self.client.get(reverse('wagtaildocs:add_multiple'))
        # permission should be denied
        self.assertRedirects(response, reverse('wagtailadmin_home'))
| {
"content_hash": "54215d3a6f23ae85c8ecfc51940e8515",
"timestamp": "",
"source": "github",
"line_count": 1148,
"max_line_length": 137,
"avg_line_length": 39.98344947735192,
"alnum_prop": 0.6475240190845515,
"repo_name": "mikedingjan/wagtail",
"id": "07bc0854183f8fb39d86c7224abd6319a8a126dc",
"size": "45901",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/documents/tests/test_admin_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183841"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "373400"
},
{
"name": "JavaScript",
"bytes": "266257"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3607707"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import types as py_types
import re
import struct
import binascii
import decimal
import logging
import datetime
from weakref import WeakValueDictionary
from pyhdb.protocol.constants import type_codes
from pyhdb.exceptions import InterfaceError
from pyhdb.compat import PY26, PY2, PY3, with_metaclass, iter_range, int_types, \
string_types, byte_type, text_type
from pyhdb.protocol.headers import WriteLobHeader
# Module-level logger plus a shortcut for its debug method.
logger = logging.getLogger('pyhdb')
debug = logger.debug

# Both registries below are filled automatically by TypeMeta.__new__ whenever
# a Type subclass is defined.  WeakValueDictionary avoids keeping the classes
# alive through the registry alone.
# Dictionary: keys: numeric type_code, values: Type-(sub)classes (from below)
by_type_code = WeakValueDictionary()
# Dictionary: keys: Python type classes, values: Type-(sub)classes (from below)
by_python_type = WeakValueDictionary()
class TypeMeta(type):
    """
    Meta class for Type classes.

    Every class created with this metaclass is registered in the
    module-level ``by_type_code`` and ``by_python_type`` mappings based on
    its ``type_code`` and ``python_type`` class attributes (each may be a
    single value or a tuple/list of values).
    """

    @staticmethod
    def _add_type_to_type_code_mapping(type_class, type_code):
        # Type codes are single bytes with the MSB reserved as the NULL
        # flag (see NoneType.prepare), hence the 0..127 range check.
        if not 0 <= type_code <= 127:
            raise InterfaceError(
                "%s type type_code must be between 0 and 127" %
                type_class.__name__
            )
        by_type_code[type_code] = type_class

    def __new__(cls, name, bases, attrs):
        type_class = super(TypeMeta, cls).__new__(cls, name, bases, attrs)
        # populate by_type_code mapping
        if hasattr(type_class, "type_code"):
            if isinstance(type_class.type_code, (tuple, list)):
                for type_code in type_class.type_code:
                    TypeMeta._add_type_to_type_code_mapping(type_class, type_code)
            else:
                TypeMeta._add_type_to_type_code_mapping(
                    type_class, type_class.type_code
                )
        # populate by_python_type mapping
        if hasattr(type_class, "python_type"):
            if isinstance(type_class.python_type, (tuple, list)):
                for typ in type_class.python_type:
                    by_python_type[typ] = type_class
            else:
                by_python_type[type_class.python_type] = type_class
        return type_class
class Type(with_metaclass(TypeMeta, object)):
    """Base class for all types.

    Subclasses are auto-registered by the TypeMeta metaclass via their
    ``type_code`` / ``python_type`` class attributes.
    """
class NoneType(Type):
    """Handles SQL NULL for Python ``None`` values."""
    python_type = type(None)

    @classmethod
    def to_sql(cls, _):
        # None always renders as the SQL NULL literal.
        return text_type("NULL")

    @classmethod
    def prepare(cls, type_code):
        """Encode a binary NULL for the given type code.

        The wire format flags NULL by switching on the most significant
        bit of the type-code byte.
        """
        return struct.pack('<B', 0x80 | type_code)
class _IntType(Type):
    """Shared (de)serialization logic for the fixed-size integer types."""

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one integer value from a result set, or None for SQL NULL."""
        marker = payload.read(1)
        if marker != b"\x01":
            # Anything other than x01 means no value follows (SQL NULL).
            return None
        raw = payload.read(cls._struct.size)
        return cls._struct.unpack(raw)[0]

    @classmethod
    def prepare(cls, value):
        """Serialize *value* (or None) as a typed parameter field."""
        if value is None:
            return struct.pack('b', 0)
        return struct.pack('b', cls.type_code) + cls._struct.pack(int(value))
class TinyInt(_IntType):
    # Unsigned 8-bit integer (struct format "B").
    type_code = type_codes.TINYINT
    _struct = struct.Struct("B")
class SmallInt(_IntType):
    # Signed 16-bit integer (struct format "h").
    type_code = type_codes.SMALLINT
    _struct = struct.Struct("h")
class Int(_IntType):
    # Signed 32-bit integer (struct format "i"); also the registered
    # handler for Python int inputs via ``python_type``.
    type_code = type_codes.INT
    python_type = int_types
    _struct = struct.Struct("i")

    @classmethod
    def to_sql(cls, value):
        # An integer's str() form is a valid SQL literal.
        return text_type(value)
class BigInt(_IntType):
    # Signed 64-bit integer (struct format "q").
    type_code = type_codes.BIGINT
    _struct = struct.Struct("q")
class Decimal(Type):
    """DECIMAL values, transferred as a 16-byte field whose layout matches
    the IEEE 754-2008 decimal128 encoding (sign bit, 14-bit exponent with
    bias 6176, up to 34 significant digits)."""
    type_code = type_codes.DECIMAL
    python_type = decimal.Decimal

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Decode a 16-byte DECIMAL field into decimal.Decimal, or None."""
        # The wire value is little-endian; reverse so byte 0 holds the
        # sign bit and the high exponent bits.
        payload = bytearray(payload.read(16))
        payload.reverse()
        if payload[0] == 0x70:
            # 0x70 in the top byte marks SQL NULL.
            return None
        sign = payload[0] >> 7
        # 14-bit exponent straddling bytes 0 and 1, biased by 6176.
        exponent = ((payload[0] & 0x7F) << 7) | ((payload[1] & 0xFE) >> 1)
        exponent = exponent - 6176
        # 113-bit mantissa: the low bit of byte 1 plus bytes 2..15.
        mantissa = (payload[1] & 0x01) << 112
        x = 104
        for i in iter_range(2, 16):
            mantissa = mantissa | ((payload[i]) << x)
            x -= 8
        number = pow(-1, sign) * decimal.Decimal(10) ** exponent * mantissa
        return number

    @classmethod
    def to_sql(cls, value):
        # Decimal's str() form is already a valid SQL numeric literal.
        return text_type(value)

    @classmethod
    def prepare(cls, value):
        """Encode a Decimal/float (or None) into the 16-byte wire format."""
        if value is None:
            return struct.pack('b', 0)
        if isinstance(value, float):
            value = decimal.Decimal(value)
        sign, digits, exponent = value.as_tuple()
        if len(digits) > 34:
            # Only 34 significant digits fit; drop the excess digits and
            # bump the exponent so the magnitude is preserved.
            exponent += len(digits) - 34
        mantissa = int(''.join(map(str, digits[:34])))
        exponent += 6176
        packed = bytearray(16)
        packed[0] = (sign << 7) | (exponent >> 7)
        packed[1] = ((exponent & 0x7F) << 1) | (mantissa >> 112)
        shift = 104
        for i in iter_range(2, 16):
            packed[i] = (mantissa >> shift) & 0xFF
            shift -= 8
        # Back to the little-endian order used on the wire.
        packed.reverse()
        return struct.pack('b', cls.type_code) + packed
class Real(Type):
    """32-bit little-endian IEEE float (REAL)."""
    type_code = type_codes.REAL
    _struct = struct.Struct("<f")

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one REAL value; the all-ones bit pattern denotes SQL NULL."""
        raw = payload.read(4)
        if raw == b"\xFF\xFF\xFF\xFF":
            return None
        return cls._struct.unpack(raw)[0]

    @classmethod
    def to_sql(cls, value):
        # A float's str() form is a valid SQL literal.
        return text_type(value)

    @classmethod
    def prepare(cls, value):
        """Serialize *value* (or None) as a typed parameter field."""
        if value is None:
            return struct.pack('b', 0)
        return struct.pack('b', cls.type_code) + cls._struct.pack(float(value))
class Double(Type):
    # 64-bit little-endian IEEE float (DOUBLE); also the registered
    # handler for Python float inputs via ``python_type``.
    type_code = type_codes.DOUBLE
    python_type = float
    _struct = struct.Struct("<d")

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one DOUBLE value from a result set, or None for SQL NULL."""
        # Eight 0xFF bytes denote SQL NULL.
        payload = payload.read(8)
        if payload == b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF":
            return None
        return cls._struct.unpack(payload)[0]

    @classmethod
    def to_sql(cls, value):
        # A float's str() form is a valid SQL literal.
        return text_type(value)

    @classmethod
    def prepare(cls, value):
        """Serialize *value* (or None) as a typed parameter field."""
        if value is None:
            pfield = struct.pack('b', 0)
        else:
            pfield = struct.pack('b', cls.type_code)
            pfield += cls._struct.pack(float(value))
        return pfield
class MixinStringType(object):
    """Mixin with the length-prefixed (de)serialization shared by all
    string-like types (character and binary variants).

    Wire format: one length-indicator byte.  Values up to 245 are the
    length itself; 246/247 announce a following 2-/4-byte length field;
    255 marks SQL NULL.
    """

    @staticmethod
    def get_length(payload):
        """Read the length prefix; return the length, or None for SQL NULL."""
        length_indicator = struct.unpack('B', payload.read(1))[0]
        if length_indicator <= 245:
            length = length_indicator
        elif length_indicator == 246:
            # NOTE(review): read as signed ('h'/'i') while prepare() writes
            # unsigned ('H'/'I'); harmless for the ranges written here.
            length = struct.unpack('h', payload.read(2))[0]
        elif length_indicator == 247:
            length = struct.unpack('i', payload.read(4))[0]
        elif length_indicator == 255:
            return None
        else:
            # Fixed typo in the error message ("inidcator" -> "indicator").
            raise InterfaceError("Unknown length indicator")
        return length

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one string value from a result set, or None for SQL NULL."""
        length = MixinStringType.get_length(payload)
        if length is None:
            return None
        return payload.read(length).decode('cesu-8')

    @classmethod
    def prepare(cls, value, type_code=type_codes.CHAR):
        """Serialize *value* (or None) as a length-prefixed parameter field."""
        pfield = struct.pack('b', type_code)
        if value is None:
            # length indicator 255 encodes SQL NULL
            pfield += struct.pack('B', 255)
        else:
            if not isinstance(value, string_types):
                # Value is provided e.g. as integer, but a string is actually
                # required. Try proper casting into string:
                value = text_type(value)
            value = value.encode('cesu-8')
            length = len(value)
            # length indicator
            if length <= 245:
                pfield += struct.pack('B', length)
            elif length <= 32767:
                pfield += struct.pack('B', 246)
                pfield += struct.pack('H', length)
            else:
                pfield += struct.pack('B', 247)
                pfield += struct.pack('I', length)
            pfield += value
        return pfield
class String(Type, MixinStringType):
    # Covers all character types; MixinStringType supplies the wire format.
    type_code = (type_codes.CHAR, type_codes.VARCHAR, type_codes.NCHAR, type_codes.NVARCHAR,
                 type_codes.STRING, type_codes.NSTRING)
    python_type = string_types
    ESCAPE_REGEX = re.compile(r"[\']")
    ESCAPE_MAP = {"'": "''"}

    @classmethod
    def to_sql(cls, value):
        # Quote the value, doubling any embedded single quote (standard
        # SQL string escaping).
        return "'%s'" % cls.ESCAPE_REGEX.sub(
            lambda match: cls.ESCAPE_MAP.get(match.group(0)),
            value
        )
class Binary(Type, MixinStringType):
    # Binary variants share the string wire format but skip text decoding.
    type_code = (type_codes.BINARY, type_codes.VARBINARY, type_codes.BSTRING)
    python_type = byte_type

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one binary value from a result set, or None for SQL NULL."""
        length = MixinStringType.get_length(payload)
        if length is None:
            return None
        return byte_type(payload.read(length))

    @classmethod
    def to_sql(cls, value):
        # Render as a quoted hex literal (e.g. b'\x01\xff' -> '01ff').
        if PY26:
            # presumably hexlify on 2.6 rejects this input type -- confirm
            value = bytes(value)
        value = binascii.hexlify(value)
        if PY3:
            value = value.decode('ascii')
        return "'%s'" % value
class Date(Type):
    type_code = type_codes.DATE
    python_type = datetime.date
    _struct = struct.Struct("<HBB")

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one DATE value from a result set, or None for SQL NULL."""
        payload = bytearray(payload.read(4))
        # Bit 7 of the second byte flags a real (non-NULL) value.
        if not payload[1] & 0x80:
            return None
        year = payload[0] | (payload[1] & 0x3F) << 8
        # Months are transmitted zero-based.
        month = payload[2] + 1
        day = payload[3]
        return cls.python_type(year, month, day)

    @classmethod
    def to_sql(cls, value):
        return "'%s'" % value.isoformat()

    @classmethod
    def prepare(cls, value):
        """Pack datetime value into proper binary format"""
        pfield = struct.pack('b', cls.type_code)
        if isinstance(value, string_types):
            value = datetime.datetime.strptime(value, "%Y-%m-%d")
        year = value.year | 0x8000  # for some unknown reasons year has to be bit-or'ed with 0x8000
        month = value.month - 1  # for some unknown reasons HANA counts months starting from zero
        pfield += cls._struct.pack(year, month, value.day)
        return pfield

    @classmethod
    def to_daydate(cls, *argv):
        """
        Convert date to Julian day (DAYDATE)

        Accepts either three arguments (year, month, day) or a single
        date-like object exposing ``year``/``month``/``day`` attributes.
        """
        argc = len(argv)
        if argc == 3:
            year, month, day = argv
        elif argc == 1:
            dval = argv[0]
            try:
                year = dval.year
                month = dval.month
                day = dval.day
            except AttributeError:
                raise InterfaceError("Unsupported python date input: %s (%s)" % (str(dval), dval.__class__))
        else:
            raise InterfaceError("Date.to_datetime does not support %d arguments." % argc)
        # Standard Julian Day Number computation; TURN_OF_ERAS shifts the
        # result from the Julian Day epoch to a day count where day 1 is
        # 0001-01-01 (the +1 below).
        TURN_OF_ERAS = 1721424
        if month < 3:
            year -= 1
            month += 12
        # The Gregorian leap-year correction applies only from the calendar
        # switch date (1582-10-15) onwards.
        if ((year > 1582) or
                (year == 1582 and month > 10) or
                (year == 1582 and month == 10 and day >= 15)):
            A = int(year / 100)
            B = int(A / 4)
            C = 2 - A + B
        else:
            C = 0
        E = int(365.25 * (year + 4716))
        F = int(30.6001 * (month + 1))
        Z = C + day + E + F - 1524
        return Z + 1 - TURN_OF_ERAS
class Time(Type):
    type_code = type_codes.TIME
    python_type = datetime.time
    _struct = struct.Struct("<BBH")

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one TIME value from a result set, or None for SQL NULL."""
        hour, minute, millisec = cls._struct.unpack(payload.read(4))
        # Bit 7 of the hour byte flags a real (non-NULL) value.
        if not hour & 0x80:
            return None
        hour = hour & 0x7f
        # The 16-bit field carries seconds*1000 + milliseconds.
        second, millisec = divmod(millisec, 1000)
        return cls.python_type(hour, minute, second, millisec * 1000)

    @classmethod
    def to_sql(cls, value):
        # Note: sub-second precision is dropped from the SQL literal.
        return "'%s'" % value.strftime("%H:%M:%S")

    @classmethod
    def prepare(cls, value):
        """Pack time value into proper binary format"""
        pfield = struct.pack('b', cls.type_code)
        if isinstance(value, string_types):
            # Accept "HH:MM:SS" with optional fractional seconds.
            if "." in value:
                value = datetime.datetime.strptime(value, "%H:%M:%S.%f")
            else:
                value = datetime.datetime.strptime(value, "%H:%M:%S")
        millisecond = value.second * 1000 + value.microsecond // 1000
        hour = value.hour | 0x80  # for some unknown reasons hour has to be bit-or'ed with 0x80
        pfield += cls._struct.pack(hour, value.minute, millisecond)
        return pfield
class Timestamp(Type):
    """TIMESTAMP values: on the wire a DATE field followed by a TIME field."""
    type_code = type_codes.TIMESTAMP
    python_type = datetime.datetime
    _struct = struct.Struct("<HBBBBH")

    @classmethod
    def from_resultset(cls, payload, connection=None):
        """Read one TIMESTAMP value from a result set, or None for SQL NULL."""
        date = Date.from_resultset(payload)
        time = Time.from_resultset(payload)
        if date is None or time is None:
            return None
        return datetime.datetime.combine(date, time)

    @classmethod
    def to_sql(cls, value):
        # The fractional part must be zero-padded to six digits: 1230
        # microseconds is ".001230".  The previous "%s" formatting rendered
        # it as ".1230", which a SQL parser reads as 123000 microseconds.
        return "'%s.%06d'" % (value.strftime("%Y-%m-%d %H:%M:%S"), value.microsecond)

    @classmethod
    def prepare(cls, value):
        """Pack datetime value into proper binary format"""
        pfield = struct.pack('b', cls.type_code)
        if isinstance(value, string_types):
            # Accept "YYYY-MM-DD HH:MM:SS" with optional fractional seconds.
            if "." in value:
                value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S.%f")
            else:
                value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
        millisecond = value.second * 1000 + value.microsecond // 1000
        year = value.year | 0x8000  # for some unknown reasons year has to be bit-or'ed with 0x8000
        month = value.month - 1  # for some unknown reasons HANA counts months starting from zero
        hour = value.hour | 0x80  # for some unknown reasons hour has to be bit-or'ed with 0x80
        pfield += cls._struct.pack(year, month, value.day, hour, value.minute, millisecond)
        return pfield
class MixinLobType(object):
    """Mixin class for all LOB types"""
    type_code = None

    @classmethod
    def from_resultset(cls, payload, connection=None):
        # to avoid circular import the 'lobs' module has to be imported here:
        from . import lobs
        # The connection is handed over as well -- presumably so the LOB
        # object can fetch further data from the server on demand; confirm
        # in the lobs module.
        return lobs.from_payload(cls.type_code, payload, connection)

    @classmethod
    def prepare(cls, value, length=0, position=0, is_last_data=True):
        """Prepare Lob header.
        Note that the actual lob data is NOT written here but appended after the parameter block for each row!
        """
        hstruct = WriteLobHeader.header_struct
        # Option bits: "data included" only if there is any payload, plus
        # the "last data" marker for the final chunk.
        lob_option_dataincluded = WriteLobHeader.LOB_OPTION_DATAINCLUDED if length > 0 else 0
        lob_option_lastdata = WriteLobHeader.LOB_OPTION_LASTDATA if is_last_data else 0
        options = lob_option_dataincluded | lob_option_lastdata
        pfield = hstruct.pack(cls.type_code, options, length, position)
        return pfield
class ClobType(Type, MixinLobType):
    """CLOB type class"""
    type_code = type_codes.CLOB

    @classmethod
    def encode_value(cls, value):
        """Encode text input to ASCII bytes; non-string input is passed through unchanged."""
        return value.encode('ascii') if isinstance(value, string_types) else value
class NClobType(Type, MixinLobType):
    """NCLOB type class"""
    type_code = type_codes.NCLOB

    @classmethod
    def encode_value(cls, value):
        """Encode unicode text to UTF-8 bytes; byte input is passed through unchanged."""
        return value.encode('utf8') if isinstance(value, text_type) else value
class BlobType(Type, MixinLobType):
    """BLOB type class"""
    type_code = type_codes.BLOB

    @classmethod
    def encode_value(cls, value):
        """Encode unicode text to UTF-8 bytes; binary input is passed through unchanged."""
        return value.encode('utf8') if isinstance(value, text_type) else value
class Geometry(Type, MixinStringType):
    """Geometry type class"""
    type_code = type_codes.BLOCATOR

    @classmethod
    def prepare(cls, value):
        # Geometries are sent as plain strings (STRING type code), not with
        # the BLOCATOR code used when reading result sets.
        return MixinStringType.prepare(value, type_codes.STRING)

    @classmethod
    def to_sql(cls, value):
        return text_type(value)
def escape(value):
    """Render a single Python value as a SQL literal.

    Tuples and lists become parenthesized, comma-separated groups
    (recursively escaped); any other value is dispatched to the Type class
    registered for its Python class.  Raises InterfaceError when no handler
    is registered.
    """
    if isinstance(value, (tuple, list)):
        inner = ", ".join(escape(item) for item in value)
        return "(%s)" % inner
    handler = by_python_type.get(value.__class__)
    if handler is None:
        raise InterfaceError(
            "Unsupported python input: %s (%s)" % (value, value.__class__)
        )
    return handler.to_sql(value)
def escape_values(values):
    """Escape every value of a container.

    Lists and tuples come back as a tuple of SQL literals; dicts come back
    as a dict with the same keys and escaped values.  Any other type raises
    InterfaceError.
    """
    if isinstance(values, (tuple, list)):
        return tuple(escape(item) for item in values)
    if isinstance(values, dict):
        return dict((key, escape(val)) for key, val in values.items())
    raise InterfaceError("escape_values expects list, tuple or dict")
| {
"content_hash": "90953531e6c7b224b4b4ebdab8941813",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 119,
"avg_line_length": 30.310526315789474,
"alnum_prop": 0.5862128841812815,
"repo_name": "SAP/PyHDB",
"id": "2903b118845227fde652bac845ecc43c826178c9",
"size": "17853",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyhdb/protocol/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "273042"
}
],
"symlink_target": ""
} |
import pymysql  #Python3

# NOTE(review): pymysql.connect positional arguments map to (host, user,
# password, database) -- as written, user='sips' and password='root',
# which looks like the user and password may be swapped; confirm the
# intended credentials.
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
# Smoke test: print the server version, then release the connection.
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print ("Database version : %s " % data)
db.close()
def create_table():
    """(Re)create the EMPLOYEE demo table, dropping any existing copy.

    The connection is always closed, even if a statement fails (the
    original leaked the connection on error).
    """
    db = pymysql.connect("localhost","sips","root","zaijian" )
    try:
        cursor = db.cursor()
        cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
        sql = """CREATE TABLE EMPLOYEE (
                 FIRST_NAME  CHAR(20) NOT NULL,
                 LAST_NAME  CHAR(20),
                 AGE INT,
                 SEX CHAR(1),
                 INCOME FLOAT )"""
        cursor.execute(sql)
    finally:
        # Close unconditionally so a failed DDL statement cannot leak the
        # connection.
        db.close()
def db_insert():
    """Insert one demo row into EMPLOYEE, rolling back on failure.

    The connection is always closed; the rollback remains best-effort but
    no longer uses a bare ``except:`` (which also swallowed SystemExit and
    KeyboardInterrupt).
    """
    db = pymysql.connect("localhost","sips","root","zaijian" )
    try:
        cursor = db.cursor()
        sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
                 LAST_NAME, AGE, SEX, INCOME)
                 VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
        try:
            cursor.execute(sql)
            db.commit()
        except Exception:
            # Keep the original best-effort behaviour: undo the partial
            # transaction and continue without raising.
            db.rollback()
    finally:
        db.close()
| {
"content_hash": "8098854bad5b2f41e6569bc8800698a8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 60,
"avg_line_length": 24.594594594594593,
"alnum_prop": 0.6021978021978022,
"repo_name": "Python-IoT/Smart-IoT-Planting-System",
"id": "d3e9f7de3f63d7f3de57a5c2272c7c0ae564d742",
"size": "932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloud/db/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "563"
},
{
"name": "Python",
"bytes": "56858"
},
{
"name": "Shell",
"bytes": "2897"
}
],
"symlink_target": ""
} |
import hashlib
import urllib
import hmac
import base64
import time
import urlparse
import calendar
import logging
from email.utils import formatdate
import google.protobuf.text_format as text_format
from ots2.error import *
from ots2.protobuf.encoder import OTSProtoBufferEncoder
from ots2.protobuf.decoder import OTSProtoBufferDecoder
import ots2.protobuf.ots_protocol_2_pb2 as pb2
class OTSProtocol:
api_version = '2014-08-08'
encoder_class = OTSProtoBufferEncoder
decoder_class = OTSProtoBufferDecoder
api_list = {
'CreateTable',
'ListTable',
'DeleteTable',
'DescribeTable',
'UpdateTable',
'GetRow',
'PutRow',
'UpdateRow',
'DeleteRow',
'BatchGetRow',
'BatchWriteRow',
'GetRange'
}
def __init__(self, user_id, user_key, instance_name, encoding, logger):
self.user_id = user_id
self.user_key = user_key
self.instance_name = instance_name
self.encoder = self.encoder_class(encoding)
self.decoder = self.decoder_class(encoding)
self.logger = logger
def _make_headers_string(self, headers):
headers_item = ["%s:%s" % (k.lower(), v.strip()) for k, v in headers.iteritems() if k.startswith('x-ots-') and k != 'x-ots-signature']
return "\n".join(sorted(headers_item))
def _call_signature_method(self, signature_string):
# The signature method is supposed to be HmacSHA1
# A switch case is required if there is other methods available
signature = base64.b64encode(hmac.new(
self.user_key, signature_string, hashlib.sha1
).digest())
return signature
def _make_request_signature(self, query, headers):
uri, param_string, query_string = urlparse.urlparse(query)[2:5]
# TODO a special query should be input to test query sorting,
# because none of the current APIs uses query map, but the sorting
# is required in the protocol document.
query_pairs = urlparse.parse_qsl(query_string)
sorted_query = urllib.urlencode(sorted(query_pairs))
signature_string = uri + '\n' + 'POST' + '\n' + sorted_query + '\n'
headers_string = self._make_headers_string(headers)
signature_string += headers_string + '\n'
signature = self._call_signature_method(signature_string)
return signature
def _make_headers(self, body, query):
# compose request headers and process request body if needed
md5 = base64.b64encode(hashlib.md5(body).digest())
date = formatdate(time.time(), usegmt=True)
headers = {
'x-ots-date' : date,
'x-ots-apiversion' : self.api_version,
'x-ots-accesskeyid' : self.user_id,
'x-ots-instancename' : self.instance_name,
'x-ots-contentmd5' : md5,
}
signature = self._make_request_signature(query, headers)
headers['x-ots-signature'] = signature
headers['User-Agent'] = "aliyun-sdk-python 2.0.6"
return headers
def _make_response_signature(self, query, headers):
uri = urlparse.urlparse(query)[2]
headers_string = self._make_headers_string(headers)
signature_string = headers_string + '\n' + uri
signature = self._call_signature_method(signature_string)
return signature
def _convert_urllib3_headers(self, headers):
"""
old urllib3 headers: {'header1':'value1', 'header2':'value2'}
new urllib3 headers: {'header1':('header1', 'value1'), 'header2':('header2', 'value2')}
"""
std_headers = {}
for k,v in headers.iteritems():
if isinstance(v, tuple) and len(v) == 2:
std_headers[k.lower()] = v[1]
else:
std_headers[k.lower()] = v
return std_headers
def _check_headers(self, headers, body, status=None):
# check the response headers and process response body if needed.
# 1, make sure we have all headers
header_names = [
'x-ots-contentmd5',
'x-ots-requestid',
'x-ots-date',
'x-ots-contenttype',
]
if status >= 200 and status < 300:
for name in header_names:
if not name in headers:
raise OTSClientError('"%s" is missing in response header.' % name)
# 2, check md5
if 'x-ots-contentmd5' in headers:
md5 = base64.b64encode(hashlib.md5(body).digest())
if md5 != headers['x-ots-contentmd5']:
raise OTSClientError('MD5 mismatch in response.')
# 3, check date
if 'x-ots-date' in headers:
try:
server_time = time.strptime(headers['x-ots-date'], '%a, %d %b %Y %H:%M:%S %Z')
except ValueError:
raise OTSClientError('Invalid date format in response.')
# 4, check date range
server_unix_time = calendar.timegm(server_time)
now_unix_time = time.time()
if abs(server_unix_time - now_unix_time) > 15 * 60:
raise OTSClientError('The difference between date in response and system time is more than 15 minutes.')
def _check_authorization(self, query, headers, status=None):
auth = headers.get('authorization')
if auth is None:
if status >= 200 and status < 300:
raise OTSClientError('"Authorization" is missing in response header.')
else:
return
# 1, check authorization
if not auth.startswith('OTS '):
raise OTSClientError('Invalid Authorization in response.')
# 2, check accessid
access_id, signature = auth[4:].split(':')
if access_id != self.user_id:
raise OTSClientError('Invalid accesskeyid in response.')
# 3, check signature
if signature != self._make_response_signature(query, headers):
raise OTSClientError('Invalid signature in response.')
def make_request(self, api_name, *args, **kwargs):
if api_name not in self.api_list:
raise OTSClientError('API %s is not supported.' % api_name)
proto = self.encoder.encode_request(api_name, *args, **kwargs)
body = proto.SerializeToString()
query = '/' + api_name
headers = self._make_headers(body, query)
if self.logger.level <= logging.DEBUG:
# prevent to generate formatted message which is time consuming
self.logger.debug("OTS request, API: %s, Headers: %s, Protobuf: %s" % (
api_name, headers,
text_format.MessageToString(proto, as_utf8=True, as_one_line=True)
))
return query, headers, body
def _get_request_id_string(self, headers):
request_id = headers.get('x-ots-requestid')
if request_id is None:
request_id = ""
return request_id
def parse_response(self, api_name, status, headers, body):
if api_name not in self.api_list:
raise OTSClientError("API %s is not supported." % api_name)
headers = self._convert_urllib3_headers(headers)
try:
ret, proto = self.decoder.decode_response(api_name, body)
except Exception, e:
request_id = self._get_request_id_string(headers)
error_message = 'Response format is invalid, %s, RequestID: %s, " \
"HTTP status: %s, Body: %s.' % (str(e), request_id, status, body)
self.logger.error(error_message)
raise OTSClientError(error_message, status)
if self.logger.level <= logging.DEBUG:
# prevent to generate formatted message which is time consuming
request_id = self._get_request_id_string(headers)
self.logger.debug("OTS response, API: %s, RequestID: %s, Protobuf: %s." % (
api_name, request_id,
text_format.MessageToString(proto, as_utf8=True, as_one_line=True)
))
return ret
def handle_error(self, api_name, query, status, reason, headers, body):
# convert headers according to different urllib3 versions.
std_headers = self._convert_urllib3_headers(headers)
if self.logger.level <= logging.DEBUG:
# prevent to generate formatted message which is time consuming
self.logger.debug("OTS response, API: %s, Status: %s, Reason: %s, " \
"Headers: %s" % (api_name, status, reason, std_headers))
if api_name not in self.api_list:
raise OTSClientError('API %s is not supported.' % api_name)
try:
self._check_headers(std_headers, body, status=status)
if status != 403:
self._check_authorization(query, std_headers, status=status)
except OTSClientError, e:
e.http_status = status
e.message += " HTTP status: %s." % status
raise e
if status >= 200 and status < 300:
return
else:
request_id = self._get_request_id_string(std_headers)
try:
error_proto = pb2.Error()
error_proto.ParseFromString(body)
error_code = error_proto.code
error_message = error_proto.message
except:
error_message = "HTTP status: %s, reason: %s." % (status, reason)
self.logger.error(error_message)
raise OTSClientError(error_message, status)
try:
if status == 403 and error_proto.code != "OTSAuthFailed":
self._check_authorization(query, std_headers)
except OTSClientError, e:
e.http_status = status
e.message += " HTTP status: %s." % status
raise e
self.logger.error("OTS request failed, API: %s, HTTPStatus: %s, " \
"ErrorCode: %s, ErrorMessage: %s, RequestID: %s." % (
api_name, status, error_proto.code, error_proto.message, request_id)
)
raise OTSServiceError(status, error_proto.code, error_proto.message, request_id)
| {
"content_hash": "acf791edc2fdc5f57c9f955fb1662644",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 142,
"avg_line_length": 37.80952380952381,
"alnum_prop": 0.5840922301879481,
"repo_name": "wanghq/goots",
"id": "5ab9cc881e8380697848d9cbe83861e03cb46069",
"size": "10346",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "doc/aliyun-tablestore-python-sdk-2.0.8/ots2/protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "256672"
}
],
"symlink_target": ""
} |
from . import pyloggingconf
from .debugfile import DebugFile
from .librarylisteners import LibraryListeners
from .listeners import Listeners
from .logger import LOGGER
from .loggerhelper import AbstractLogger
from .xmllogger import XmlLogger
class Output(AbstractLogger):
    """Routes execution events to the XML output file and registered loggers."""

    def __init__(self, settings):
        AbstractLogger.__init__(self)
        self._xmllogger = XmlLogger(settings['Output'], settings['LogLevel'])
        self._register_loggers(settings['Listeners'], settings['DebugFile'])
        self._settings = settings

    def _register_loggers(self, listeners, debugfile):
        """Hook the XML logger plus any configured extra loggers into LOGGER."""
        LOGGER.register_context_changing_logger(self._xmllogger)
        extra_loggers = (Listeners(listeners), LibraryListeners(),
                         DebugFile(debugfile))
        for extra in extra_loggers:
            # Falsy logger objects (e.g. nothing configured) are skipped.
            if extra:
                LOGGER.register_logger(extra)
        LOGGER.disable_message_cache()

    def close(self, result):
        """Write statistics, close the XML file and unregister its logger."""
        self._xmllogger.visit_statistics(result.statistics)
        self._xmllogger.close()
        LOGGER.unregister_logger(self._xmllogger)
        LOGGER.output_file('Output', self._settings['Output'])

    def start_suite(self, suite):
        LOGGER.start_suite(suite)

    def end_suite(self, suite):
        LOGGER.end_suite(suite)

    def start_test(self, test):
        LOGGER.start_test(test)

    def end_test(self, test):
        LOGGER.end_test(test)

    def start_keyword(self, kw):
        LOGGER.start_keyword(kw)

    def end_keyword(self, kw):
        LOGGER.end_keyword(kw)

    def message(self, msg):
        LOGGER.log_message(msg)

    def set_log_level(self, level):
        """Set the level for both the Python logging bridge and XML output."""
        pyloggingconf.set_level(level)
        return self._xmllogger.set_log_level(level)
| {
"content_hash": "5927cc96c7cc5726f3590f10c9409fa9",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.214285714285715,
"alnum_prop": 0.6601654846335697,
"repo_name": "ldtri0209/robotframework",
"id": "b35bd36767b3315eb80669b65149f2f0feb89dc6",
"size": "2300",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/robot/output/output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "16539"
},
{
"name": "HTML",
"bytes": "1011996"
},
{
"name": "Java",
"bytes": "58737"
},
{
"name": "JavaScript",
"bytes": "159003"
},
{
"name": "Python",
"bytes": "1992779"
},
{
"name": "RobotFramework",
"bytes": "4288"
},
{
"name": "Shell",
"bytes": "883"
}
],
"symlink_target": ""
} |
"""Agent with uses A3C trained network"""
import random
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from loveletter.env import LoveLetterEnv
from loveletter.agents.random import AgentRandom
from loveletter.agents.agent import Agent
from loveletter.trainers.a3c_model import ActorCritic
class AgentA3C(Agent):
    '''Agent which leverages Actor Critic Learning'''
    def __init__(self,
                 model_path,
                 dtype,
                 seed=451):
        # seed drives both the internal env's opponent and the random
        # fallback in _move.
        self._seed = seed
        # Move counter; combined with the seed to vary the fallback RNG.
        self._idx = 0
        self._dtype = dtype
        # Environment is only used to encode game states and map action
        # indices back to player actions; its opponent agent is random.
        self.env = LoveLetterEnv(AgentRandom(seed), seed)
        state = self.env.reset()
        # Network input size is taken from the encoded state vector.
        self._model = ActorCritic(
            state.shape[0], self.env.action_space).type(dtype)
        self._model.load_state_dict(torch.load(model_path))
    def _move(self, game):
        '''Pick an action for the given game using the trained A3C network;
        falls back to a seeded random valid action when the network's choice
        is not playable.'''
        assert game.active()
        self._idx += 1
        state = self.env.force(game)
        state = torch.from_numpy(state).type(self._dtype)
        # Recurrent hidden/cell states, zeroed per move (1 x 256).
        # NOTE(review): presumably an LSTM state sized to match training —
        # confirm against ActorCritic's definition.
        cx = Variable(torch.zeros(1, 256).type(self._dtype), volatile=True)
        hx = Variable(torch.zeros(1, 256).type(self._dtype), volatile=True)
        # volatile=True: legacy (pre-0.4) torch inference mode, no autograd.
        _, logit, (hx, cx) = self._model(
            (Variable(state.unsqueeze(0), volatile=True), (hx, cx)))
        prob = F.softmax(logit)
        # Greedy action: index of the highest-probability logit.
        action_idx = prob.max(1)[1].data.cpu().numpy()[0, 0]
        player_action = self.env.action_from_index(action_idx, game)
        if player_action is None:
            # Network chose an unplayable action; fall back to a random
            # valid one, deterministically seeded per move.
            # print("ouch")
            options = Agent.valid_actions(game, self._seed + self._idx)
            if len(options) < 1:
                raise Exception("Unable to play without actions")
            random.seed(self._seed + self._idx)
            return random.choice(options)
        # print("playing ", self._idx, player_action)
        return player_action
| {
"content_hash": "f2c0ccb33d65fd0e95e892c34c2338dc",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6052631578947368,
"repo_name": "user01/love-letter",
"id": "2834b4f891cd4151fd5fcdd55e17eb72d750250b",
"size": "1900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loveletter/agents/a3c.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "36"
},
{
"name": "Python",
"bytes": "87235"
}
],
"symlink_target": ""
} |
"""HTTP Basic Authentication tool.
This module provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in
:rfc:`2617`.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store::
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
'tools.auth_basic.accept_charset': 'UTF-8',
}
app_config = { '/' : basic_auth }
"""
import binascii
import unicodedata
import base64
import cherrypy
from cherrypy._cpcompat import ntou, tonative
__author__ = 'visteya'
__date__ = 'April 2009'
def checkpassword_dict(user_password_dict):
    """Build a checkpassword callable backed by a {username: password} dict.

    The returned function matches the signature expected by basic_auth:
    ``checkpassword(realm, user, password) -> bool``. Pass it as the
    ``checkpassword`` argument for a simple dictionary-based scheme.
    """
    def checkpassword(realm, user, password):
        stored = user_password_dict.get(user)
        # Unknown users and falsy stored passwords both fail authentication.
        return bool(stored) and stored == password

    return checkpassword
def basic_auth(realm, checkpassword, debug=False, accept_charset='utf-8'):
    """A CherryPy tool which hooks at before_handler to perform
    HTTP Basic Access Authentication, as specified in :rfc:`2617`
    and :rfc:`7617`.

    If the request has an 'authorization' header with a 'Basic' scheme, this
    tool attempts to authenticate the credentials supplied in that header.  If
    the request has no 'authorization' header, or if it does but the scheme is
    not 'Basic', or if authentication fails, the tool sends a 401 response with
    a 'WWW-Authenticate' Basic header.

    realm
        A string containing the authentication realm.  Must not contain
        a double-quote character (raises ValueError).

    checkpassword
        A callable which checks the authentication credentials.
        Its signature is checkpassword(realm, username, password). where
        username and password are the values obtained from the request's
        'authorization' header. If authentication succeeds, checkpassword
        returns True, else it returns False.

    debug
        If True, log a message on successful authentication.

    accept_charset
        Preferred charset for decoding the credentials; ISO-8859-1 is
        always tried as a fallback.
    """
    fallback_charset = 'ISO-8859-1'

    if '"' in realm:
        raise ValueError('Realm cannot contain the " (quote) character.')
    request = cherrypy.serving.request

    auth_header = request.headers.get('authorization')
    if auth_header is not None:
        # split() error, base64.decodestring() error
        msg = 'Bad Request'
        # Translate decoding errors into an HTTP 400 instead of a 500.
        with cherrypy.HTTPError.handle((ValueError, binascii.Error), 400, msg):
            scheme, params = auth_header.split(' ', 1)
            if scheme.lower() == 'basic':
                # Decode the base64 payload, then normalize it to a native
                # str: preferred charset first, latin-1 as fallback, then
                # Unicode NFC normalization.
                charsets = accept_charset, fallback_charset
                decoded_params = base64.b64decode(params.encode('ascii'))
                decoded_params = _try_decode(decoded_params, charsets)
                decoded_params = ntou(decoded_params)
                decoded_params = unicodedata.normalize('NFC', decoded_params)
                decoded_params = tonative(decoded_params)
                # Only the first ':' separates user from password; the
                # password itself may contain ':' characters.
                username, password = decoded_params.split(':', 1)
                if checkpassword(realm, username, password):
                    if debug:
                        cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
                    request.login = username
                    return  # successful authentication

    # Advertise the charset only when it differs from the fallback.
    charset = accept_charset.upper()
    charset_declaration = (
        (', charset="%s"' % charset)
        if charset != fallback_charset
        else ''
    )

    # Respond with 401 status and a WWW-Authenticate header
    cherrypy.serving.response.headers['www-authenticate'] = (
        'Basic realm="%s"%s' % (realm, charset_declaration)
    )
    raise cherrypy.HTTPError(
        401, 'You are not authorized to access that resource')
def _try_decode(subject, charsets):
    """Decode *subject* with the first charset that works.

    Every charset except the last is tried with errors suppressed; the
    final one is attempted unguarded so its ValueError propagates.
    """
    head, fallback = charsets[:-1], charsets[-1]
    for encoding in head:
        try:
            return tonative(subject, encoding)
        except ValueError:
            continue
    return tonative(subject, fallback)
| {
"content_hash": "85fcf7394af003faa39957a38e305d96",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.6526904262753319,
"repo_name": "Safihre/cherrypy",
"id": "ad379a2601765fdee0809c4174b144d7331d71a2",
"size": "4421",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "cherrypy/lib/auth_basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "510"
},
{
"name": "Python",
"bytes": "1029257"
}
],
"symlink_target": ""
} |
# Re-export the public dashboard API at package level.
from grappelli.dashboard.dashboards import *
from grappelli.dashboard.registry import *
# Dotted path to the AppConfig class Django loads for this app
# (default_app_config convention).
default_app_config = "grappelli.dashboard.apps.DashboardConfig"
| {
"content_hash": "74b889de9e61f08e5c6762a2652e7b89",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 63,
"avg_line_length": 38.25,
"alnum_prop": 0.8300653594771242,
"repo_name": "lz1988/django-web2015",
"id": "bb9f765438583ae7f5158568c496ba738fc645b4",
"size": "153",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/contrib/grappelli/dashboard/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "797682"
},
{
"name": "CSS",
"bytes": "527578"
},
{
"name": "Emacs Lisp",
"bytes": "152779"
},
{
"name": "Groff",
"bytes": "61139"
},
{
"name": "HTML",
"bytes": "3184026"
},
{
"name": "JavaScript",
"bytes": "760809"
},
{
"name": "Python",
"bytes": "13157847"
},
{
"name": "Ruby",
"bytes": "1758"
},
{
"name": "Shell",
"bytes": "154036"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Tcl",
"bytes": "2476"
},
{
"name": "Yacc",
"bytes": "7550"
}
],
"symlink_target": ""
} |
"""
Base classes and utility functions to handle unit-testing.
"""
import unittest
import os
import sys
import datetime
from contextlib import contextmanager
from maya import cmds
import pymel.core as pymel
import libSerialization
def _get_holded_shapes():
    """
    :return: All shape nodes connected to 'BaseCtrl' networks in the scene.
    """
    return [shape
            for net in libSerialization.get_networks_from_class('BaseCtrl')
            if net.hasAttr('shapes')
            for shape in net.shapes.inputs()]
#
# Decorators
#
def open_scene(path_local):
    """Decorator factory: open the scene at *path_local* (relative to the
    decorated function's module) before running the wrapped function.

    Raises Exception when the scene file does not exist on disk.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            module_file = sys.modules[func.__module__].__file__
            scene_path = os.path.abspath(
                os.path.join(os.path.dirname(module_file), path_local))
            if not os.path.exists(scene_path):
                raise Exception("File does not exist on disk! {0}".format(scene_path))
            cmds.file(scene_path, open=True, f=True)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def save_on_assert():
    """
    Decorator factory: if the wrapped function raises, save a timestamped
    backup of the current Maya scene, then re-raise the exception.

    Bug fixes relative to the previous version:
    - the decorator used to call ``f()`` at decoration time instead of
      returning a wrapper, so the decorated function ran immediately and
      became ``None``;
    - the timestamp referenced an undefined name ``ts`` (NameError); it is
      now taken from the current wall-clock time.
    """
    def deco(f):
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception:
                current_path = cmds.file(q=True, sn=True)
                # Only back up when the scene has been saved at least once.
                if current_path:
                    dirname = os.path.dirname(current_path)
                    basename = os.path.basename(current_path)
                    filename, ext = os.path.splitext(basename)
                    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
                    destination_path = os.path.join(dirname, '{}_{}{}'.format(filename, timestamp, ext))
                    print("Saving scene to {}".format(destination_path))
                    cmds.file(rename=destination_path)
                    cmds.file(save=True, type='mayaAscii')
                raise
        return wrapper
    return deco
def assertMatrixAlmostEqual(a, b, r_epsilon=0.01, t_epsilon=0.1, multiplier=1.0):
    """
    Compare two pymel.datatypes.Matrix values and raise when they diverge.

    :param a: A pymel.datatypes.Matrix instance.
    :param b: A pymel.datatypes.Matrix instance.
    :param r_epsilon: Accepted rotation drift (per-axis dot-product error).
    :param t_epsilon: Accepted translation drift (in cm).
    :param multiplier: Applied scale factor; widens t_epsilon proportionally.
    """
    # A bigger scale implies proportionally bigger acceptable imprecision.
    t_epsilon *= multiplier

    a_x, a_y, a_z, a_pos = a.data
    b_x, b_y, b_z, b_pos = b.data

    # Compare the x, y and z axes through their normalized dot product.
    for axis_index in range(3):
        axis_a = pymel.datatypes.Vector(
            a.data[axis_index][0], a.data[axis_index][1], a.data[axis_index][2])
        axis_b = pymel.datatypes.Vector(
            b.data[axis_index][0], b.data[axis_index][1], b.data[axis_index][2])
        axis_a.normalize()
        axis_b.normalize()
        diff = abs(1.0 - axis_a.dot(axis_b))
        if diff > r_epsilon:
            raise Exception("{} != {} (dot product {} > epsilon {})".format(axis_a, axis_b, diff, r_epsilon))

    # Compare the translation components.
    distance = a_pos.distanceTo(b_pos)
    if distance > t_epsilon:
        raise Exception("{} != {} (distance {} > epsilon {})".format(a_pos, b_pos, distance, t_epsilon))
@contextmanager
def verified_offset(objs, offset_tm, **kwargs):
    """
    Context manager that snapshots the world matrices of *objs* on entry.
    On exit, each object's new world matrix must equal its old matrix times
    *offset_tm*; otherwise an Exception is raised.

    Use this to test for scaling issues, flipping and double transformations.

    :param objs: Objects exposing ``getMatrix(worldSpace=True)``.
    :param offset_tm: Expected offset matrix applied inside the context.
    :param kwargs: Forwarded to :func:`assertMatrixAlmostEqual`
        (e.g. ``multiplier``, ``r_epsilon``, ``t_epsilon``).
    """
    # Store the base matrices
    old_tms = [obj.getMatrix(worldSpace=True) for obj in objs]

    yield True

    # Verify the matrices match the expected offset.
    for obj, old_tm in zip(objs, old_tms):
        new_tm = obj.getMatrix(worldSpace=True)
        desired_tm = old_tm * offset_tm
        try:
            assertMatrixAlmostEqual(new_tm, desired_tm, **kwargs)
        except Exception as e:  # was Py2-only "except Exception, e" syntax
            raise Exception("Invalid transform for {}. {}".format(obj, e))
def validate_built_rig(rig, test_translate=True, test_translate_value=pymel.datatypes.Vector(1, 0, 0), test_rotate=True, test_scale=True, test_scale_value=2.0):
    """
    Verify a built rig behaves under whole-rig transforms:
    - Does the rig translate correctly?
    - Does the rig rotate correctly?
    - Does the rig scale correctly?

    :param rig: The rig to validate (must expose grp_anm, get_influences, get_ctrls).
    :param test_translate: If True, the rig will be verified for translation.
    :param test_translate_value: The value to use when testing the translation.
    :param test_rotate: If True, the rig will be verified for rotation.
    :param test_scale: If True, the rig will be verified for scaling.
    :param test_scale_value: The value to use when testing the scale.

    NOTE(review): test_translate_value defaults to a pymel Vector instance —
    a mutable default argument shared across calls. It is only read here,
    but callers mutating it would affect every later call; confirm intended.
    """
    # Collect everything whose world transform must follow the rig:
    # joint influences plus animation controllers.
    influences = rig.get_influences(key=lambda x: isinstance(x, pymel.nodetypes.Joint))
    ctrls = rig.get_ctrls()
    objs = influences + ctrls
    # Ensure the rig translate correctly.
    if test_translate:
        print("Validating translate...")
        # Pure translation matrix built from the test vector.
        offset_tm = pymel.datatypes.Matrix(
            1, 0, 0, 0,
            0, 1, 0, 0,
            0, 0, 1, 0,
            test_translate_value.x, test_translate_value.y, test_translate_value.z, 1.0
        )
        with verified_offset(objs, offset_tm, multiplier=test_translate_value.length()):
            rig.grp_anm.t.set(test_translate_value)
        # Restore the rig to its rest position.
        rig.grp_anm.t.set(0, 0, 0)
    if test_rotate:
        print("Validating rotate...")
        # Pairs of (euler rotation applied, expected world offset matrix).
        offset_tms_by_rot = (
            ((90, 90, 90), pymel.datatypes.Matrix(0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)),
            ((180, 0, 0), pymel.datatypes.Matrix(1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0)),
            ((0, 180, 0), pymel.datatypes.Matrix(-1.0, 0.0, -0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0)),
            ((0, 0, 180), pymel.datatypes.Matrix(-1.0, 0.0, 0.0, 0.0, -0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0)),
        )
        for rot, offset_tm in offset_tms_by_rot:
            with verified_offset(objs, offset_tm):
                rig.grp_anm.r.set(rot)
            # Reset rotation between each tested orientation.
            rig.grp_anm.r.set(0,0,0)
    # Ensure we the rig scale correctly.
    if test_scale:
        print("Validating scale...")
        m = test_scale_value
        # Uniform scale matrix.
        scale_tm = pymel.datatypes.Matrix(
            m, 0, 0, 0,
            0, m, 0, 0,
            0, 0, m, 0,
            0, 0, 0, 1
        )
        with verified_offset(objs, scale_tm, multiplier=test_scale_value):
            rig.grp_anm.globalScale.set(test_scale_value)
        # Restore the default scale.
        rig.grp_anm.globalScale.set(1.0)
class TestCase(unittest.TestCase):
    def _test_build_rig(self, rig, **kwargs):
        """
        Build *rig* in strict mode, then validate its behavior under
        translate/rotate/scale via :func:`validate_built_rig`.

        :param rig: The rig to build and validate.
        :param kwargs: Forwarded to validate_built_rig (test_translate,
            test_translate_value, test_scale, test_scale_value, ...).
        """
        rig.build(strict=True)
        validate_built_rig(rig, **kwargs)

    def _test_unbuild_rig(self, rig, test_shapes=True):
        """
        Unbuild *rig* and ensure no ctrl shapes were gained or lost.

        :param rig: The rig to unbuild.
        :param test_shapes: If True, shape counts before/after are compared.
        """
        shapes_before = len(_get_holded_shapes())
        rig.unbuild()
        shapes_after = len(_get_holded_shapes())
        self.assertEqual(shapes_before, shapes_after)

    def _build_unbuild_build_all(self, **kwargs):
        """
        Build, unbuild and rebuild every rig in the scene, then check that
        no junk shapes remain (a sign of incomplete cleanup).
        """
        import omtk
        shapes_before = len(_get_holded_shapes())
        for rig in omtk.find():
            self._test_build_rig(rig, **kwargs)
            self._test_unbuild_rig(rig)
            self._test_build_rig(rig, **kwargs)
        # Ensure no shapes are left after a rebuild.
        shapes_after = len(_get_holded_shapes())
        self.assertEqual(shapes_before, shapes_after)
"content_hash": "c209b4c1f83f8a98b5b4ca105639e9a6",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 160,
"avg_line_length": 38.99551569506726,
"alnum_prop": 0.6062557497700092,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "9235790999347b44bd5e9ea94d2b3b501960fc2f",
"size": "8696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/omtk_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
from syncer import Syncer
class Photo(object):
    """Lightweight record exposing each key of the given mapping as an attribute."""

    def __init__(self, dict):
        # NOTE(review): the parameter name shadows the builtin ``dict``;
        # kept unchanged for backward compatibility with keyword callers.
        self.__dict__.update(dict)
class Tests():
    def test_date_path_zero_pads_months(self):
        """A single-digit month must render with a leading zero."""
        syncer = Syncer("/storage")
        photo = Photo({
            "dateTakenYear": "2014",
            "dateTakenMonth": "4",
            "dateTakenDay": "16",
            "filenameOriginal": "monkey.png"})
        result = syncer._getDatePathTo(photo)
        assert result == "/storage/date/2014/04/16/monkey.png"

    def test_date_path_zero_pads_days(self):
        """A single-digit day must render with a leading zero."""
        syncer = Syncer("/storage")
        photo = Photo({
            "dateTakenYear": "2014",
            "dateTakenMonth": "12",
            "dateTakenDay": "6",
            "filenameOriginal": "monkey.png"})
        result = syncer._getDatePathTo(photo)
        assert result == "/storage/date/2014/12/06/monkey.png"
| {
"content_hash": "1118678592a72100692768dcfeb20a7d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 54,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.6480978260869565,
"repo_name": "FredrikWendt/trovebox-syncer",
"id": "89078cebd0500a2df77ded8f35762385a5a217ad",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syncer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5524"
}
],
"symlink_target": ""
} |
"""
Declarative.
..description: Database models.
..author: Arthur Moore <arthur.moore85@gmail.com>
..date: 07/03/2016
"""
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
__author__ = 'amoore'
Base = declarative_base()
ENGINE = create_engine('sqlite:///slack_bot_database.db')
class Person(Base):
    """
    Person model: a known user identified by username.
    """
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    # Username of the person (required, up to 100 characters).
    username = Column(String(100), nullable=False)
class TicketQueue(Base):
    """
    Ticket Queue model: a message from one Person (requester) to
    another (receiver).
    """
    __tablename__ = 'ticketqueue'
    # Composite primary key: ticket id plus both participant ids.
    id = Column(Integer, primary_key=True)
    requester_id = Column(Integer, ForeignKey('person.id'), primary_key=True)
    receiver_id = Column(Integer, ForeignKey('person.id'), primary_key=True)
    # Ticket message body (required, up to 250 characters).
    message = Column(String(250), nullable=False)
    # Two FKs point at Person, so each relationship names its own FK column.
    requester = relationship('Person', foreign_keys='TicketQueue.requester_id')
    receiver = relationship('Person', foreign_keys='TicketQueue.receiver_id')
def load_session():
    """
    Create and return a new SQLAlchemy session bound to the module engine.

    :return: A new ``Session`` instance bound to ``ENGINE``.
    """
    # (Removed an unused local "metadata = Base.metadata" assignment.)
    Session = sessionmaker(bind=ENGINE)
    return Session()
if __name__ == '__main__':
    # Create all tables mapped on Base using the module-level engine.
    # (The engine itself is created at import time, not here.)
    Base.metadata.create_all(ENGINE)
"content_hash": "f58abf5327bee4e644d812e96339c34d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 25.535714285714285,
"alnum_prop": 0.6825174825174826,
"repo_name": "ArthurMoore85/rb5-slack-bot",
"id": "01c745853b529dc0e4505a92a4cca3583dbbb723",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/create_database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28853"
}
],
"symlink_target": ""
} |
import numpy as np
import sys
from .base import Prox
from .build.prox import ProxEqualityDouble as _ProxEqualityDouble
from .build.prox import ProxEqualityFloat as _ProxEqualityFloat
__author__ = 'Stephane Gaiffas'
# Maps a numpy dtype to the matching C++ prox implementation class.
dtype_map = {
    np.dtype("float64"): _ProxEqualityDouble,
    np.dtype("float32"): _ProxEqualityFloat
}
class ProxEquality(Prox):
    """Projection operator onto the set of vectors with all coordinates equal
    (or in the given range if given one).

    Namely, this simply replaces all coordinates by their average.

    Parameters
    ----------
    strength : `float`, default=0.
        Not used in this prox, but kept for compatibility issues

    range : `tuple` of two `int`, default=`None`
        Range on which the prox is applied. If `None` then the prox is
        applied on the whole vector

    positive : `bool`, default=`False`
        If True, ensures that the output of the prox has only non-negative
        entries (in the given range)

    Attributes
    ----------
    dtype : `{'float64', 'float32'}`
        Type of the arrays used.
    """

    _attrinfos = {"positive": {"writable": True, "cpp_setter": "set_positive"}}

    def __init__(self, strength: float = 0, range: tuple = None,
                 positive: bool = False):
        Prox.__init__(self, range)
        self.positive = positive
        # Always start with the double-precision C++ backend; it is swapped
        # by _build_cpp_prox if a different dtype is requested later.
        self._prox = self._build_cpp_prox("float64")

    def _call(self, coeffs: np.ndarray, step: object, out: np.ndarray):
        # Delegate to the C++ implementation; the result is written into out.
        self._prox.call(coeffs, step, out)

    def value(self, coeffs: np.ndarray):
        """
        Simply returns 0 if all coeffs in range are equal. Other wise returns
        infinity. This is not a penalization but a projection.

        Parameters
        ----------
        coeffs : `numpy.ndarray`, shape=(n_coeffs,)
            Vector to be projected

        Returns
        -------
        output : `float`
            Returns 0 or np.inf
        """
        raw_value = self._prox.value(coeffs)
        # The C++ layer signals "not in the set" with the max double value.
        if raw_value == sys.float_info.max:
            return np.inf
        else:
            return 0

    @property
    def strength(self):
        # Strength is meaningless for a projection; always None.
        return None

    @strength.setter
    def strength(self, val):
        # Strength is not settable in this prox
        pass

    def _build_cpp_prox(self, dtype_or_object_with_dtype):
        """Instantiate the typed C++ prox backend matching the given dtype."""
        self.dtype = self._extract_dtype(dtype_or_object_with_dtype)
        prox_class = self._get_typed_class(dtype_or_object_with_dtype,
                                           dtype_map)
        if self.range is None:
            return prox_class(0., self.positive)
        # (Removed an unreachable "return None" that followed this if/else.)
        return prox_class(0., self.range[0], self.range[1], self.positive)
| {
"content_hash": "e9859a5d3732b35f606c95ff771d7d20",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 30.370786516853933,
"alnum_prop": 0.5985941546429893,
"repo_name": "X-DataInitiative/tick",
"id": "cd3ca441fed49b77f9c0546c260bed641b1cc4f8",
"size": "2752",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tick/prox/prox_equality.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5890"
},
{
"name": "C++",
"bytes": "1246006"
},
{
"name": "CMake",
"bytes": "25186"
},
{
"name": "Dockerfile",
"bytes": "2039"
},
{
"name": "Python",
"bytes": "1492424"
},
{
"name": "SWIG",
"bytes": "192101"
},
{
"name": "Shell",
"bytes": "32367"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from spaces import multi_discrete_with_none
from gym.spaces import multi_discrete
class MultiDiscreteWithNoneTest(absltest.TestCase):

    def test_space_contains_none(self):
        """The space should contain None."""
        space = multi_discrete_with_none.MultiDiscreteWithNone([1, 2, 3, 4])
        self.assertTrue(space.contains(None))

    def test_base_and_with_none_agree(self):
        """MultiDiscrete and MultiDiscreteWithNone should agree about non-None."""
        nvec = [4, 4, 4]
        base_space = multi_discrete.MultiDiscrete(nvec)
        extended_space = multi_discrete_with_none.MultiDiscreteWithNone(nvec)
        # Out-of-range and in-range vectors must classify identically.
        candidates = ([-1, 1, 2], [2, 1, 0], [1, 2, 3])
        for candidate in candidates:
            self.assertEqual(base_space.contains(candidate),
                             extended_space.contains(candidate))

    def test_none_can_be_sampled(self):
        """With none_probability=1, sampling must always yield None."""
        space = multi_discrete_with_none.MultiDiscreteWithNone(
            nvec=[1, 2, 3, 4], none_probability=1)
        self.assertIsNone(space.sample())
if __name__ == '__main__':
    # Run the test suite when executed directly.
    absltest.main()
| {
"content_hash": "ca0436aa602f917463e5f3336c41dbf9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 33.2972972972973,
"alnum_prop": 0.6793831168831169,
"repo_name": "google/ml-fairness-gym",
"id": "9934d3423c22deecc32cfd3175b8fec126bd0f50",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spaces/multi_discrete_with_none_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "654678"
},
{
"name": "Shell",
"bytes": "1613"
}
],
"symlink_target": ""
} |
import platform
import os
import stat
import logging
import json
import concurrent
from concurrent.futures import ThreadPoolExecutor
import requests
from requests.compat import urljoin, quote_plus
from .githash import calc_file_checksum
# TODO: Properly use mirrors and load them from a file (which is also synced)
# Known download mirrors, tried in order (see try_get).
mirrors = ["http://tzaeru.com:4445/"]
# Maps platform.system() output to the per-platform directory on the server.
platformToDir = {
    "Linux": "linux",
    "Darwin": "mac",
    "Windows": "windows"
}
def try_get(resource):
    """GET *resource* from the first mirror that responds.

    Previously only the first mirror was ever contacted (the loop returned
    unconditionally on its first iteration). Now each configured mirror is
    tried in order; a network error falls through to the next mirror and
    the last error is re-raised if every mirror fails.

    :param resource: server-relative resource path (e.g. "files/").
    :return: the ``requests.Response``, or None when no mirrors are
        configured (preserves the old empty-list behavior).
    """
    last_error = None
    for mirror in mirrors:
        url = urljoin(mirror, resource)
        try:
            return requests.get(url)
        except requests.RequestException as e:
            logging.warning("Mirror failed: %s (%s)", url, e)
            last_error = e
    if last_error is not None:
        raise last_error
def download_files(update_list, callback=None, root_path=None):
    """Download every entry of *update_list* concurrently (20 workers).

    Each entry is a dict with "server_path", "local_path" and "checksum".
    The first failing download is logged and its exception re-raised.

    :param update_list: iterable of update dicts (see get_update_list).
    :param callback: optional progress callback, called per downloaded chunk.
    :param root_path: base directory for local paths; defaults to the CWD.
    """
    base_dir = os.getcwd() if root_path is None else root_path

    def fetch_one(update):
        target = os.path.join(base_dir, update["local_path"])
        download_file(update["server_path"], target,
                      checksum=update["checksum"], callback=callback)

    with ThreadPoolExecutor(max_workers=20) as pool:
        futures = {pool.submit(fetch_one, update): update
                   for update in update_list}
        for future in concurrent.futures.as_completed(futures):
            update = futures[future]
            try:
                future.result()
            except Exception as exc:
                logging.error('%r generated an exception: %s' % (update, exc))
                raise exc
def download_file(url, path, checksum=None, callback=None, max_attempts=5):
    """Download a single file to *path*, verifying its checksum.

    :param url: server-relative path of the file (appended to the mirror's
        "download?path=" endpoint).
    :param path: local destination path; parent directories are created.
    :param checksum: expected checksum (via calc_file_checksum); when given,
        the download is retried up to max_attempts times on mismatch.
    :param callback: optional progress callback, called with each chunk size.
    :param max_attempts: retry budget for checksum mismatches.
    :raises Exception: when the checksum still mismatches after max_attempts.
    """
    logging.info("Download file: {} from URL: {}".format(path, url))
    parent = os.path.dirname(path)
    if parent.strip() != "" and not os.path.exists(parent):
        logging.debug("Making subdirs because they do not exist: {}".format(parent))
        try:
            os.makedirs(parent)
        except FileExistsError as e:
            # ignore file exists error due to race conditions
            pass
    # NOTE(review): only the first mirror is used here — TODO confirm
    # whether fallback mirrors should apply like in try_get.
    mirror = mirrors[0]
    url = mirror + "download?path=" + url
    r = requests.get(url, stream=True)
    # print(requests.head(url, stream=True).headers)
    # we probably don't need to get the content_length again
    # content_length = r.headers.get('Content-length')
    # content_length = int(content_length)
    old_permissions = None
    if os.path.exists(path):
        # Remember the old file mode so the replacement keeps the same
        # permissions (e.g. executables stay executable).
        logging.info("Removing existing file: {}".format(path))
        old_permissions = os.stat(path).st_mode
        os.remove(path)
    retry_count = 0
    while True:
        # Stream the body to disk in 1 KiB chunks, reporting progress.
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    if callback is not None:
                        callback(len(chunk))
        if checksum is None or calc_file_checksum(path) == checksum:
            break
        elif retry_count >= max_attempts:
            raise Exception("Failed to download correct file: {} after {} attempts".format(url, max_attempts))
        else: # try again
            retry_count += 1
            os.remove(path)
            # Re-issue the request: the previous response stream is consumed.
            r = requests.get(url, stream=True)
    if old_permissions is not None:
        os.chmod(path, old_permissions)
    logging.info("Downloaded: {}".format(path))
def get_update_list(launcher_game_id, root_path=None):
    """Compute which files need downloading for the current platform.

    Fetches the launcher-dist manifest and the per-game manifest, compares
    each entry's checksum against the local file, and splits the results
    into files to update and files already up to date.

    :param launcher_game_id: server-side id of the game's file listing.
    :param root_path: defaults to the CWD.
        NOTE(review): root_path is not actually used below — existence and
        checksum checks use paths relative to the CWD; confirm intended.
    :return: tuple (update_list, existing_list) where update_list is a list
        of dicts with "server_path", "local_path", "checksum" and "size",
        and existing_list maps local path -> same-shaped dict (minus size).
    """
    platformDir = platformToDir[platform.system()]
    # We use the update list as a dictionary so newer paths overwrite older ones
    # This would allow games to overwrite default launcher files
    # In a way it works similar to how Spring mutators work
    update_list = {}
    existing_list = {}
    if root_path is None:
        root_path = os.getcwd()
    res = try_get("files/")
    manifest = res.json()
    def _resolve_file(path, download_url, checksum):
        # Classify one manifest entry as "needs update" or "already current".
        item = {
            "server_path": download_url,
            "local_path": path,
            "checksum": checksum,
        }
        if os.path.exists(path):
            local_checksum = calc_file_checksum(path)
            if checksum != local_checksum:
                update_list[path] = item
            else:
                if path in existing_list:
                    del existing_list[path]
                existing_list[path] = item
        else:
            update_list[path] = item
    # Launcher distribution files: only entries for the current platform,
    # with the platform directory prefix stripped from the local path.
    for file, keys in manifest["spring-launcher-dist"].items():
        checksum = keys["checksum"]
        parts = file.split("/")
        if parts[0] != platformDir:
            continue
        path = os.sep.join(parts[1:])
        download_url = "spring-launcher-dist/" + file
        _resolve_file(path, download_url, checksum)
    # Per-game files (may overwrite launcher entries resolved above).
    res = try_get("files/{}".format(launcher_game_id))
    if res.status_code == requests.codes.ok:
        manifest = res.json()
        top_key = list(manifest.keys())[0]
        for path, keys in manifest[top_key].items():
            download_url = keys["path"]
            checksum = keys["checksum"]
            _resolve_file(path, download_url, checksum)
    update_list = list(update_list.values())
    if len(update_list) == 0:
        return update_list, existing_list
    m = mirrors[0]
    urls = [urljoin(m, "download?path=" + up["server_path"]) for up in update_list]
    # Get all sizes in parallel, so it doesn't fetch them forever
    def get_size(url):
        return int(requests.head(url, stream=True).headers.get("Content-length"))
    with ThreadPoolExecutor(max_workers=50) as pool:
        sizes = list(pool.map(get_size, urls))
    # pool.map preserves input order, so sizes line up with update_list.
    for i, size in enumerate(sizes):
        update_list[i]["size"] = size
    return update_list, existing_list
| {
"content_hash": "a3186b585b1c85629d5d48d1af50520e",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 110,
"avg_line_length": 33.75,
"alnum_prop": 0.5989159891598916,
"repo_name": "Spring-Chobby/ChobbyLauncher",
"id": "9bf324d0b12992081ad45e4b1c7357f2e65a100c",
"size": "5535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spring_launcher/auto_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41327"
},
{
"name": "Python",
"bytes": "42840"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import datetime
from django.db.models import DateTimeField
from django.conf import settings
from django.utils.encoding import smart_text
from django.utils import six, timezone
from doj.db.backends import JDBCBaseDatabaseOperations as BaseDatabaseOperations
try:
import pytz
except ImportError:
pytz = None
class DatabaseOperations(BaseDatabaseOperations):
    """MSSQL-specific SQL generation and Python<->SQL value conversion for
    the DOJ (django-jython) database backend.

    Written for Python 2 era Django (uses ``six`` and old-style ``super``
    calls); SQL strings target SQL Server's T-SQL dialect.
    """

    compiler_module = "doj.db.backends.mssql.compiler"

    # map of sql_function: (new sql_function, new sql_template )
    # If sql_template is None, it will not be overridden.
    _sql_function_overrides = {
        'STDDEV_SAMP': ('STDEV', None),
        'STDDEV_POP': ('STDEVP', None),
        'VAR_SAMP': ('VAR', None),
        'VAR_POP': ('VARP', None),
    }

    def __init__(self, *args, **kwargs):
        """Initialize operations; optionally force AVG to float semantics."""
        super(DatabaseOperations, self).__init__(*args, **kwargs)
        if self.connection.cast_avg_to_float:
            # Need to cast as float to avoid truncating to an int
            self._sql_function_overrides['AVG'] = ('AVG', '%(function)s(CAST(%(field)s AS FLOAT))')

    def cache_key_culling_sql(self):
        """SQL used by the cache backend to find the key at a given rank.

        Contains one ``%s`` table-name placeholder and one ``%%s`` rank
        parameter placeholder.
        """
        return """
        SELECT [cache_key]
        FROM (SELECT [cache_key], ROW_NUMBER() OVER (ORDER BY [cache_key]) AS [rank] FROM %s) AS [RankedCache]
        WHERE [rank] = %%s + 1
        """

    def date_extract_sql(self, lookup_type, field_name):
        """Return DATEPART SQL extracting ``lookup_type`` from a date column."""
        field_name = self.quote_name(field_name)
        if lookup_type == 'week_day':
            # Django's 'week_day' lookup maps to T-SQL's 'weekday' datepart.
            lookup_type = 'weekday'
        return 'DATEPART(%s, %s)' % (lookup_type, field_name)

    def date_interval_sql(self, sql, connector, timedelta):
        """
        implements the interval functionality for expressions
        format for SQL Server.
        """
        sign = 1 if connector == '+' else -1
        if timedelta.seconds or timedelta.microseconds:
            # assume the underlying datatype supports seconds/microseconds
            seconds = ((timedelta.days * 86400) + timedelta.seconds) * sign
            out = sql
            if seconds:
                out = 'DATEADD(SECOND, {0}, {1})'.format(seconds, sql)
            if timedelta.microseconds:
                # DATEADD with datetime doesn't support ms, must cast up
                out = 'DATEADD(MICROSECOND, {ms}, CAST({sql} as datetime2))'.format(
                    ms=timedelta.microseconds * sign,
                    sql=out,
                )
        else:
            # Only days in the delta, assume underlying datatype can DATEADD with days
            out = 'DATEADD(DAY, {0}, {1})'.format(timedelta.days * sign, sql)
        return out

    def date_trunc_sql(self, lookup_type, field_name):
        """Truncate a date column to ``lookup_type`` precision using the
        classic DATEADD(DATEDIFF(..., 0, col), 0) idiom (0 = 1900-01-01)."""
        return "DATEADD(%s, DATEDIFF(%s, 0, %s), 0)" % (lookup_type, lookup_type, field_name)

    def _switch_tz_offset_sql(self, field_name, tzname):
        """
        Returns the SQL that will convert field_name to UTC from tzname.
        """
        field_name = self.quote_name(field_name)
        if settings.USE_TZ:
            if pytz is None:
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured("This query requires pytz, "
                                           "but it isn't installed.")
            tz = pytz.timezone(tzname)
            # NOTE(review): the UTC offset is sampled at a fixed date
            # (2000-01-01), so DST transitions are not accounted for --
            # confirm this approximation is acceptable.
            td = tz.utcoffset(datetime.datetime(2000, 1, 1))

            def total_seconds(td):
                # timedelta.total_seconds() does not exist on Python < 2.7.
                if hasattr(td, 'total_seconds'):
                    return td.total_seconds()
                else:
                    return td.days * 24 * 60 * 60 + td.seconds

            total_minutes = total_seconds(td) // 60
            hours, minutes = divmod(total_minutes, 60)
            tzoffset = "%+03d:%02d" % (hours, minutes)
            field_name = \
                "CAST(SWITCHOFFSET(TODATETIMEOFFSET(%s, '+00:00'), '%s') AS DATETIME2)" % (field_name, tzoffset)
        return field_name

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """
        Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
        'second', returns the SQL that extracts a value from the given
        datetime field field_name, and a tuple of parameters.
        """
        if lookup_type == 'week_day':
            lookup_type = 'weekday'
        return 'DATEPART({0}, {1})'.format(
            lookup_type,
            self._switch_tz_offset_sql(field_name, tzname),
        ), []

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """
        Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
        'second', returns the SQL that truncates the given datetime field
        field_name to a datetime object with only the given specificity, and
        a tuple of parameters.
        """
        field_name = self._switch_tz_offset_sql(field_name, tzname)
        reference_date = '0'  # 1900-01-01
        if lookup_type in ['minute', 'second']:
            # Prevent DATEDIFF overflow by using the first day of the year as
            # the reference point. Only using for minute and second to avoid any
            # potential performance hit for queries against very large datasets.
            reference_date = "CONVERT(datetime2, CONVERT(char(4), {field_name}, 112) + '0101', 112)".format(
                field_name=field_name,
            )
        sql = "DATEADD({lookup}, DATEDIFF({lookup}, {reference_date}, {field_name}), {reference_date})".format(
            lookup=lookup_type,
            field_name=field_name,
            reference_date=reference_date,
        )
        return sql, []

    def last_insert_id(self, cursor, table_name, pk_name):
        """
        Fetch the last inserted ID by executing another query.
        """
        # IDENT_CURRENT returns the last identity value generated for a
        # specific table in any session and any scope.
        # http://msdn.microsoft.com/en-us/library/ms175098.aspx
        cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [self.quote_name(table_name)])
        return cursor.fetchone()[0]

    def return_insert_id(self):
        """
        MSSQL implements the RETURNING SQL standard extension differently from
        the core database backends and this function is essentially a no-op.
        The SQL is altered in the SQLInsertCompiler to add the necessary OUTPUT
        clause.
        """
        return (None, None)

    def no_limit_value(self):
        """MSSQL has no sentinel for 'no limit'; None tells Django so."""
        return None

    def prep_for_like_query(self, x):
        """Prepares a value for use in a LIKE query."""
        # Escape LIKE wildcards (%, _, [, ]) and the escape char itself.
        return (
            smart_text(x).
            replace("\\", "\\\\").
            replace("%", "\%").
            replace("_", "\_").
            replace("[", "\[").
            replace("]", "\]")
        )

    def quote_name(self, name):
        """Bracket-quote an identifier, idempotently."""
        if name.startswith('[') and name.endswith(']'):
            return name  # already quoted
        return '[%s]' % name

    def random_function_sql(self):
        """SQL expression for a random value (NEWID() gives a random sort)."""
        return 'NEWID()'

    def regex_lookup(self, lookup_type):
        """SQL for regex/iregex lookups.

        Relies on a user-defined function ``dbo.REGEXP_LIKE`` existing in the
        database; the third argument toggles case sensitivity.
        """
        # Case sensitivity
        match_option = {'iregex': 0, 'regex': 1}[lookup_type]
        return "dbo.REGEXP_LIKE(%%s, %%s, %s)=1" % (match_option,)

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """
        Returns a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves).
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        Originally taken from django-pyodbc project.
        """
        if not tables:
            return list()
        # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY; use DELETE instead.
        # (which is slow)
        with self.connection.cursor() as cursor:
            # Try to minimize the risks of the braindeaded inconsistency in
            # DBCC CHEKIDENT(table, RESEED, n) behavior.
            seqs = []
            for seq in sequences:
                cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
                rowcnt = cursor.fetchone()[0]
                elem = dict()
                # Reseed value depends on whether the table currently has rows.
                if rowcnt:
                    elem['start_id'] = 0
                else:
                    elem['start_id'] = 1
                elem.update(seq)
                seqs.append(elem)
        sql_list = list()
        # Turn off constraints.
        sql_list.append('EXEC sp_MSforeachtable "ALTER TABLE ? NOCHECK CONSTRAINT all"')
        # Delete data from tables.
        sql_list.extend(
            ['%s %s %s;' % (
                style.SQL_KEYWORD('DELETE'),
                style.SQL_KEYWORD('FROM'),
                style.SQL_FIELD(self.quote_name(t))
            ) for t in tables]
        )
        # Reset the counters on each table.
        sql_list.extend(
            ['%s %s (%s, %s, %s) %s %s;' % (
                style.SQL_KEYWORD('DBCC'),
                style.SQL_KEYWORD('CHECKIDENT'),
                style.SQL_FIELD(self.quote_name(seq["table"])),
                style.SQL_KEYWORD('RESEED'),
                style.SQL_FIELD('%d' % seq['start_id']),
                style.SQL_KEYWORD('WITH'),
                style.SQL_KEYWORD('NO_INFOMSGS'),
            ) for seq in seqs]
        )
        # Turn constraints back on.
        sql_list.append('EXEC sp_MSforeachtable "ALTER TABLE ? WITH NOCHECK CHECK CONSTRAINT all"')
        return sql_list

    def tablespace_sql(self, tablespace, inline=False):
        """Map Django tablespaces to SQL Server filegroups (ON [name])."""
        return "ON %s" % self.quote_name(tablespace)

    def __to_truncated_datetime_string(self, value):
        """
        Format a datetime to a internationalize string parsable by either a
        'datetime' or 'datetime2'.
        """
        # NOTE(review): no call sites within this class -- appears unused here.
        # The b' ' separator is a Python 2 idiom (a bytes separator raises
        # TypeError on Python 3) -- confirm before porting.
        if isinstance(value, datetime.datetime):
            # Strip '-' so SQL Server parses as YYYYMMDD for all languages/formats
            val = value.isoformat(b' ').replace('-', '')
            if value.microsecond:
                # truncate to millisecond so SQL's 'datetime' can parse it
                idx = val.rindex('.')
                val = val[:idx + 4] + val[idx + 7:]
            return val
        raise TypeError("'value' must be a date or datetime")

    def value_to_db_datetime(self, value):
        """Convert a datetime to its DB string form (ISO), stripping tzinfo
        after normalizing to UTC when USE_TZ is on."""
        if value is None or isinstance(value, six.string_types):
            return value
        if timezone.is_aware(value):  # and not self.connection.features.supports_timezones:
            if getattr(settings, 'USE_TZ', False):
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            else:
                raise ValueError("SQL Server backend does not support timezone-aware datetimes.")
        return value.isoformat()

    def value_to_db_time(self, value):
        """Convert a time value to its DB string form (ISO); aware values are
        only accepted when USE_TZ is off and can be made naive."""
        if value is None or isinstance(value, six.string_types):
            return value
        if timezone.is_aware(value):
            if not getattr(settings, 'USE_TZ', False) and hasattr(value, 'astimezone'):
                value = timezone.make_naive(value, timezone.utc)
            else:
                raise ValueError("SQL Server backend does not support timezone-aware times.")
        return value.isoformat()

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """Pass decimals through unchanged; empty string is treated as NULL."""
        if value is None or value == '':
            return None
        return value  # Should be a decimal type (or string)

    def year_lookup_bounds_for_date_field(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year
        lookup.
        `value` is an int, containing the looked-up year.
        """
        first = self.value_to_db_date(datetime.date(value, 1, 1))
        second = self.value_to_db_date(datetime.date(value, 12, 31))
        return [first, second]

    def year_lookup_bounds_for_datetime_field(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup
        `value` is an int, containing the looked-up year.
        """
        first = datetime.datetime(value, 1, 1)
        # Upper bound is the last representable microsecond of the year.
        ms = 999999
        second = datetime.datetime(value, 12, 31, 23, 59, 59, ms)
        if settings.USE_TZ:
            tz = timezone.get_current_timezone()
            first = timezone.make_aware(first, tz)
            second = timezone.make_aware(second, tz)
        return [self.value_to_db_datetime(first), self.value_to_db_datetime(second)]

    def convert_values(self, value, field):
        """
        MSSQL needs help with date fields that might come out as strings.
        """
        if field:
            internal_type = field.get_internal_type()
            if internal_type in ('DateField', 'DateTimeField'):
                # Normalize driver-specific offset formatting before parsing.
                value = DateTimeField().to_python(convert_microsoft_date_to_isoformat(value))
            else:
                value = super(DatabaseOperations, self).convert_values(value, field)
        return value

    def bulk_insert_sql(self, fields, num_values):
        """
        Format the SQL for bulk insert
        """
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)

    def max_name_length(self):
        """
        MSSQL supports identifier names up to 128
        """
        return 128

    def _supports_stddev(self):
        """
        Work around for django ticket #18334.
        This backend supports StdDev and the SQLCompilers will remap to
        the correct function names.
        """
        return True

    def enable_identity_insert(self, table):
        """
        Backends can implement as needed to enable inserts in to
        the identity column.
        Should return True if identity inserts have been enabled.
        """
        if table:
            cursor = self.connection.cursor()
            cursor.execute('SET IDENTITY_INSERT {0} ON'.format(
                self.connection.ops.quote_name(table)
            ))
            return True
        return False

    def disable_identity_insert(self, table):
        """
        Backends can implement as needed to disable inserts in to
        the identity column.
        Should return True if identity inserts have been disabled.
        """
        if table:
            cursor = self.connection.cursor()
            cursor.execute('SET IDENTITY_INSERT {0} OFF'.format(
                self.connection.ops.quote_name(table)
            ))
            return True
        return False

    def savepoint_create_sql(self, sid):
        """T-SQL savepoint: SAVE inside an open transaction, otherwise BEGIN
        a new one (SQL Server has no standalone SAVEPOINT statement)."""
        return """\
DECLARE @TranCounter INT;
SET @TranCounter = @@TRANCOUNT;
IF @TranCounter > 0
    SAVE TRANSACTION {0};
ELSE
    BEGIN TRANSACTION {0};
""".format(self.quote_name(sid))

    def savepoint_rollback_sql(self, sid):
        """Roll back to a named savepoint, swallowing errors if the savepoint
        no longer exists (empty CATCH block)."""
        return """\
BEGIN TRY
    ROLLBACK TRANSACTION {0};
END TRY
BEGIN CATCH
END CATCH
""".format(self.quote_name(sid))

    def combine_expression(self, connector, sub_expressions):
        """
        MSSQL requires special cases for ^ operators in query expressions
        """
        if connector == '^':
            # T-SQL's ^ is bitwise XOR; exponentiation needs POWER().
            return 'POWER(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def convert_microsoft_date_to_isoformat(value):
    """Normalize MSSQL offset strings to ISO form.

    Collapses the space before a UTC-offset sign (' +05:00' / ' -05:00')
    so the value parses as an ISO 8601 datetime. Non-string values are
    returned untouched.
    """
    if not isinstance(value, six.string_types):
        return value
    return value.replace(' +', '+').replace(' -', '-')
| {
"content_hash": "9e4c65f2b5ec605310f9e581daff3903",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 116,
"avg_line_length": 37.61244019138756,
"alnum_prop": 0.5713649662892761,
"repo_name": "beachmachine/django-jython",
"id": "4451cd40183701105604403e0f00b0dd203a2169",
"size": "15747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doj/db/backends/mssql/operations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "281227"
}
],
"symlink_target": ""
} |
"""
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from os import path
from setuptools import setup
def read(filename):
    """Return the UTF-8 text of *filename* located beside this script."""
    full_path = path.join(path.dirname(__file__), filename)
    with open(full_path, encoding="utf-8") as handle:
        return handle.read()
# Package metadata. The long description and install requirements are read
# from sibling files (README.md, requirements.in) to keep one source of truth.
setup(
    name="pdepy",
    version="1.0.4",
    description="A Finite-Difference PDE solver.",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/olivertso/pdepy",
    author="Oliver Hung Buo Tso",
    author_email="olivertsor@gmail.com",
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Topic :: Scientific/Engineering :: Physics",
    ],
    keywords="partial-differential-equations finite-difference-method",
    packages=["pdepy"],
    install_requires=read("requirements.in").splitlines(),
    python_requires=">=3.6",
)
| {
"content_hash": "d50e1ccd9168a7dfd4b45ee2d479b8ef",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 82,
"avg_line_length": 33.28888888888889,
"alnum_prop": 0.6528704939919893,
"repo_name": "olivertso/pde",
"id": "3f8cb5c1f2e62bf246b9c40e13256f9099fc70d4",
"size": "1498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16210"
}
],
"symlink_target": ""
} |
import sys
import subprocess
import json
import math
# xref prow/Makefile get-build-cluster-credentials
# TODO: perhaps make these configurable
# Target GKE cluster coordinates used by every gcloud invocation below.
CLUSTER = 'prow'
ZONE = 'us-central1-f'
PROJECT = 'k8s-prow-builds'
def get_pool_sizes(project, zone, cluster):
    """returns a map of node pool name to size using the gcloud cli."""
    def _gcloud_json(command):
        # All gcloud calls below share the same JSON-decoding boilerplate.
        return json.loads(subprocess.check_output(command, encoding='utf-8'))

    # Map managed instance group names back to their node pools, and record
    # every pool name with a starting count of zero.
    node_pools = _gcloud_json([
        'gcloud', 'container', 'node-pools', 'list',
        '--project', project, '--cluster', cluster, '--zone', zone,
        '--format=json',
    ])
    pool_sizes = {}
    pool_by_group = {}
    for pool in node_pools:
        pool_sizes[pool['name']] = 0
        for url in pool['instanceGroupUrls']:
            # Somewhat brittle: the last URL component is the instance group
            # name. Using the APIs directly would be more robust.
            pool_by_group[url.split('/')[-1]] = pool['name']

    # Sum instance-group node counts into their owning pools.
    groups = _gcloud_json([
        'gcloud', 'compute', 'instance-groups', 'list',
        '--project', project, '--filter=zone:({})'.format(zone),
        '--format=json',
    ])
    for group in groups:
        pool_name = pool_by_group.get(group['name'])
        if pool_name is None:
            continue
        pool_sizes[pool_name] += group['size']
    return pool_sizes
def resize_nodepool(pool, new_size, project, zone, cluster):
    """resize the nodepool to new_size using the gcloud cli"""
    command = ['gcloud', 'container', 'clusters', 'resize', cluster]
    command += ['--zone', zone, '--project', project, '--node-pool', pool]
    command += ['--num-nodes', str(new_size), '--quiet']
    # Echo the exact invocation before running it, for operator visibility.
    print(command)
    subprocess.call(command)
def prompt_confirmation():
    """Ask the operator for confirmation; terminate unless the answer is 'yes'."""
    sys.stdout.write('Please confirm (yes/no): ')
    answer = input()
    if answer == 'yes':
        print('Confirmed.')
        return
    print('Cancelling.')
    sys.exit(-1)
def main():
    """Shift node capacity from one GKE node pool to another.

    CLI (last four args):
        <pool_to_shrink> <pool_to_grow> <shrink:grow> <nodes_to_add>

    Adds ``nodes_to_add`` nodes to ``pool_to_grow`` in increments of ``grow``,
    draining ``shrink`` nodes from ``pool_to_shrink`` (never below zero) for
    each increment, after printing the plan and asking for confirmation.
    """
    # parse cli
    nodes_to_add = int(sys.argv[-1])
    ratio = sys.argv[-2].split(':')
    shrink_increment, grow_increment = int(ratio[0]), int(ratio[1])
    pool_to_grow = sys.argv[-3]
    pool_to_shrink = sys.argv[-4]
    # obtain current pool sizes
    pool_sizes = get_pool_sizes(PROJECT, ZONE, CLUSTER)
    pool_to_grow_initial = pool_sizes[pool_to_grow]
    pool_to_shrink_initial = pool_sizes[pool_to_shrink]
    # compute final pool sizes
    pool_to_grow_target = pool_to_grow_initial + nodes_to_add
    n_iter = int(math.ceil(float(nodes_to_add) / grow_increment))
    pool_to_shrink_target = pool_to_shrink_initial - n_iter*shrink_increment
    if pool_to_shrink_target < 0:
        pool_to_shrink_target = 0
    # verify with the user
    print((
        'Shifting NodePool capacity for project = "{project}",'
        'zone = "{zone}", cluster = "{cluster}"'
    ).format(
        project=PROJECT, zone=ZONE, cluster=CLUSTER,
    ))
    print('')
    print((
        'Will add {nodes_to_add} node(s) to {pool_to_grow}'
        ' and drain {shrink_increment} node(s) from {pool_to_shrink}'
        ' for every {grow_increment} node(s) added to {pool_to_grow}'
    ).format(
        nodes_to_add=nodes_to_add, shrink_increment=shrink_increment,
        grow_increment=grow_increment, pool_to_grow=pool_to_grow,
        pool_to_shrink=pool_to_shrink,
    ))
    print('')
    print((
        'Current pool sizes are: {{{pool_to_grow}: {pool_to_grow_curr},'
        ' {pool_to_shrink}: {pool_to_shrink_curr}}}'
    ).format(
        pool_to_grow=pool_to_grow, pool_to_grow_curr=pool_to_grow_initial,
        pool_to_shrink=pool_to_shrink, pool_to_shrink_curr=pool_to_shrink_initial,
    ))
    print('')
    print((
        'Target pool sizes are: {{{pool_to_grow}: {pool_to_grow_target},'
        ' {pool_to_shrink}: {pool_to_shrink_target}}}'
    ).format(
        pool_to_grow=pool_to_grow, pool_to_grow_target=pool_to_grow_target,
        pool_to_shrink=pool_to_shrink, pool_to_shrink_target=pool_to_shrink_target,
    ))
    print('')
    prompt_confirmation()
    print('')
    # actually start resizing
    # ignore pylint, "i" is a perfectly fine variable name for a loop counter...
    # pylint: disable=invalid-name
    for i in range(n_iter):
        # shrink by one increment, capped at reaching zero nodes
        print('Draining {shrink_increment} node(s) from {pool_to_shrink} ...'.format(
            shrink_increment=shrink_increment, pool_to_shrink=pool_to_shrink,
        ))
        new_size = max(pool_to_shrink_initial - (i*shrink_increment + shrink_increment), 0)
        resize_nodepool(pool_to_shrink, new_size, PROJECT, ZONE, CLUSTER)
        print('')
        # grow by one increment, capped at the number of nodes still missing.
        # BUG FIX: the cap must be the REMAINING nodes to add
        # (nodes_to_add - i*grow_increment), not the absolute pool target
        # size; the old expression (pool_to_grow_target - i*grow_increment)
        # overshot the target whenever the growing pool started non-empty.
        num_to_add = min(grow_increment, nodes_to_add - i*grow_increment)
        print('Adding {num_to_add} node(s) to {pool_to_grow} ...'.format(
            num_to_add=num_to_add, pool_to_grow=pool_to_grow,
        ))
        new_size = pool_to_grow_initial + (i*grow_increment + num_to_add)
        resize_nodepool(pool_to_grow, new_size, PROJECT, ZONE, CLUSTER)
        print('')
    print('')
    print('Done')
# Entry point: run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| {
"content_hash": "a8a82dd08b00ed9a020bcc8a0af90302",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 92,
"avg_line_length": 35.348387096774196,
"alnum_prop": 0.6054024457017704,
"repo_name": "brahmaroutu/test-infra",
"id": "0bc609e0d1e18530a86ca587c587cdbed959376a",
"size": "6437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experiment/maintenance/shift_nodepool_capacity.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35881"
},
{
"name": "Dockerfile",
"bytes": "38216"
},
{
"name": "Go",
"bytes": "6132675"
},
{
"name": "HCL",
"bytes": "4918"
},
{
"name": "HTML",
"bytes": "55371"
},
{
"name": "JavaScript",
"bytes": "54961"
},
{
"name": "Jsonnet",
"bytes": "40931"
},
{
"name": "Makefile",
"bytes": "36863"
},
{
"name": "Python",
"bytes": "1192028"
},
{
"name": "Shell",
"bytes": "188188"
},
{
"name": "TypeScript",
"bytes": "197251"
}
],
"symlink_target": ""
} |
"""new pm resource
Revision ID: 7aed6936ccee
Revises: 7aad615d6979
Create Date: 2020-01-29 08:51:35.381339
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
# This migration applies on top of 7aad615d6979 in the main (unbranched) line.
revision = '7aed6936ccee'
down_revision = '7aad615d6979'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Convert physical_measurements.resource from LONGBLOB to JSON.

    The original bytes are preserved by renaming the column to
    ``old_resource``; the new JSON column is then populated from a CAST of
    those bytes. Statement order matters.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Prepare old resource column for migration.
    op.alter_column('physical_measurements', 'resource', existing_type=mysql.LONGBLOB(), nullable=True)
    op.execute('ALTER TABLE physical_measurements CHANGE `resource` `old_resource` longblob')
    # Add new resource column and populate it with the converted fhir docs.
    op.add_column('physical_measurements', sa.Column('resource', mysql.JSON(), nullable=True))
    op.execute('UPDATE physical_measurements SET resource = CAST(old_resource AS CHAR(120000) CHARACTER SET utf8)')
    # ### end Alembic commands ###
def downgrade_rdr():
    """Reverse upgrade_rdr: drop the JSON column and restore the original
    LONGBLOB column (data survives because upgrade only renamed it)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('physical_measurements', 'resource')
    op.execute('ALTER TABLE physical_measurements CHANGE `old_resource` `resource` longblob')
    op.alter_column('physical_measurements', 'resource', existing_type=mysql.LONGBLOB(), nullable=False)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
| {
"content_hash": "ad99deaf26e580b7b0c5cdc605bb6e8f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 125,
"avg_line_length": 38.40298507462686,
"alnum_prop": 0.7535950252623397,
"repo_name": "all-of-us/raw-data-repository",
"id": "5cb5e270fce49465aaac608a67c0826b7001f5dc",
"size": "2573",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/alembic/versions/7aed6936ccee_new_pm_resource.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='wl-4vqi-ooq8efl5jv!^npc*iydehqdj663*ut^@!@#z2^g-43')

# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
# Outgoing mail is printed to the console unless overridden via environment.
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')

# CACHING
# ------------------------------------------------------------------------------
# Per-process local-memory cache; wiped on restart (fine for development).
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
# NOTE(review): 10.0.2.2 is presumably the VM host address (VirtualBox/Vagrant
# NAT) so the toolbar also shows when served from a VM -- confirm.
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Route django and scrapy log records to the console at INFO level.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': 'INFO',
        },
        'scrapy': {
            'handlers': ['console'],
            'level': 'INFO',
        },
    },
}
"content_hash": "81635e2a95a7efd8d0d360f9d5279304",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 99,
"avg_line_length": 27.5,
"alnum_prop": 0.45454545454545453,
"repo_name": "bruecksen/notifhain",
"id": "27a5a28e697174b3f76800843aad5eaddb99a38b",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/development.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "584"
},
{
"name": "Python",
"bytes": "61554"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
''' Bokeh is a Python interactive visualization library that targets modern
web browsers for presentation.
Its goal is to provide elegant, concise construction of versatile graphics,
and also deliver this capability with high-performance interactivity over large
or streaming datasets. Bokeh can help anyone who would like to quickly and
easily create interactive plots, dashboards, and data applications.
For full documentation, please visit: https://bokeh.pydata.org
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals

import logging

# Module-level logger for this package.
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public names exported by `from bokeh import *`.
__all__ = (
    '__version__',
    'license',
    'sampledata',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def license():
    ''' Print the Bokeh license to the console.

    Returns:
        None

    '''
    from os.path import join
    license_path = join(__path__[0], 'LICENSE.txt')
    with open(license_path) as lic:
        print(lic.read())
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# configure Bokeh version
from .util.version import __version__; __version__

# expose sample data module
from . import sampledata; sampledata

# configure Bokeh logger
from .util import logconfig
del logconfig

# Configure warnings to always show nice messages, despite Python's active
# efforts to hide them from users.
import warnings
from .util.warnings import BokehDeprecationWarning, BokehUserWarning
warnings.simplefilter('always', BokehDeprecationWarning)
warnings.simplefilter('always', BokehUserWarning)

# Keep a reference to the stock formatter so the custom one can delegate
# non-Bokeh warnings back to it.
original_formatwarning = warnings.formatwarning
def _formatwarning(message, category, filename, lineno, line=None):
    """Render Bokeh warning categories as 'Category: message' (no file/line
    noise); delegate every other category to the stock formatter."""
    from .util.warnings import BokehDeprecationWarning, BokehUserWarning
    if category in (BokehDeprecationWarning, BokehUserWarning):
        return "%s: %s\n" % (category.__name__, message)
    return original_formatwarning(message, category, filename, lineno, line)
# Install the custom formatter, then scrub the helper names so they do not
# leak into the package namespace.
warnings.formatwarning = _formatwarning
del _formatwarning
del BokehDeprecationWarning, BokehUserWarning
del warnings
| {
"content_hash": "325a22c59054b80f1f122b515d11002d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 82,
"avg_line_length": 33.50526315789474,
"alnum_prop": 0.49198868991517436,
"repo_name": "stonebig/bokeh",
"id": "2aebe20114da30d711538d43c10facf4e54919c3",
"size": "3514",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
} |
import unittest
from sets import Set
from common import DtraceTestCase
from analyzer.darwin.lib.dtrace.ipconnections import *
class TestIpconnections(DtraceTestCase):
    """Integration tests for ipconnections() (dtrace network tracing).

    Each test runs the current target binary under dtrace and checks the
    (remote, remote_port, protocol) tuples it reports.
    NOTE(review): the module imports ``Set`` from the Python-2-only ``sets``
    module, so this suite assumes a Python 2 interpreter.
    """

    def test_ipconnections_udp(self):
        # given
        expected = ('127.0.0.1',  # host
                    53,           # port
                    'UDP')        # protocol
        output = []
        # when
        for connection in ipconnections(self.current_target()):
            output.append(connection)
        # then
        assert len(output) == 1
        matched = [x for x in output if
                   (x.remote, x.remote_port, x.protocol) == expected]
        assert len(matched) == 1

    def test_ipconnections_tcp(self):
        # given
        expected = ('127.0.0.1',  # host
                    80,           # port
                    'TCP')        # protocol
        output = []
        # when
        for connection in ipconnections(self.current_target()):
            output.append(connection)
        # then
        assert len(output) == 1
        matched = [x for x in output if
                   (x.remote, x.remote_port, x.protocol) == expected]
        assert len(matched) == 1

    def test_ipconnections_tcp_with_timeout(self):
        # given
        expected = ('127.0.0.1',  # host
                    80,           # port
                    'TCP')        # protocol
        # All reported connections should come from a single process.
        pids = Set()
        output = []
        # when
        for connection in ipconnections(self.current_target(), timeout=1):
            output.append(connection)
            pids.add(connection.pid)
        # then
        assert len(pids) == 1
        assert len(output) == 1
        matched = [x for x in output if
                   (x.remote, x.remote_port, x.protocol) == expected]
        assert len(matched) == 1

    def test_ipconnections_empty(self):
        # given
        output = []
        # when
        for connection in ipconnections(self.current_target()):
            output.append(connection)
        # then: a target that opens no sockets reports nothing
        assert len(output) == 0

    def test_ipconnections_target_with_args(self):
        # given
        expected = ('127.0.0.1',  # host
                    80,           # port
                    'TCP')        # protocol
        args = ["127.0.0.1"]
        output = []
        # when
        for connection in ipconnections(self.current_target(), args=args):
            output.append(connection)
        # then
        assert len(output) == 1
        matched = [x for x in output if
                   (x.remote, x.remote_port, x.protocol) == expected]
        assert len(matched) == 1
# Allow running this module directly (python test_ipconnections.py) in
# addition to discovery through a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "1485f4b5d84b6d11e66c1241dfc234bd",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 68,
"avg_line_length": 26.42168674698795,
"alnum_prop": 0.6096671226630187,
"repo_name": "rodionovd/cuckoo-osx-analyzer",
"id": "29adbbad59b8b9dc589e4aeb66cc46f82953cd52",
"size": "2373",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_ipconnections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9292"
},
{
"name": "DTrace",
"bytes": "8642"
},
{
"name": "Python",
"bytes": "87690"
},
{
"name": "Shell",
"bytes": "29789"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# Route both '/register' and the app root to the same registration view.
# NOTE(review): both patterns share name='register'; with a duplicated
# name, reverse('register') resolves to only one of them (presumably the
# later entry, i.e. the root URL) -- confirm this is intentional.
urlpatterns = [
    url(r'^register$', views.register, name='register'),
    url(r'^$', views.register, name='register'),
]
| {
"content_hash": "2e53e60d813108ff9be839d431c0dd00",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 20.444444444444443,
"alnum_prop": 0.6467391304347826,
"repo_name": "kunal-exuberant/search-directory",
"id": "140eb24d25e993bf743ecb550f5d6d3f9658adf0",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DjangoServer/cookxplor/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58491"
},
{
"name": "HTML",
"bytes": "77713"
},
{
"name": "JavaScript",
"bytes": "71756"
},
{
"name": "PHP",
"bytes": "1616"
},
{
"name": "Python",
"bytes": "21120"
}
],
"symlink_target": ""
} |
import argparse
import cPickle
import csv
import os
import sys

import numpy as np

from utils import logger, TaskMode
# Command-line interface: where to read the raw Avazu CSV, where to write
# the processed splits, and how much data to scan/emit.
parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
parser.add_argument(
    '--data_path', type=str, required=True, help="path of the Avazu dataset")
parser.add_argument(
    '--output_dir', type=str, required=True, help="directory to output")
parser.add_argument(
    '--num_lines_to_detect',
    type=int,
    default=500000,
    help="number of records to detect dataset's meta info")
parser.add_argument(
    '--test_set_size',
    type=int,
    default=10000,
    help="size of the validation dataset(default: 10000)")
parser.add_argument(
    '--train_size',
    type=int,
    default=100000,
    help="size of the trainset (default: 100000)")
args = parser.parse_args()
'''
The fields of the dataset are:
0. id: ad identifier
1. click: 0/1 for non-click/click
2. hour: format is YYMMDDHH, so 14091123 means 23:00 on Sept. 11, 2014 UTC.
3. C1 -- anonymized categorical variable
4. banner_pos
5. site_id
6. site_domain
7. site_category
8. app_id
9. app_domain
10. app_category
11. device_id
12. device_ip
13. device_model
14. device_type
15. device_conn_type
16. C14-C21 -- anonymized categorical variables
We will treat the following fields as categorical features:
- C1
- banner_pos
- site_category
- app_category
- device_type
- device_conn_type
and some other features as id features:
- id
- site_id
- app_id
- device_id
The `hour` field will be treated as a continuous feature and will be transformed
to one-hot representation which has 24 bits.
This script will output 3 files:
1. train.txt
2. test.txt
3. infer.txt
all the files are for demo.
'''
# Filled in by detect_dataset(): feature name -> dimensionality.
feature_dims = {}

# Low-cardinality fields, encoded with CategoryFeatureGenerator.
categorial_features = [
    'C1', 'banner_pos', 'site_category',
    'app_category', 'device_type', 'device_conn_type'
]

# High-cardinality fields, encoded by hashing; the last entry is a cross
# feature built from device_id and site_id.
id_features = [
    'id', 'site_id', 'app_id', 'device_id', '_device_id_cross_site_id'
]


def get_all_field_names(mode=0):
    '''
    Names of every field consumed in the given mode.

    @mode: int
        0 for train, 1 for test
    @return: list of str
    '''
    if mode != 0:
        return []
    return categorial_features + ['hour'] + id_features + ['click']
class CategoryFeatureGenerator(object):
    '''
    One-hot id assignment for a categorical field.

    Call `register` on every raw value first; afterwards `gen` maps a
    value to its integer id. Unseen values fall back to the reserved
    'unk' id (0).
    '''

    def __init__(self):
        # Id 0 is reserved for unknown values; real values start at 1.
        self.dic = {'unk': 0}
        self.counter = 1

    def register(self, key):
        '''
        Assign the next free id to `key` if it has not been seen before.
        '''
        if key in self.dic:
            return
        self.dic[key] = self.counter
        self.counter += 1

    def size(self):
        # Number of distinct ids, including the reserved 'unk' slot.
        return len(self.dic)

    def gen(self, key):
        '''
        Map `key` to its id (one-element sparse vector), using the 'unk'
        id for keys that were never registered.
        '''
        return [self.dic.get(key, self.dic['unk'])]

    def __repr__(self):
        return '<CategoryFeatureGenerator %d>' % len(self.dic)
class IDfeatureGenerator(object):
    '''
    Hash-based encoder for high-cardinality id fields.

    Values are mapped into a fixed space of `max_dim` buckets with the
    builtin hash. For cross features, `cross_fea0`/`cross_fea1` record
    which two source fields the feature is built from.
    '''

    def __init__(self, max_dim, cross_fea0=None, cross_fea1=None):
        '''
        @max_dim: int
            Size of the id elements' space
        '''
        self.max_dim = max_dim
        self.cross_fea0 = cross_fea0
        self.cross_fea1 = cross_fea1

    def gen(self, key):
        '''
        Hash `key` into one of `max_dim` buckets (one-element sparse
        vector).
        '''
        bucket = hash(key) % self.max_dim
        return [bucket]

    def gen_cross_fea(self, fea1, fea2):
        # A cross feature is the hash of the concatenated string values.
        return self.gen(str(fea1) + str(fea2))

    def size(self):
        return self.max_dim
class ContinuousFeatureGenerator(object):
    '''
    Bucketize a continuous feature into `n_intervals` equal-width bins.

    Call `register` on every observed value first (to learn the min/max
    range), then `gen` returns the bin index of a value.
    '''

    def __init__(self, n_intervals):
        # FIX: the original initialized with `sys.minint`, which does not
        # exist (only `sys.maxint` does in Python 2) and raised
        # AttributeError on construction. Float infinities give the same
        # "empty range" semantics for any numeric input.
        self.min = float('inf')
        self.max = float('-inf')
        self.n_intervals = n_intervals

    def register(self, val):
        '''
        Widen the observed [min, max] range to include `val`.

        FIX: the original read the nonexistent attributes `self.minint`
        and `self.maxint`, raising AttributeError on the first call; the
        intent was clearly to fold `val` into self.min / self.max.
        '''
        self.min = min(self.min, val)
        self.max = max(self.max, val)

    def gen(self, val):
        '''
        Return the bin index of `val` in 0..n_intervals (a value equal to
        the registered max falls into bin n_intervals).
        '''
        self.len_part = (self.max - self.min) / self.n_intervals
        # int() keeps the original's integer (floor-division) result type.
        return int((val - self.min) / self.len_part)
# init all feature generators
fields = {}
for key in categorial_features:
    fields[key] = CategoryFeatureGenerator()
for key in id_features:
    # for cross features
    if 'cross' in key:
        # Key looks like '_fea0_cross_fea1'; strip the leading '_' and
        # split on '_cross_' to recover the two source field names.
        feas = key[1:].split('_cross_')
        fields[key] = IDfeatureGenerator(10000000, *feas)
    # for normal ID features
    else:
        fields[key] = IDfeatureGenerator(10000)

# used as feed_dict in PaddlePaddle
field_index = dict((key, id)
                   for id, key in enumerate(['dnn_input', 'lr_input', 'click']))
def detect_dataset(path, topn, id_fea_space=10000):
    '''
    Parse the first `topn` records to collect meta information of this dataset.

    NOTE the records should be randomly shuffled first.

    Side effects: registers values on the module-level `fields` generators
    and fills the module-level `feature_dims` dict (which is also
    returned). `id_fea_space` is accepted but not referenced in the body.
    '''
    # create categorical statis objects.
    logger.warning('detecting dataset')
    with open(path, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        for row_id, row in enumerate(reader):
            if row_id > topn:
                break
            for key in categorial_features:
                fields[key].register(row[key])
    for key, item in fields.items():
        feature_dims[key] = item.size()
    # 'hour' is expanded to a 24-bit one-hot vector; 'click' is the label.
    feature_dims['hour'] = 24
    feature_dims['click'] = 1
    # NOTE(review): np.sum over a bare generator expression depends on
    # NumPy falling back to builtin-sum behavior -- confirm with the
    # pinned NumPy version that this returns the arithmetic sum.
    feature_dims['dnn_input'] = np.sum(
        feature_dims[key] for key in categorial_features + ['hour']) + 1
    feature_dims['lr_input'] = np.sum(feature_dims[key]
                                      for key in id_features) + 1
    return feature_dims
def load_data_meta(meta_path):
    '''
    Load dataset's meta information.

    @meta_path: str
        path of a pickle holding the (feature_dims, fields) pair
    @return: (feature_dims, fields)
    '''
    # FIX: use a context manager so the file handle is closed; the
    # original passed a bare open() result to cPickle and leaked it.
    with open(meta_path, 'rb') as f:
        feature_dims, fields = cPickle.load(f)
    return feature_dims, fields
def concat_sparse_vectors(inputs, dims):
    '''
    Concatenate several sparse (one-hot index) vectors into a single one.

    Each vector's indices are shifted by the total dimensionality of the
    vectors that precede it.

    @inputs: list
        list of sparse vector
    @dims: list of int
        dimension of each sparse vector
    @return: list of int
        indices valid in the concatenated space
    '''
    assert len(inputs) == len(dims)
    res = []
    offset = 0
    for vec, dim in zip(inputs, dims):
        res.extend(idx + offset for idx in vec)
        offset += dim
    return res
class AvazuDataset(object):
    '''
    Load AVAZU dataset as train set.

    The same CSV file backs three splits: the first `n_records_as_test`
    rows form the test split, the remaining rows the train split, and the
    infer split reads all rows without labels.
    '''

    def __init__(self,
                 train_path,
                 n_records_as_test=-1,
                 fields=None,
                 feature_dims=None):
        '''
        @train_path: str
            path of the raw CSV file
        @n_records_as_test: int
            number of leading records reserved for the test split
            (non-positive disables the split)
        @fields: dict
            feature name -> generator mapping (see module-level `fields`)
        @feature_dims: dict
            feature name -> dimensionality (see `detect_dataset`)
        '''
        self.train_path = train_path
        self.n_records_as_test = n_records_as_test
        self.fields = fields
        # default is train mode.
        self.mode = TaskMode.create_train()

        self.categorial_dims = [
            feature_dims[key] for key in categorial_features + ['hour']
        ]
        self.id_dims = [feature_dims[key] for key in id_features]

    def train(self):
        '''
        Load trainset: every record after the first `n_records_as_test`.
        '''
        logger.info("load trainset from %s" % self.train_path)
        self.mode = TaskMode.create_train()
        for rcd in self._scan(skip_test_head=True):
            yield rcd

    def test(self):
        '''
        Load testset: the leading `n_records_as_test` records.
        '''
        logger.info("load testset from %s" % self.train_path)
        self.mode = TaskMode.create_test()
        for rcd in self._scan(stop_after_test_head=True):
            yield rcd

    def infer(self):
        '''
        Load inferset: every record, without the label.
        '''
        logger.info("load inferset from %s" % self.train_path)
        self.mode = TaskMode.create_infer()
        for rcd in self._scan():
            yield rcd

    def _scan(self, skip_test_head=False, stop_after_test_head=False):
        '''
        Shared CSV scanning loop for train/test/infer (the original
        duplicated this loop three times).

        @skip_test_head: bool
            skip the first `n_records_as_test` rows (train split)
        @stop_after_test_head: bool
            stop once `n_records_as_test` rows were read (test split)
        '''
        with open(self.train_path) as f:
            reader = csv.DictReader(f)
            for row_id, row in enumerate(reader):
                if self.n_records_as_test > 0:
                    if skip_test_head and row_id < self.n_records_as_test:
                        continue
                    if stop_after_test_head and row_id > self.n_records_as_test:
                        break
                rcd = self._parse_record(row)
                if rcd:
                    yield rcd

    def _parse_record(self, row):
        '''
        Parse a CSV row and get a record.

        @return: [dense_input, sparse_input] plus a one-element label list
            appended in train/test mode.
        '''
        record = []
        for key in categorial_features:
            record.append(self.fields[key].gen(row[key]))
        # 'hour' is formatted YYMMDDHH; keep only the hour-of-day part.
        record.append([int(row['hour'][-2:])])
        dense_input = concat_sparse_vectors(record, self.categorial_dims)

        record = []
        for key in id_features:
            if 'cross' not in key:
                record.append(self.fields[key].gen(row[key]))
            else:
                fea0 = self.fields[key].cross_fea0
                fea1 = self.fields[key].cross_fea1
                record.append(
                    self.fields[key].gen_cross_fea(row[fea0], row[fea1]))

        sparse_input = concat_sparse_vectors(record, self.id_dims)

        record = [dense_input, sparse_input]
        if not self.mode.is_infer():
            record.append(list((int(row['click']), )))
        return record
def ids2dense(vec, dim):
    '''Emit the dense-side ids unchanged (identity passthrough).'''
    # NOTE(review): `dim` is accepted but unused here -- presumably kept
    # for signature symmetry with a one-hot expansion; confirm before
    # removing it.
    return vec
def ids2sparse(vec):
    '''Format sparse ids as libsvm-style "<id>:1" one-hot tokens.'''
    tokens = []
    for idx in vec:
        tokens.append("%d:1" % idx)
    return tokens
# Pass 1: learn dataset meta info (fills the module-level `fields`
# generators and `feature_dims`).
detect_dataset(args.data_path, args.num_lines_to_detect)

dataset = AvazuDataset(
    args.data_path,
    args.test_set_size,
    fields=fields,
    feature_dims=feature_dims)

output_trainset_path = os.path.join(args.output_dir, 'train.txt')
output_testset_path = os.path.join(args.output_dir, 'test.txt')
output_infer_path = os.path.join(args.output_dir, 'infer.txt')
output_meta_path = os.path.join(args.output_dir, 'data.meta.txt')

# Train split: "<dense ids>\t<sparse libsvm tokens>\t<label>" per line,
# capped at args.train_size records.
with open(output_trainset_path, 'w') as f:
    for id, record in enumerate(dataset.train()):
        if id and id % 10000 == 0:
            logger.info("load %d records" % id)
        if id > args.train_size:
            break
        dnn_input, lr_input, click = record
        dnn_input = ids2dense(dnn_input, feature_dims['dnn_input'])
        lr_input = ids2sparse(lr_input)
        line = "%s\t%s\t%d\n" % (' '.join(map(str, dnn_input)),
                                 ' '.join(map(str, lr_input)), click[0])
        f.write(line)
logger.info('write to %s' % output_trainset_path)

# Test split: same format as train.
with open(output_testset_path, 'w') as f:
    for id, record in enumerate(dataset.test()):
        dnn_input, lr_input, click = record
        dnn_input = ids2dense(dnn_input, feature_dims['dnn_input'])
        lr_input = ids2sparse(lr_input)
        line = "%s\t%s\t%d\n" % (' '.join(map(str, dnn_input)),
                                 ' '.join(map(str, lr_input)), click[0])
        f.write(line)
logger.info('write to %s' % output_testset_path)

# Infer split: no label column; capped at args.test_set_size records.
with open(output_infer_path, 'w') as f:
    for id, record in enumerate(dataset.infer()):
        dnn_input, lr_input = record
        dnn_input = ids2dense(dnn_input, feature_dims['dnn_input'])
        lr_input = ids2sparse(lr_input)
        line = "%s\t%s\n" % (' '.join(map(str, dnn_input)),
                             ' '.join(map(str, lr_input)), )
        f.write(line)
        if id > args.test_set_size:
            break
logger.info('write to %s' % output_infer_path)

# Meta file: the two input-layer dimensionalities the trainer needs.
with open(output_meta_path, 'w') as f:
    lines = [
        "dnn_input_dim: %d" % feature_dims['dnn_input'],
        "lr_input_dim: %d" % feature_dims['lr_input']
    ]
    f.write('\n'.join(lines))
logger.info('write data meta into %s' % output_meta_path)
| {
"content_hash": "b4d0d99ee6611bc3b165a8048316efec",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 82,
"avg_line_length": 28.29440389294404,
"alnum_prop": 0.5740820362885889,
"repo_name": "xinghai-sun/models",
"id": "dd148adc244efc64021446b17488ec7f2b1c9bd9",
"size": "11629",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ctr/avazu_data_processer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "34663"
},
{
"name": "HTML",
"bytes": "174618"
},
{
"name": "Python",
"bytes": "509771"
},
{
"name": "Shell",
"bytes": "30390"
}
],
"symlink_target": ""
} |
import os;
import sys;
import operator;
import subprocess;
from time import gmtime, strftime
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__));
TOOLS_PATH = '%s/../tools/' % (SCRIPT_PATH);
SAMSCRIPTS_PATH = '%s/../tools/samscripts/src' % (SCRIPT_PATH);
sys.path.append(SAMSCRIPTS_PATH);
import fastqparser;
import fastqfilter;
import utility_sam;
DEBUG_VERBOSE = False;
### The reassignment below intentionally overrides the default above, so
### verbose debug output is currently always on; delete the next line (or
### swap the order) to silence it.
DEBUG_VERBOSE = True;
### Runs a shell command, echoing it to stderr first.
### WARNING: uses shell=True on an interpolated command string -- callers
### must never pass untrusted input (shell-injection risk).
def execute_command(command):
    sys.stderr.write('[Executing] "%s"\n' % (command));
    subprocess.call(command, shell=True);
    sys.stderr.write('\n');
### Mutates the given reference, and writes the mutations in a vcf file.
### Requires Mutatrix - it needs to be placed in the TOOLS_PATH.
### Also requires bgzip and tabix on the PATH for VCF compression/indexing.
### Outputs: the mutated reference at `out_file`, plus <out_file>.vcf and
### a reversed (REF/ALT swapped), bgzipped+indexed <out_file>.rev.vcf.gz.
def generate_mutated_reference(reference_path, snp_rate, indel_rate, out_file): # , out_path):
    out_file = os.path.abspath(out_file);
    reference_path = os.path.abspath(reference_path);
    out_path = os.path.abspath(os.path.dirname(out_file));
    if (not os.path.exists(out_path)):
        os.makedirs(out_path);
    # out_prefix = '%s/mutated_%s_snp%f_indel%f' % (out_path, os.path.splitext(os.path.basename(reference_path))[0], snp_rate, indel_rate);
    # out_vcf = os.path.abspath('%s.vcf' % (out_prefix));
    out_vcf = '%s.vcf' % (os.path.splitext(out_file)[0]);
    out_rev_vcf = '%s.rev.vcf' % (os.path.splitext(out_file)[0]);
    ref_ext = os.path.splitext(reference_path)[-1];
    out_ref_file = out_file; # '%s%s' % (out_prefix, ref_ext);
    sys.stderr.write('Mutating the reference using Mutatrix, output VCF file: "%s".\n' % (out_vcf));
    # Indels are only enabled when a non-zero indel rate was requested.
    if (indel_rate != 0):
        execute_command('cd %s; %s/mutatrix/mutatrix --snp-rate %f --population-size 1 --microsat-min-len 0 --mnp-ratio 0 --indel-rate %f --indel-max 10 %s > %s' % (out_path, TOOLS_PATH, snp_rate, indel_rate, reference_path, out_vcf));
    else:
        execute_command('cd %s; %s/mutatrix/mutatrix --snp-rate %f --population-size 1 --microsat-min-len 0 --mnp-ratio 0 --indel-rate 0 --indel-max 0 %s > %s' % (out_path, TOOLS_PATH, snp_rate, reference_path, out_vcf));
    # Swap columns 4 and 5 (REF <-> ALT) of every non-header VCF line.
    sys.stderr.write('Reversing the SNP bases in the VCF file, output VCF file: "%s".\n' % (out_rev_vcf));
    execute_command(r"cat %s | awk -F '\t' 'BEGIN {OFS = FS} {if ($0 == /^#.*/) print ; else {a=$4; $4=$5; $5=a; print } }' > %s" % (out_vcf, out_rev_vcf));
    sys.stderr.write('Compressing and indexing the VCF file.\n');
    execute_command('bgzip -c %s > %s.gz' % (out_rev_vcf, out_rev_vcf));
    execute_command('tabix -p vcf %s.gz' % (out_rev_vcf));
    ### Mutatrix splits all reference sequences into separate files. This part of code joins them back into one FASTA file.
    # Mutatrix names each output file '1:<seq name>:0<ext>' in out_path.
    [headers, lengths] = fastqparser.get_headers_and_lengths(reference_path);
    print headers;
    all_files = ['"%s/1:%s:0%s"' % (out_path, header.split(' ')[0], ref_ext) for header in headers];
    # all_files = ['"%s/1:%s:0%s"' % (out_path, header, ref_ext) for header in headers];
    # Keep a backup of a previous run's joined reference, if any.
    if (os.path.exists(out_ref_file)):
        os.rename(out_ref_file, '%s.bak' % (out_ref_file));
    for ref_file in all_files:
        ### Take care of the special characters.
        # escaped_ref_file = ref_file.replace('|', '\|');
        escaped_ref_file = ref_file;
        print escaped_ref_file;
        execute_command('cat %s >> %s' % (escaped_ref_file, out_ref_file));
        # Never pass a '*' glob to 'rm'; only delete concrete file names.
        if (len(ref_file) > 0 and ('*' in ref_file) == False):
            print 'Removing file: "%s".' % (ref_file);
            # os.remove(escaped_ref_file);
            execute_command('rm %s' % (escaped_ref_file));
### Runs POA (partial order alignment) consensus over `seq_path` in
### windows of fixed length, concatenating the per-window consensus into
### `out_consensus_file` (one record) and into a chunked FASTA next to it.
def run_poa_sequentially_v2(seq_path, out_consensus_file):
    temp_subseq_file = '%s/tmp.subseq.fasta' % (os.path.dirname(out_consensus_file));
    temp_msa_file = '%s/tmp.subseq.fasta.pir' % (os.path.dirname(out_consensus_file));
    # out_consensus_file = '%s/consensus-poa.fasta' % (os.path.dirname(seq_path));
    out_consensus_file_chunks = '%s/tmp.consensus.chunks.fasta' % (os.path.dirname(out_consensus_file));
    fp_out_all = open(out_consensus_file, 'w');
    fp_out_chunks = open(out_consensus_file_chunks, 'w');
    timestamp = strftime("%Y/%m/%d %H:%M:%S", gmtime());
    fp_out_all.write('>Consensus_with_POA all %s\n' % (timestamp));
    print 'seq_path = "%s"' % (seq_path);
    [ret_string, num_seqs, total_seq_len, average_seq_len, max_seq_len] = fastqparser.count_seq_length(seq_path);
    window_len = 5000;
    # window_len = 1000;
    # window_len = max_seq_len;
    start_coord = 0;
    while (start_coord < max_seq_len):
        end_coord = start_coord + window_len;
        # Extend the final window to the end so no short tail window
        # (shorter than window_len) is ever aligned on its own.
        if (end_coord > (max_seq_len - window_len)):
            end_coord = max_seq_len;
        sys.stderr.write('Window: start = %d, end = %d\n' % (start_coord, end_coord));
        # Cut the current window out of every sequence, then run POA on it.
        execute_command('%s/fastqfilter.py subseqs %s %d %d %s' % (SAMSCRIPTS_PATH, seq_path, start_coord, end_coord, temp_subseq_file));
        # if (start_coord == 0 or end_coord == max_seq_len):
        # 	execute_command('%s/poaV2/poa -do_progressive -read_fasta %s -pir %s %s/poaV2/blosum80.mat' % (TOOLS_PATH, temp_subseq_file, temp_msa_file, TOOLS_PATH));
        # 	execute_command('%s/poaV2/poa -do_progressive -read_fasta %s -pir %s %s/poaV2/all1.mat' % (TOOLS_PATH, temp_subseq_file, temp_msa_file, TOOLS_PATH));
        # else:
        execute_command('%s/poaV2/poa -do_global -do_progressive -read_fasta %s -pir %s %s/poaV2/blosum80.mat' % (TOOLS_PATH, temp_subseq_file, temp_msa_file, TOOLS_PATH));
        # execute_command('%s/poaV2/poa -do_global -do_progressive -read_fasta %s -pir %s %s/poaV2/all1.mat' % (TOOLS_PATH, temp_subseq_file, temp_msa_file, TOOLS_PATH));
        timestamp = strftime("%Y/%m/%d %H:%M:%S", gmtime());
        fp_out_chunks.write('>Consensus_with_POA %d-%d %s\n' % (start_coord, end_coord, timestamp));
        # Majority vote per MSA column; '.' (gap) wins mean no base is
        # emitted for that column.
        [headers, seqs, quals] = fastqparser.read_fastq(temp_msa_file);
        cons_seq = '';
        for i in xrange(0, len(seqs[0])):
            base_counts = {'A': 0, 'C': 0, 'T': 0, 'G': 0, '.': 0};
            for j in xrange(0, len(seqs)):
                base_counts[seqs[j][i]] += 1;
            sorted_base_counts = sorted(base_counts.items(), key=operator.itemgetter(1));
            # print sorted_base_counts;
            if (sorted_base_counts[-1][0] != '.'):
                cons_seq += sorted_base_counts[-1][0]
        fp_out_all.write('%s' % (cons_seq));
        fp_out_chunks.write('%s\n' % (cons_seq));
        # # print temp_subseq_file;
        # # print headers;
        # i = 0;
        # while (i < len(headers)):
        # 	if ('consensus' in headers[i]):
        # 		# print seqs[i];
        # 		# print seqs[i].replace('.', '');
        # 		chunk_seq = seqs[i].replace('.', '');
        # 		fp_out_all.write('%s' % (chunk_seq));
        # 		fp_out_chunks.write('%s\n' % (chunk_seq));
        # 		break;
        # 	i += 1;
        # break;
        start_coord = end_coord;
    fp_out_all.write('\n');
    fp_out_all.close();
    fp_out_chunks.close();
### SAM lines are expected to be sorted. sam_line1 should come before sam_line2 (that is sam_line1.pos should be <= sam_line2.pos).
### min_overlap_percent specifies the minimum percentage of each read that should be covered by the overlap. This should help avoid introduction of long indels.
### Returns: 0 for no (or too short) overlap, -1 when one read contains
### the other, otherwise the joined reference span (end2 - start1).
### NOTE(review): despite its name, `min_overlap_percent` is compared
### against the overlap length in bases here (the percentage comparison is
### commented out below) -- confirm which semantics are intended.
def check_overlap(sam_line1, sam_line2, min_overlap_percent):
    # Reference spans of both alignments (0-based, end-exclusive).
    start1 = sam_line1.pos - 1;
    # end1 = start1 + len(sam_line1.seq) - sam_line1.clip_count_front - sam_line1.clip_count_back;
    end1 = start1 + sam_line1.CalcReferenceLengthFromCigar();
    start2 = sam_line2.pos - 1;
    # end2 = start2 + len(sam_line2.seq) - sam_line2.clip_count_front - sam_line2.clip_count_back;
    end2 = start2 + sam_line2.CalcReferenceLengthFromCigar();
    ### In this case, reads do not overlap.
    if (start2 > end1 or end2 < start1):
        return 0;
    ### One read is contained in the other.
    if ((start2 >= start1 and end2 <= end1) or (start1 >= start2 and end1 <= end2)):
        return -1;
    # overlap_len = (end2 - start1);
    overlap_len = (end1 - start2);
    ### The reads *do* overlap, but the length of the overlap is too short, so dismiss the overlap.
    # if (overlap_len < min_overlap_percent*(end1 - start1) or overlap_len < min_overlap_percent*(end2 - start2)):
    if (overlap_len < min_overlap_percent):
        # Debug dump of the rejected overlap before dismissing it.
        print 'overlap_len = ', overlap_len;
        print 'start1 = %d, end1 = %d' % (start1, end1);
        print 'start2 = %d, end2 = %d' % (start2, end2);
        print 'min_overlap_percent*(end1 - start1) = ', min_overlap_percent*(end1 - start1);
        print 'min_overlap_percent*(end2 - start2) = ', min_overlap_percent*(end2 - start2);
        print 'len(sam_line1.seq) = %d' % (len(sam_line1.seq));
        print 'len(sam_line2.seq) = %d' % (len(sam_line2.seq));
        print '';
        # print 'Tu sam 1!';
        # exit(1);
        # return -2;
        return 0;
    # return overlap_len;
    # Length of the merged span if the two alignments were chained.
    joined_length = (end2 - start1);
    return joined_length;
### Builds an alternate contig sequence by stitching together the read
### sequences of a chain of overlapping alignments (`contig_sams`, sorted
### by position), filling the uncovered head/tail from the original contig
### (`ctg_seqs[0]`). Returns [new_contig, non_clipped_len], where
### non_clipped_len is the number of bases contributed by the reads.
def construct_contig_from_overlapping_sams(ctg_seqs, contig_sams):
    new_contig = '';
    # Uncovered prefix of the contig before the first alignment.
    overhang_before = contig_sams[0].pos - 1;
    if (DEBUG_VERBOSE == True):
        print 'overhang_before = %d' % (contig_sams[0].pos - 1);
    new_contig += ctg_seqs[0][0:overhang_before];
    non_clipped_len = 0;
    i = 0;
    while (i < len(contig_sams)):
        sam_line = contig_sams[i];
        # Take read bases from the end of the front clip up to where the
        # next alignment starts (or to the end of the last read).
        start_pos = sam_line.clip_count_front;
        end_pos = (len(sam_line.seq) - sam_line.clip_count_back) if ((i + 1) == len(contig_sams)) else sam_line.FindBasePositionOnRead(contig_sams[i+1].pos - 1); # (contig_sams[i+1].pos - sam_line.pos);
        if (DEBUG_VERBOSE == True):
            print 'start_pos = %d' % (start_pos);
            print 'end_pos = %d' % (end_pos);
            print 'len(new_contig) = %d' % (len(new_contig));
            print 'contig_sams[i].pos - 1 = %d' % (contig_sams[i].pos - 1);
            print 'contig_sams[i].CalcReferenceLengthFromCigar() = %d' % (contig_sams[i].CalcReferenceLengthFromCigar());
            print '(contig_sams[i+1].pos - 1) = %d' % (-1 if ((i+1) == len(contig_sams)) else (contig_sams[i+1].pos - 1));
            print '(contig_sams[i].pos - 1) = %d' % ((contig_sams[i].pos - 1));
            print 'sam_line.CalcReferenceLengthFromCigar() = %d' % sam_line.CalcReferenceLengthFromCigar();
            print 'contig_sams[i].pos - 1 + sam_line.CalcReferenceLengthFromCigar() = %d' % (contig_sams[i].pos - 1 + sam_line.CalcReferenceLengthFromCigar());
            print 'len(sam_line.seq) = %d' % (len(sam_line.seq));
            print 'sam_line.CalcReadLengthFromCigar() = %d' % (sam_line.CalcReadLengthFromCigar() - sam_line.clip_count_front - sam_line.clip_count_back);
        new_chunk = sam_line.seq[start_pos:end_pos];
        non_clipped_len += len(new_chunk);
        new_contig += new_chunk;
        i += 1;
        if (DEBUG_VERBOSE == True):
            print 'len(new_contig) = %d' % (len(new_contig));
            print '';
    # overhang_after = contig_sams[-1].pos + len(contig_sams[-1].seq) - contig_sams[-1].clip_count_front - contig_sams[-1].clip_count_back;
    # Uncovered suffix of the contig after the last alignment.
    overhang_after = contig_sams[-1].pos - 1 + contig_sams[-1].CalcReferenceLengthFromCigar();
    if (DEBUG_VERBOSE == True):
        print 'len(new_contig) = %d' % (len(new_contig));
        print 'overhang_after = %d' % (overhang_after);
        print 'contig_sams[-1].pos - 1 = %d' % (contig_sams[-1].pos - 1);
        print 'contig_sams[-1].CalcReferenceLengthFromCigar() = %d' % (contig_sams[-1].CalcReferenceLengthFromCigar());
    # NOTE(review): the [overhang_after:-1] slice drops the contig's final
    # base; [overhang_after:] looks intended -- confirm before changing.
    new_contig += ctg_seqs[0][overhang_after:-1];
    if (DEBUG_VERBOSE == True):
        print 'len(new_contig) after adding the overhang = %d' % (len(new_contig));
        print 'len(ctg_seqs[0]) = %d' % (len(ctg_seqs[0]));
    return [new_contig, non_clipped_len];
### Parameter 'single_contig_file' is the path to a file containing only single contig sequence. If the original contig file was a multifasta, then a single contig
### from that multifasta needs to be extracted to a separate file, which is the file provided through this parameter.
### Parameter 'out_alt_ctg_file' will contain all the alternate contigs for the given input contig.
def extract_alternate_contigs(single_contig_file, reads_file, out_alt_ctg_file, ref_file=''):
### Generate file paths for some temporary files.
path_aligns_basename = '%s/tmp.allreads' % (os.path.dirname(out_alt_ctg_file));
path_aligns = '%s.sam' % (path_aligns_basename);
path_aligns_sorted_basename = '%s.sorted' % (path_aligns_basename);
path_aligns_sorted_sam = '%s.sam' % (path_aligns_sorted_basename);
path_alt_contig_sams = '%s.altctgs.sam' % (path_aligns_basename);
if (not os.path.exists(os.path.dirname(out_alt_ctg_file))):
os.path.makedirs(os.path.dirname(out_alt_ctg_file));
### Generate alignments.
execute_command('%s/graphmap/bin/Linux-x64/graphmap -a anchor -b 3 -r %s -d %s -o %s' % (TOOLS_PATH, single_contig_file, reads_file, path_aligns));
execute_command('samtools view -Sb %s | samtools sort - %s && samtools view -h %s.bam > %s' % (path_aligns, path_aligns_sorted_basename, path_aligns_sorted_basename, path_aligns_sorted_sam));
[ctg_headers, ctg_seqs, ctg_quals] = fastqparser.read_fastq(single_contig_file);
[headers, all_sam_lines] = utility_sam.LoadSAM(path_aligns_sorted_sam);
sys.stderr.write('Number of lines in the original SAM file: %d\n' % (len(all_sam_lines)));
sam_lines = [];
for sam_line in all_sam_lines:
if (sam_line.IsMapped() == False):
continue;
seq_len = len(sam_line.seq) - sam_line.clip_count_front - sam_line.clip_count_back;
cigop_counts = sam_line.CountAlignmentOps();
# print cigop_counts;
# if (cigop_counts == {}):
# print sam_line.original_line;
### Check if the CIGAR string is actually in the extended format.
if ('M' in cigop_counts):
sys.stderr.write('Warning: alignment does not contain the *extended* CIGAR format! Skipping alignment.\n');
exit(1);
else:
matches = cigop_counts['='];
errors = cigop_counts['X'] + cigop_counts['D'] + cigop_counts['I'];
if (float(matches) / float(seq_len) >= 0.70 and float(errors) / float(seq_len) < 0.40):
sam_lines.append(sam_line);
sys.stderr.write('Number of filtered SAM lines (only mapped and with errors below threshold): %d\n' % (len(sam_lines)));
fp_out_alt_ctg = open(out_alt_ctg_file, 'w');
fp_out_alt_ctg_sams = open(path_alt_contig_sams, 'w');
fp_out_alt_ctg_sams.write('\n'.join(headers) + '\n');
### Find alternate contigs from alignments.
sams_to_process = sam_lines;
coverage = 0;
while (coverage < 100 and len(sams_to_process) > 0):
coverage += 1;
print '---------------------------------------';
print 'Coverage = %d' % (coverage);
sys.stderr.write('Number of alignments in pool: %d\n' % (len(sams_to_process)));
contig_sams = [];
unused_sams = [];
i = 0;
candidate_read = i;
contig_sams.append(sams_to_process[candidate_read]);
# for candidate_read in xrange((i+1), len(sams_to_process)):
start1 = sams_to_process[candidate_read].pos - 1;
end1 = start1 + sams_to_process[candidate_read].CalcReferenceLengthFromCigar();
print 'candidate: start = %d, end = %d' % (start1, end1);
while ((candidate_read + 1) < len(sams_to_process)):
max_overlap_len = 0;
max_overlap_id = -1;
# j = candidate_read + 1;
# while (j < len(sams_to_process)):
for j in xrange(candidate_read + 1, len(sams_to_process)):
overlap_len = check_overlap(sams_to_process[candidate_read], sams_to_process[j], 0);
if (overlap_len == 0):
print 'break 1';
print ' j = %d (in the range of %d to %d)' % (j, candidate_read + 1, len(sams_to_process));
break;
elif (overlap_len == -1 or overlap_len == -2): ### -1 is for contained sequences, and -2 is for overlaps which are below the threshold.
# j += 1;
continue;
if (max_overlap_id == -1 or overlap_len >= max_overlap_len):
max_overlap_len = overlap_len;
max_overlap_id = j;
# j += 1;
if (max_overlap_id > 0):
print ' starting read = %d' % (candidate_read);
print ' candidate_read = %d' % (max_overlap_id);
print ' max_overlap_len = %d' % (max_overlap_len);
print ' unused overlapping reads: %d - %d' % ((candidate_read + 1), max_overlap_id);
start1 = sams_to_process[max_overlap_id].pos - 1;
end1 = start1 + sams_to_process[max_overlap_id].CalcReferenceLengthFromCigar();
print ' candidate: start = %d, end = %d' % (start1, end1);
unused_sams += sams_to_process[(candidate_read + 1):max_overlap_id];
candidate_read = max_overlap_id;
contig_sams.append(sams_to_process[candidate_read]);
else:
print 'break 2';
break;
print ' unused reads: %d - %d' % ((candidate_read + 1), len(sams_to_process));
unused_sams += sams_to_process[(candidate_read + 1):len(sams_to_process)];
sams_to_process = unused_sams + [];
# if ((candidate_read + 1) == len(sam_lines)):
# break;
# i += 1;
# max_overlap_len = 0;
# max_overlap_id = -1;
# while (i < len(sam_lines)):
# overlap_len = check_overlap(sam_lines[candidate_read], sam_lines[i + 1]);
# if ((i + 1) >= len(sam_lines) or overlap_len <= 0):
# break;
# else:
# unused_sams.append(sam_lines[i]);
# overlap_len = check_overlap(sam_lines[candidate_read], sam_lines[i]);
# if (overlap_len >= max_overlap_len):
# max_overlap_len = overlap_len;
# max_overlap_id = i;
# i += 1;
# contig_sams.append(sam_lines[candidate_read]);
# # candidate_read = i;
# if (max_overlap_id > 0):
# candidate_read = max_overlap_id;
# else:
# break;
# i += 1;
print ' after coverage %d:' % (coverage);
print ' len(sams_to_process) = %d' % (len(sams_to_process));
print ' len(contig_sams) = %d' % len(contig_sams);
print ' len(unused_sams) = %d' % len(unused_sams);
[new_contig, non_clipped_len] = construct_contig_from_overlapping_sams(ctg_seqs, contig_sams);
print '********************* len(new_contig) = %d, non_clipped_len = %d' % (len(new_contig), non_clipped_len);
if (float(non_clipped_len) < 0.85*float(len(ctg_seqs[0]))):
# print 'Tu sam!';
# exit(1);
continue;
else:
print '++++++++++++++++++++++++++++++++++++++++';
fp_out_alt_ctg.write('>%s %d\n' % (ctg_headers[0], coverage));
fp_out_alt_ctg.write('%s\n' % (new_contig));
for sam_line in contig_sams:
fp_out_alt_ctg_sams.write(sam_line.original_line + '\n');
fp_out_alt_ctg_sams.write('\n');
fp_out_alt_ctg_sams.close();
fp_out_alt_ctg.close();
def TEST_SIMULATE():
    ### Concatenate the ten mutated E. coli references (ecoli-0 .. ecoli-9)
    ### into one multi-FASTA; the first file truncates the output, the
    ### rest append to it.
    for idx in xrange(0, 10):
        src_fasta = 'data/test-msa/ecoli-%d.fa' % (idx)
        redirect = '>' if idx == 0 else '>>'
        execute_command('cat %s %s %s' % (src_fasta, redirect, 'data/test-msa/ecoli-all.fa'))
def TEST_SAM_TO_CONTIG(single_contig_file, contig_sam, output_alt_contig_fasta):
    ### Regression helper: rebuild an alternate contig from a prepared SAM
    ### file and write it out as a single FASTA record.
    fastq_data = fastqparser.read_fastq(single_contig_file)
    ctg_seqs = fastq_data[1]
    sam_data = utility_sam.LoadSAM(contig_sam)
    contig_sams = sam_data[1]
    new_contig = construct_contig_from_overlapping_sams(ctg_seqs, contig_sams)[0]
    out_fp = open(output_alt_contig_fasta, 'w')
    out_fp.write('>Alternate contig\n')
    out_fp.write('%s\n' % new_contig)
    out_fp.close()
### This function opens the contigs file and separates contigs one-by-one to a temporary file, to which reads will be aligned to.
### For each contig: extract alternate contigs from read alignments, run
### POA consensus over them, and append the result to `out_file`.
def process_contigs(contigs_file, reads_file, out_file):
    try:
        fp_in = open(contigs_file, 'r');
    except IOError:
        sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % contigs_file);
        return;
    single_contig_file = '%s/temp/tmp.singlecontig.raw.fasta' % (os.path.dirname(out_file));
    out_alt_ctg_file = '%s/temp/tmp.singlecontig.alt.fasta' % (os.path.dirname(out_file));
    out_consensus_single_contig = '%s/temp/tmp.singlecontig.cons.fasta' % (os.path.dirname(out_file));
    # Probe both output locations for writability up-front, before doing
    # any expensive per-contig work.
    try:
        fp_out_single_contig = open(single_contig_file, 'w');
        fp_out_single_contig.close();
    except IOError:
        sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % single_contig_file);
        return;
    try:
        fp_out = open(out_file, 'a');
        fp_out.close();
    except IOError:
        sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % out_file);
        return;
    # Stream contigs one at a time; an empty header signals end of file.
    while True:
        [header, read] = fastqparser.get_single_read(fp_in);
        if (len(header) == 0):
            break;
        seq = read[1];
        fp_out_single_contig = open(single_contig_file, 'w');
        fp_out_single_contig.write('>%s\n' % header);
        fp_out_single_contig.write('%s\n' % seq);
        fp_out_single_contig.close();
        extract_alternate_contigs(single_contig_file, reads_file, out_alt_ctg_file);
        run_poa_sequentially_v2(out_alt_ctg_file, out_consensus_single_contig);
        execute_command('cat %s >> %s' % (out_consensus_single_contig, out_file));
    fp_in.close();
def backup_old_polish_results(polished_fasta, backup_fasta):
    ### Append a previous polishing result (if one exists) to a backup
    ### file and remove the original, so a fresh run starts clean.
    src = os.path.abspath(polished_fasta)
    dst = os.path.abspath(backup_fasta)
    if (os.path.exists(src) == False):
        return
    execute_command('cat %s >> %s' % (src, dst))
    os.remove(src)
def RUN_TEST_SAM_TO_CONTIG():
    """Developer smoke test for TEST_SAM_TO_CONTIG on fixed local paths.

    Terminates the process after running (exit(1)).
    """
    raw_contig_fasta = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/out/temp/tmp.singlecontig.raw.fasta';
    alignments_sam = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/out/temp/third.sam';
    alt_contig_out = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/out/temp/third.fasta';
    TEST_SAM_TO_CONTIG(raw_contig_fasta, alignments_sam, alt_contig_out);
    exit(1);
def TEST_SAMPLE_DATA():
    """Dev smoke test: polish the sample lambda assembly, then dnadiff raw
    vs. polished against the reference. Terminates the process (exit(1))."""
    # run_poa_sequentially('data/test-msa/ecoli-all.fa');
    # contigs_file = 'tests/layout_20151114_221431/contigs_fast.fasta';
    contigs_file = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/miniasm/layout.fasta';
    reads_file = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/sample-dataset/reads-lambda-R73.fasta';
    # NOTE: the assignment below deliberately overrides the one above —
    # the filtered read set (without problematic reads) is the one used.
    reads_file = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/sample-dataset/reads-lambda-R73-without_problematic.fasta';
    out_file = '/home/isovic/work/eclipse-workspace/git/ra-consensus/tests/out/contigs.consensus.fasta';
    # out_sam_file = 'tests/out/contigs.same.sam';
    # Keep any previous polishing run in a .bak file before regenerating.
    backup_old_polish_results(out_file, '%s.bak' % (out_file));
    process_contigs(contigs_file, reads_file, out_file);
    # Compare both the raw layout and the polished consensus to the reference.
    execute_command('dnadiff -p tests/out/temp/dnadiff/raw tests/sample-dataset/NC_001416.fa tests/miniasm/layout.fasta');
    execute_command('dnadiff -p tests/out/temp/dnadiff/polished tests/sample-dataset/NC_001416.fa tests/out/contigs.consensus.fasta');
    # make_consensus_reference_from_vcf(sys.argv[1], sys.argv[2], sys.argv[3]);
    exit(1);
def main():
    """CLI entry point: <reads.fasta> <contigs.fasta> <out_polished.fasta>."""
    # TEST_SIMULATE();
    # RUN_TEST_SAM_TO_CONTIG();
    # NOTE(review): TEST_SAMPLE_DATA() ends with exit(1), so everything
    # below this call is currently unreachable development scaffolding.
    TEST_SAMPLE_DATA();
    # if (len(sys.argv) == 1):
    # 	TEST_SAMPLE_DATA();
    if (len(sys.argv) != 4):
        sys.stderr.write('Proof-of-Concept Consensus for de novo genome assemblies.\n');
        sys.stderr.write('This consensus tool first creates several instances of the contig through alignment (e.g. 10x; these instances have similar error rate to the original data), and then applies POA on chunks (windows) alternate contigs to produce consensus.\n');
        sys.stderr.write('Usage:\n');
        sys.stderr.write('\t%s <reads.fasta> <contigs.fasta> <out_polished.fasta>\n' % (sys.argv[0]));
        exit(1);
    reads_file = os.path.abspath(sys.argv[1]);
    contigs_file = os.path.abspath(sys.argv[2]);
    out_file = os.path.abspath(sys.argv[3]);
    out_folder = os.path.dirname(out_file);
    out_basename = os.path.splitext(os.path.basename(out_file))[0];
    # NOTE(review): out_sam_file is computed but never used below — the
    # real pipeline invocation appears to be unfinished.
    out_sam_file = '%s/tmp-%s/%s.all.sam' % (out_folder, out_basename, out_basename);
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main();
| {
"content_hash": "8a69a75ed34474e6c17687aa88ab9266",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 263,
"avg_line_length": 43.11864406779661,
"alnum_prop": 0.6564028651292803,
"repo_name": "isovic/ra-consensus",
"id": "0f82cc24ee6cd2cb1a73543af2ed5b8cdac84fa3",
"size": "22961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/poacons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "160117"
}
],
"symlink_target": ""
} |
import os
import struct
from litex.gen import *
from litex.soc.interconnect import wishbone
class MemoryMustHaveContents(Memory):
    """Memory subclass that refuses to emit Verilog without init contents.

    FirmwareROM swaps a memory's class to this one so that a build missing
    its firmware image fails loudly at Verilog-emission time instead of
    producing a gateware with an empty ROM.
    """
    @staticmethod
    def emit_verilog(memory, ns, add_data_file):
        # memory.filename is attached by FirmwareROM.__init__.
        assert memory.init, "ROM contents not found! {}".format(memory.filename)
        return Memory.emit_verilog(memory, ns, add_data_file)
class FirmwareROM(wishbone.SRAM):
    """Wishbone SRAM pre-initialized with a firmware image.

    The firmware file is read as big-endian 32-bit words. If the file is
    missing, the SRAM is left uninitialized but the memory is marked (via
    MemoryMustHaveContents) so that Verilog emission fails — making a
    forgotten firmware build a hard error instead of a silent empty ROM.

    :param size: ROM size in bytes; the firmware must be strictly smaller.
    :param filename: path to the raw big-endian firmware image.
    """
    def __init__(self, size, filename):
        if os.path.exists(filename):
            with open(filename, "rb") as firmware_file:
                raw = firmware_file.read()
            # A truncated/odd-sized image used to crash struct.unpack on the
            # short trailing read with an opaque error; fail clearly instead.
            assert len(raw) % 4 == 0, (
                "Firmware size {} is not a multiple of 4 bytes! {}".format(
                    len(raw), filename))
            data = [word[0] for word in struct.iter_unpack(">I", raw)]
            data_size = len(data)*4
            assert data_size > 0
            assert data_size < size, (
                "Firmware is too big! {} bytes > {} bytes".format(
                    data_size, size))
            print("Firmware {} bytes ({} bytes left)".format(
                data_size, size-data_size))
            wishbone.SRAM.__init__(self, size, init=data)
        else:
            print("No firmware found! ({}) Won't compile.".format(
                filename))
            wishbone.SRAM.__init__(self, size)
        # Enforce "must have contents" at Verilog-emission time.
        self.mem.__class__ = MemoryMustHaveContents
        self.mem.filename = filename
| {
"content_hash": "0632397c149a0799c48e500f93dcb594",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 34.58974358974359,
"alnum_prop": 0.541882876204596,
"repo_name": "cr1901/HDMI2USB-litex-firmware",
"id": "f2a7b11c7e0a12d2b2a8819416a5e2db19e9af37",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gateware/firmware.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "343104"
},
{
"name": "C++",
"bytes": "483"
},
{
"name": "Makefile",
"bytes": "34333"
},
{
"name": "Python",
"bytes": "415338"
},
{
"name": "Shell",
"bytes": "67065"
},
{
"name": "VHDL",
"bytes": "387579"
},
{
"name": "Verilog",
"bytes": "9978"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.contrib.contenttypes.models import ContentType
import os
import json
from django.db import IntegrityError
from django.core.urlresolvers import reverse
from django.core.files.storage import default_storage, get_storage_class
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.http import HttpResponse, HttpResponseBadRequest, \
HttpResponseRedirect, HttpResponseForbidden, HttpResponseNotFound,\
HttpResponseServerError
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader, RequestContext
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST,\
require_http_methods
from google_doc import GoogleDoc
from guardian.shortcuts import assign_perm, remove_perm, get_users_with_perms
from main.forms import UserProfileForm, FormLicenseForm, DataLicenseForm,\
SupportDocForm, QuickConverterFile, QuickConverterURL, QuickConverter,\
SourceForm, PermissionForm, MediaForm, MapboxLayerForm, \
ActivateSMSSupportFom
from main.models import UserProfile, MetaData
from odk_logger.models import Instance, XForm
from odk_logger.views import enter_data
from odk_viewer.models import DataDictionary, ParsedInstance
from odk_viewer.models.data_dictionary import upload_to
from odk_viewer.models.parsed_instance import GLOBAL_SUBMISSION_STATS,\
DATETIME_FORMAT
from odk_viewer.views import survey_responses, attachment_url
from stats.models import StatsCount
from stats.tasks import stat_log
from utils.decorators import is_owner
from utils.logger_tools import response_with_mimetype_and_name, publish_form
from utils.user_auth import check_and_set_user, set_profile_data,\
has_permission, helper_auth_helper, get_xform_and_perms,\
check_and_set_user_and_form, add_cors_headers
from utils.log import audit_log, Actions
from main.models import AuditLog
from django.conf import settings
from utils.viewer_tools import enketo_url
from utils.qrcode import generate_qrcode
from sms_support.tools import check_form_sms_compatibility, is_sms_related
from sms_support.autodoc import get_autodoc_for
from sms_support.providers import providers_doc
from registration.signals import user_registered
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(user_registered, dispatch_uid='auto_add_crowdform')
def auto_add_crowd_form_to_registered_user(sender, **kwargs):
    """On user registration, subscribe the new user to the default crowdform.

    Requires settings.AUTO_ADD_CROWDFORM to be truthy and
    settings.DEFAULT_CROWDFORM to be a dict with 'xform_username' and
    'xform_id_string'. Silently does nothing if the configured form is absent.
    """
    new_user = kwargs.get('user')
    if hasattr(settings, 'AUTO_ADD_CROWDFORM') and \
            settings.AUTO_ADD_CROWDFORM and \
            hasattr(settings, 'DEFAULT_CROWDFORM'):
        try:
            default_crowdform = settings.DEFAULT_CROWDFORM
            # Validate the setting's shape before querying.
            if isinstance(default_crowdform, dict) and\
                    'xform_username' in default_crowdform and\
                    'xform_id_string' in default_crowdform:
                xform = XForm.objects.get(
                    id_string=default_crowdform['xform_id_string'],
                    user__username=default_crowdform['xform_username'])
                MetaData.crowdform_users(xform, new_user.username)
        except XForm.DoesNotExist:
            # Misconfigured default crowdform: skip rather than break signup.
            pass
def home(request):
    """Show the public landing page, or redirect logged-in users to their profile."""
    username = request.user.username
    if not username:
        # Anonymous visitor: render the public home page.
        return render_to_response('home.html',
                                  context_instance=RequestContext(request))
    return HttpResponseRedirect(
        reverse(profile, kwargs={'username': username}))
@login_required
def login_redirect(request):
    """Post-login hop: forward the user to their own profile page."""
    target = reverse(profile, kwargs={'username': request.user.username})
    return HttpResponseRedirect(target)
@require_POST
@login_required
def clone_xlsform(request, username):
    """
    Copy a public/Shared form to a users list of forms.
    Eliminates the need to download Excel File and upload again.

    POST params: 'username' (form owner) and 'id_string' (form to clone).
    The cloned form gets XForm.CLONED_SUFFIX appended to its id_string and
    is published under the requesting user's account.
    """
    to_username = request.user.username
    context = RequestContext(request)
    context.message = {'type': None, 'text': '....'}

    def set_form():
        # Deferred body executed by publish_form, which converts any
        # exception into an error message dict.
        form_owner = request.POST.get('username')
        id_string = request.POST.get('id_string')
        xform = XForm.objects.get(user__username=form_owner,
                                  id_string=id_string)
        # id_strings may not start with a digit; prefix with '_' if needed.
        if len(id_string) > 0 and id_string[0].isdigit():
            id_string = '_' + id_string
        path = xform.xls.name
        if default_storage.exists(path):
            # Copy the source XLS into the cloner's storage area.
            xls_file = upload_to(None, '%s%s.xls' % (
                id_string, XForm.CLONED_SUFFIX), to_username)
            xls_data = default_storage.open(path)
            xls_file = default_storage.save(xls_file, xls_data)
            context.message = u'%s-%s' % (form_owner, xls_file)
            survey = DataDictionary.objects.create(
                user=request.user,
                xls=xls_file
            ).survey
            # log to cloner's account
            audit = {}
            audit_log(
                Actions.FORM_CLONED, request.user, request.user,
                _("Cloned form '%(id_string)s'.") %
                {
                    'id_string': survey.id_string,
                }, audit, request)
            clone_form_url = reverse(
                show, kwargs={
                    'username': to_username,
                    'id_string': xform.id_string + XForm.CLONED_SUFFIX})
            return {
                'type': 'alert-success',
                'text': _(u'Successfully cloned to %(form_url)s into your '
                          u'%(profile_url)s') %
                {'form_url': u'<a href="%(url)s">%(id_string)s</a> ' % {
                    'id_string': survey.id_string,
                    'url': clone_form_url
                },
                    'profile_url': u'<a href="%s">profile</a>.' %
                    reverse(profile, kwargs={'username': to_username})}
            }
    form_result = publish_form(set_form)
    if form_result['type'] == 'alert-success':
        # comment the following condition (and else)
        # when we want to enable sms check for all.
        # until then, it checks if form barely related to sms
        if is_sms_related(form_result.get('form_o')):
            form_result_sms = check_form_sms_compatibility(form_result)
            context.message_list = [form_result, form_result_sms]
        else:
            context.message = form_result
    else:
        context.message = form_result
    if request.is_ajax():
        # Return a jQuery snippet that injects the rendered message; quotes
        # and newlines are escaped so it survives JS string interpolation.
        res = loader.render_to_string(
            'message.html',
            context_instance=context).replace("'", r"\'").replace('\n', '')
        return HttpResponse(
            "$('#mfeedback').html('%s').show();" % res)
    else:
        return HttpResponse(context.message['text'])
def profile(request, username):
    """Render a user's profile page.

    POST (authenticated): publish an uploaded XLSForm under the requester's
    account. GET: show the profile; when the viewer is the profile owner,
    also populate the dashboard (own forms, crowdforms, forms shared with
    them).
    """
    context = RequestContext(request)
    content_user = get_object_or_404(User, username=username)
    context.form = QuickConverter()
    # xlsform submission...
    if request.method == 'POST' and request.user.is_authenticated():
        def set_form():
            # Deferred body run by publish_form (catches and reports errors).
            form = QuickConverter(request.POST, request.FILES)
            survey = form.publish(request.user).survey
            audit = {}
            audit_log(
                Actions.FORM_PUBLISHED, request.user, content_user,
                _("Published form '%(id_string)s'.") %
                {
                    'id_string': survey.id_string,
                }, audit, request)
            enketo_webform_url = reverse(
                enter_data,
                kwargs={'username': username, 'id_string': survey.id_string}
            )
            return {
                'type': 'alert-success',
                'preview_url': reverse(enketo_preview, kwargs={
                    'username': username,
                    'id_string': survey.id_string
                }),
                'text': _(u'Successfully published %(form_id)s.'
                          u' <a href="%(form_url)s">Enter Web Form</a>'
                          u' or <a href="#preview-modal" data-toggle="modal">'
                          u'Preview Web Form</a>')
                % {'form_id': survey.id_string,
                   'form_url': enketo_webform_url},
                'form_o': survey
            }
        form_result = publish_form(set_form)
        if form_result['type'] == 'alert-success':
            # comment the following condition (and else)
            # when we want to enable sms check for all.
            # until then, it checks if form barely related to sms
            if is_sms_related(form_result.get('form_o')):
                form_result_sms = check_form_sms_compatibility(form_result)
                context.message_list = [form_result, form_result_sms]
            else:
                context.message = form_result
        else:
            context.message = form_result
    # profile view...
    # for the same user -> dashboard
    if content_user == request.user:
        context.show_dashboard = True
        context.all_forms = content_user.xforms.count()
        context.form = QuickConverterFile()
        context.form_url = QuickConverterURL()
        context.odk_url = request.build_absolute_uri(
            "/%s" % request.user.username)
        xforms = XForm.objects.filter(user=content_user)\
            .select_related('user', 'surveys')
        context.user_xforms = xforms
        # Crowdforms this user is subscribed to (tracked via MetaData rows).
        crowdforms = XForm.objects.filter(
            metadata__data_type=MetaData.CROWDFORm_USERS,
            metadata__data_value=username,)\
            .select_related('user')
        context.crowdforms = crowdforms
        # forms shared with user
        xfct = ContentType.objects.get(app_label='odk_logger', model='xform')
        xfs = content_user.userobjectpermission_set.filter(content_type=xfct)
        shared_forms_pks = list(set([xf.object_pk for xf in xfs]))
        context.forms_shared_with = XForm.objects.filter(
            pk__in=shared_forms_pks).exclude(user=content_user)\
            .select_related('user')
    # for any other user -> profile
    set_profile_data(context, content_user)
    return render_to_response("profile.html", context_instance=context)
def members_list(request):
    """Staff/superuser-only listing of every registered user."""
    if not (request.user.is_staff or request.user.is_superuser):
        return HttpResponseForbidden(_(u'Forbidden.'))
    context = RequestContext(request)
    context.template = 'people.html'
    context.users = User.objects.all()
    return render_to_response("people.html", context_instance=context)
@login_required
def profile_settings(request, username):
    """Edit the current user's profile settings (creates the profile row
    on first visit). POST saves and redirects to the public profile."""
    context = RequestContext(request)
    content_user = check_and_set_user(request, username)
    context.content_user = content_user
    profile, created = UserProfile.objects.get_or_create(user=content_user)
    if request.method == 'POST':
        form = UserProfileForm(request.POST, instance=profile)
        if form.is_valid():
            # get user
            # user.email = cleaned_email
            # The email field lives on the User model, not the profile,
            # so it is copied over and saved separately.
            form.instance.user.email = form.cleaned_data['email']
            form.instance.user.save()
            form.save()
            # todo: add string rep. of settings to see what changed
            audit = {}
            audit_log(
                Actions.PROFILE_SETTINGS_UPDATED, request.user, content_user,
                _("Profile settings updated."), audit, request)
            return HttpResponseRedirect(reverse(
                public_profile, kwargs={'username': request.user.username}
            ))
        # Invalid POST falls through and re-renders with form errors.
    else:
        form = UserProfileForm(
            instance=profile, initial={"email": content_user.email})
    return render_to_response("settings.html", {'form': form},
                              context_instance=context)
@require_GET
def public_profile(request, username):
    """Display a user's public profile page, logging the access."""
    content_user = check_and_set_user(request, username)
    # check_and_set_user may hand back a redirect instead of a user.
    if isinstance(content_user, HttpResponseRedirect):
        return content_user
    context = RequestContext(request)
    set_profile_data(context, content_user)
    context.is_owner = request.user == content_user
    audit_log(
        Actions.PUBLIC_PROFILE_ACCESSED, request.user, content_user,
        _("Public profile accessed."), {}, request)
    return render_to_response("profile.html", context_instance=context)
@login_required
def dashboard(request):
    """Render the logged-in user's dashboard."""
    content_user = request.user
    context = RequestContext(request)
    context.form = QuickConverter()
    set_profile_data(context, content_user)
    context.odk_url = request.build_absolute_uri(
        "/%s" % content_user.username)
    return render_to_response("dashboard.html", context_instance=context)
@require_GET
def show(request, username=None, id_string=None, uuid=None):
    """Show a form's detail page.

    When called with a uuid, record whether a public link is active in the
    session and redirect to the canonical username/id_string URL. Access is
    granted to shared forms, users with view permission, or holders of a
    public link. Owners additionally get all the management forms.
    """
    if uuid:
        xform = get_object_or_404(XForm, uuid=uuid)
        # Remember public-link access for the post-redirect permission check.
        request.session['public_link'] = \
            xform.uuid if MetaData.public_link(xform) else False
        return HttpResponseRedirect(reverse(show, kwargs={
            'username': xform.user.username,
            'id_string': xform.id_string
        }))
    xform, is_owner, can_edit, can_view = get_xform_and_perms(
        username, id_string, request)
    # no access
    if not (xform.shared or can_view or request.session.get('public_link')):
        return HttpResponseRedirect(reverse(home))
    context = RequestContext(request)
    # Whether this user has already cloned this form.
    context.cloned = len(
        XForm.objects.filter(user__username=request.user.username,
                             id_string=id_string + XForm.CLONED_SUFFIX)
    ) > 0
    context.public_link = MetaData.public_link(xform)
    context.is_owner = is_owner
    context.can_edit = can_edit
    context.can_view = can_view or request.session.get('public_link')
    context.xform = xform
    context.content_user = xform.user
    context.base_url = "https://%s" % request.get_host()
    context.source = MetaData.source(xform)
    context.form_license = MetaData.form_license(xform).data_value
    context.data_license = MetaData.data_license(xform).data_value
    context.supporting_docs = MetaData.supporting_docs(xform)
    context.media_upload = MetaData.media_upload(xform)
    context.mapbox_layer = MetaData.mapbox_layer_upload(xform)
    if is_owner:
        # Owner-only management widgets.
        context.sms_support_form = ActivateSMSSupportFom(
            initial={'enable_sms_support': xform.allows_sms,
                     'sms_id_string': xform.sms_id_string})
        if not xform.allows_sms:
            # Pre-compute whether the form could support SMS if enabled.
            context.sms_compatible = check_form_sms_compatibility(
                None, json_survey=json.loads(xform.json))
        else:
            url_root = request.build_absolute_uri('/')[:-1]
            context.sms_providers_doc = providers_doc(
                url_root=url_root,
                username=username,
                id_string=id_string)
            context.url_root = url_root
        context.form_license_form = FormLicenseForm(
            initial={'value': context.form_license})
        context.data_license_form = DataLicenseForm(
            initial={'value': context.data_license})
        context.doc_form = SupportDocForm()
        context.source_form = SourceForm()
        context.media_form = MediaForm()
        context.mapbox_layer_form = MapboxLayerForm()
        # Human-readable permission summary per user.
        users_with_perms = []
        for perm in get_users_with_perms(xform, attach_perms=True).items():
            has_perm = []
            if 'change_xform' in perm[1]:
                has_perm.append(_(u"Can Edit"))
            if 'view_xform' in perm[1]:
                has_perm.append(_(u"Can View"))
            users_with_perms.append((perm[0], u" | ".join(has_perm)))
        context.users_with_perms = users_with_perms
        context.permission_form = PermissionForm(username)
    if xform.allows_sms:
        context.sms_support_doc = get_autodoc_for(xform)
    return render_to_response("show.html", context_instance=context)
@require_GET
def api_token(request, username=None):
    """Display the REST API token for *username*.

    Security fix: the token page previously had no access check, so any
    visitor could fetch any user's API token (full account impersonation
    via TokenAuthentication). Only the authenticated owner may view it now.
    """
    if not request.user.is_authenticated() or \
            request.user.username != username:
        return HttpResponseForbidden(_(u'Permission denied.'))
    user = get_object_or_404(User, username=username)
    context = RequestContext(request)
    # get_or_create lazily issues a token on first visit.
    context.token_key, created = Token.objects.get_or_create(user=user)
    return render_to_response("api_token.html", context_instance=context)
@require_http_methods(["GET", "OPTIONS"])
def api(request, username=None, id_string=None):
    """
    Returns all results as JSON. If a parameter string is passed,
    it takes the 'query' parameter, converts this string to a dictionary, and
    that is then used as a MongoDB query string.
    NOTE: only a specific set of operators are allowed, currently $or and $and.
    Please send a request if you'd like another operator to be enabled.
    NOTE: Your query must be valid JSON, double check it here,
    http://json.parser.online.fr/
    E.g. api?query='{"last_name": "Smith"}'
    Supports JSONP via a 'callback' GET parameter.
    """
    if request.method == "OPTIONS":
        # CORS preflight: reply with the CORS headers only.
        response = HttpResponse()
        add_cors_headers(response)
        return response
    # Authenticate via HTTP basic auth when session auth is absent.
    # (Previously called twice back-to-back; once is sufficient.)
    helper_auth_helper(request)
    xform, owner = check_and_set_user_and_form(username, id_string, request)
    if not xform:
        return HttpResponseForbidden(_(u'Not shared.'))
    try:
        args = {
            'username': username,
            'id_string': id_string,
            'query': request.GET.get('query'),
            'fields': request.GET.get('fields'),
            'sort': request.GET.get('sort')
        }
        if 'start' in request.GET:
            args["start"] = int(request.GET.get('start'))
        if 'limit' in request.GET:
            args["limit"] = int(request.GET.get('limit'))
        if 'count' in request.GET:
            args["count"] = int(request.GET.get('count')) > 0
        cursor = ParsedInstance.query_mongo(**args)
    except ValueError as e:
        # Bad pagination/count values or an invalid query string.
        return HttpResponseBadRequest(str(e))
    records = list(cursor)
    response_text = json.dumps(records)
    callback = request.GET.get('callback')
    if callback:
        # JSONP: wrap the payload in the requested callback.
        response_text = "%s(%s)" % (callback, response_text)
    response = HttpResponse(response_text, mimetype='application/json')
    add_cors_headers(response)
    return response
@require_GET
def public_api(request, username, id_string):
    """
    Returns public information about the form as JSON
    """
    xform = get_object_or_404(XForm,
                              user__username=username, id_string=id_string)
    _DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
    # Expose only public, non-sensitive metadata about the form.
    exports = {
        'username': xform.user.username,
        'id_string': xform.id_string,
        'bamboo_dataset': xform.bamboo_dataset,
        'shared': xform.shared,
        'shared_data': xform.shared_data,
        'downloadable': xform.downloadable,
        'is_crowd_form': xform.is_crowd_form,
        'title': xform.title,
        'date_created': xform.date_created.strftime(_DATETIME_FORMAT),
        'date_modified': xform.date_modified.strftime(_DATETIME_FORMAT),
        'uuid': xform.uuid,
    }
    return HttpResponse(json.dumps(exports), mimetype='application/json')
@login_required
def edit(request, username, id_string):
    """Apply a single settings change to one XForm.

    GET with ?crowdform=add|delete manages crowdform membership for the
    requesting user. Otherwise, exactly one POST field / uploaded file
    selects the change (description, title, sharing toggles, licenses,
    source, SMS support, media, map layer, supporting doc); each change is
    audit-logged, then the form is saved via xform.update(). Only the owner
    or users with change_xform permission may edit.
    """
    xform = XForm.objects.get(user__username=username, id_string=id_string)
    owner = xform.user
    if request.GET.get('crowdform'):
        crowdform_action = request.GET['crowdform']
        request_username = request.user.username
        # ensure is crowdform
        if xform.is_crowd_form:
            if crowdform_action == 'delete':
                MetaData.objects.get(
                    xform__id_string=id_string,
                    data_value=request_username,
                    data_type=MetaData.CROWDFORM_USERS
                ).delete()
            elif crowdform_action == 'add':
                MetaData.crowdform_users(xform, request_username)
            # Back to the requesting user's profile.
            return HttpResponseRedirect(reverse(profile, kwargs={
                'username': request_username
            }))
    if username == request.user.username or\
            request.user.has_perm('odk_logger.change_xform', xform):
        if request.POST.get('description'):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Description for '%(id_string)s' updated from "
                  "'%(old_description)s' to '%(new_description)s'.") %
                {
                    'id_string': xform.id_string,
                    'old_description': xform.description,
                    'new_description': request.POST['description']
                }, audit, request)
            xform.description = request.POST['description']
        elif request.POST.get('title'):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Title for '%(id_string)s' updated from "
                  "'%(old_title)s' to '%(new_title)s'.") %
                {
                    'id_string': xform.id_string,
                    'old_title': xform.title,
                    'new_title': request.POST.get('title')
                }, audit, request)
            xform.title = request.POST['title']
        elif request.POST.get('toggle_shared'):
            # Each sub-case flips one boolean flag on the form.
            if request.POST['toggle_shared'] == 'data':
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Data sharing updated for '%(id_string)s' from "
                      "'%(old_shared)s' to '%(new_shared)s'.") %
                    {
                        'id_string': xform.id_string,
                        'old_shared': _("shared")
                        if xform.shared_data else _("not shared"),
                        'new_shared': _("shared")
                        if not xform.shared_data else _("not shared")
                    }, audit, request)
                xform.shared_data = not xform.shared_data
            elif request.POST['toggle_shared'] == 'form':
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Form sharing for '%(id_string)s' updated "
                      "from '%(old_shared)s' to '%(new_shared)s'.") %
                    {
                        'id_string': xform.id_string,
                        'old_shared': _("shared")
                        if xform.shared else _("not shared"),
                        'new_shared': _("shared")
                        if not xform.shared else _("not shared")
                    }, audit, request)
                xform.shared = not xform.shared
            elif request.POST['toggle_shared'] == 'active':
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Active status for '%(id_string)s' updated from "
                      "'%(old_shared)s' to '%(new_shared)s'.") %
                    {
                        'id_string': xform.id_string,
                        'old_shared': _("shared")
                        if xform.downloadable else _("not shared"),
                        'new_shared': _("shared")
                        if not xform.downloadable else _("not shared")
                    }, audit, request)
                xform.downloadable = not xform.downloadable
            elif request.POST['toggle_shared'] == 'crowd':
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Crowdform status for '%(id_string)s' updated from "
                      "'%(old_status)s' to '%(new_status)s'.") %
                    {
                        'id_string': xform.id_string,
                        'old_status': _("crowdform")
                        if not xform.is_crowd_form else _("not crowdform"),
                        'new_status': _("crowdform")
                        if xform.is_crowd_form else _("not crowdform"),
                    }, audit, request)
                if xform.is_crowd_form:
                    xform.is_crowd_form = False
                else:
                    # Becoming a crowdform implies full sharing.
                    xform.is_crowd_form = True
                    xform.shared = True
                    xform.shared_data = True
        elif request.POST.get('form-license'):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Form License for '%(id_string)s' updated to "
                  "'%(form_license)s'.") %
                {
                    'id_string': xform.id_string,
                    'form_license': request.POST['form-license'],
                }, audit, request)
            MetaData.form_license(xform, request.POST['form-license'])
        elif request.POST.get('data-license'):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Data license for '%(id_string)s' updated to "
                  "'%(data_license)s'.") %
                {
                    'id_string': xform.id_string,
                    'data_license': request.POST['data-license'],
                }, audit, request)
            MetaData.data_license(xform, request.POST['data-license'])
        elif request.POST.get('source') or request.FILES.get('source'):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Source for '%(id_string)s' updated to '%(source)s'.") %
                {
                    'id_string': xform.id_string,
                    'source': request.POST.get('source'),
                }, audit, request)
            MetaData.source(xform, request.POST.get('source'),
                            request.FILES.get('source'))
        elif request.POST.get('enable_sms_support_trigger') is not None:
            sms_support_form = ActivateSMSSupportFom(request.POST)
            if sms_support_form.is_valid():
                audit = {
                    'xform': xform.id_string
                }
                enabled = \
                    sms_support_form.cleaned_data.get('enable_sms_support')
                if enabled:
                    audit_action = Actions.SMS_SUPPORT_ACTIVATED
                    audit_message = _(u"SMS Support Activated on")
                else:
                    audit_action = Actions.SMS_SUPPORT_DEACTIVATED
                    audit_message = _(u"SMS Support Deactivated on")
                audit_log(
                    audit_action, request.user, owner,
                    audit_message
                    % {'id_string': xform.id_string}, audit, request)
                # stored previous states to be able to rollback form status
                # in case we can't save.
                pe = xform.allows_sms
                pid = xform.sms_id_string
                xform.allows_sms = enabled
                xform.sms_id_string = \
                    sms_support_form.cleaned_data.get('sms_id_string')
                compat = check_form_sms_compatibility(None,
                                                      json.loads(xform.json))
                # Refuse to enable SMS on an incompatible form.
                if compat['type'] == 'alert-error':
                    xform.allows_sms = False
                    xform.sms_id_string = pid
                try:
                    xform.save()
                except IntegrityError:
                    # unfortunately, there's no feedback mechanism here
                    xform.allows_sms = pe
                    xform.sms_id_string = pid
        elif request.FILES.get('media'):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Media added to '%(id_string)s'.") %
                {
                    'id_string': xform.id_string
                }, audit, request)
            for aFile in request.FILES.getlist("media"):
                MetaData.media_upload(xform, aFile)
        elif request.POST.get('map_name'):
            mapbox_layer = MapboxLayerForm(request.POST)
            if mapbox_layer.is_valid():
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Map layer added to '%(id_string)s'.") %
                    {
                        'id_string': xform.id_string
                    }, audit, request)
                MetaData.mapbox_layer_upload(xform, mapbox_layer.cleaned_data)
        elif request.FILES:
            # Any other uploaded file is treated as a supporting document.
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Supporting document added to '%(id_string)s'.") %
                {
                    'id_string': xform.id_string
                }, audit, request)
            MetaData.supporting_docs(xform, request.FILES['doc'])
        xform.update()
        if request.is_ajax():
            return HttpResponse(_(u'Updated succeeded.'))
        else:
            return HttpResponseRedirect(reverse(show, kwargs={
                'username': username,
                'id_string': id_string
            }))
    return HttpResponseForbidden(_(u'Update failed.'))
def getting_started(request):
    """Render the getting-started page inside the shared base template."""
    ctx = RequestContext(request)
    ctx.template = 'getting_started.html'  # sub-template pulled in by base.html
    return render_to_response('base.html', context_instance=ctx)
def support(request):
    """Render the support page inside the shared base template."""
    ctx = RequestContext(request)
    ctx.template = 'support.html'  # sub-template pulled in by base.html
    return render_to_response('base.html', context_instance=ctx)
def faq(request):
    """Render the FAQ page inside the shared base template."""
    ctx = RequestContext(request)
    ctx.template = 'faq.html'  # sub-template pulled in by base.html
    return render_to_response('base.html', context_instance=ctx)
def xls2xform(request):
    """Render the XLS-to-XForm converter page inside the base template."""
    ctx = RequestContext(request)
    ctx.template = 'xls2xform.html'  # sub-template pulled in by base.html
    return render_to_response('base.html', context_instance=ctx)
def tutorial(request):
    """Render the tutorial page, embedding the visitor's ODK server URL."""
    ctx = RequestContext(request)
    ctx.template = 'tutorial.html'
    # Anonymous visitors get a placeholder username in the example URL.
    who = request.user.username if request.user.username else \
        'your-user-name'
    ctx.odk_url = request.build_absolute_uri("/%s" % who)
    return render_to_response('base.html', context_instance=ctx)
def resources(request):
    """Render the resources page with a locale-appropriate slide deck."""
    ctx = RequestContext(request)
    # French locales get the translated SpeakerDeck presentation.
    if 'fr' in request.LANGUAGE_CODE.lower():
        ctx.deck_id = 'a351f6b0a3730130c98b12e3c5740641'
    else:
        ctx.deck_id = '1a33a070416b01307b8022000a1de118'
    return render_to_response('resources.html', context_instance=ctx)
def about_us(request):
    """Render the about-us flatpage inside the base template."""
    ctx = RequestContext(request)
    ctx.a_flatpage = '/about-us/'
    # Anonymous visitors get a placeholder username in the example URL.
    who = request.user.username if request.user.username else \
        'your-user-name'
    ctx.odk_url = request.build_absolute_uri("/%s" % who)
    return render_to_response('base.html', context_instance=ctx)
def syntax(request):
    """Render the form-syntax guide, pulled live from a published Google Doc."""
    # French locales get the translated document.
    if 'fr' in request.LANGUAGE_CODE.lower():
        doc_id = '1EhJTsqX3noztyW-UdKRBABhIln6R3TAvXv58DTZWCU4'
    else:
        doc_id = '1xD5gSjeyjGjw-V9g5hXx7FWeasRvn-L6zeQJsNeAGBI'
    doc = GoogleDoc('https://docs.google.com/document/pub?id=%s' % doc_id)
    ctx = RequestContext(request)
    ctx.content = doc.to_html()
    return render_to_response('base.html', context_instance=ctx)
def form_gallery(request):
    """
    Return a list of urls for all the shared xls files. This could be
    made a lot prettier.
    """
    context = RequestContext(request)
    if request.user.is_authenticated():
        context.loggedin_user = request.user
    context.shared_forms = XForm.objects.filter(shared=True)
    # id_strings the clones of these shared forms would carry.
    suffixed_ids = [
        xf.id_string + XForm.CLONED_SUFFIX for xf in context.shared_forms
    ]
    # id_strings (suffix stripped) of shared forms this user already cloned.
    context.cloned = [
        xf.id_string.split(XForm.CLONED_SUFFIX)[0]
        for xf in XForm.objects.filter(
            user__username=request.user.username,
            id_string__in=suffixed_ids
        )
    ]
    return render_to_response('form_gallery.html', context_instance=context)
def download_metadata(request, username, id_string, data_id):
    """Stream a supporting document attached to a form.

    Allowed for the form owner or (for shared forms) anyone; the download
    is audit-logged. 404 if the stored file is missing, 403 otherwise.
    """
    xform = get_object_or_404(XForm,
                              user__username=username, id_string=id_string)
    owner = xform.user
    if username == request.user.username or xform.shared:
        data = get_object_or_404(MetaData, pk=data_id)
        file_path = data.data_file.name
        filename, extension = os.path.splitext(file_path.split('/')[-1])
        extension = extension.strip('.')
        # Use the configured default storage backend.
        dfs = get_storage_class()()
        if dfs.exists(file_path):
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Document '%(filename)s' for '%(id_string)s' downloaded.") %
                {
                    'id_string': xform.id_string,
                    'filename': "%s.%s" % (filename, extension)
                }, audit, request)
            response = response_with_mimetype_and_name(
                data.data_file_type,
                filename, extension=extension, show_date=False,
                file_path=file_path)
            return response
        else:
            return HttpResponseNotFound()
    return HttpResponseForbidden(_(u'Permission denied.'))
@login_required()
def delete_metadata(request, username, id_string, data_id):
    """Delete a form's metadata row (and its stored file, if any).

    ?del=... removes a supporting document (file + DB row);
    ?map_name_del=... removes a map layer (DB row only). Only the form
    owner may delete; both paths redirect back to the form page.
    """
    xform = get_object_or_404(XForm,
                              user__username=username, id_string=id_string)
    owner = xform.user
    data = get_object_or_404(MetaData, pk=data_id)
    dfs = get_storage_class()()
    req_username = request.user.username
    if request.GET.get('del', False) and username == req_username:
        try:
            # Remove the stored file first, then the database row.
            dfs.delete(data.data_file.name)
            data.delete()
            audit = {
                'xform': xform.id_string
            }
            audit_log(
                Actions.FORM_UPDATED, request.user, owner,
                _("Document '%(filename)s' deleted from '%(id_string)s'.") %
                {
                    'id_string': xform.id_string,
                    'filename': os.path.basename(data.data_file.name)
                }, audit, request)
            return HttpResponseRedirect(reverse(show, kwargs={
                'username': username,
                'id_string': id_string
            }))
        except Exception:
            # Storage or DB failure: surface as a generic 500.
            return HttpResponseServerError()
    elif request.GET.get('map_name_del', False) and username == req_username:
        # Map layers have no stored file; delete the row only.
        data.delete()
        audit = {
            'xform': xform.id_string
        }
        audit_log(
            Actions.FORM_UPDATED, request.user, owner,
            _("Map layer deleted from '%(id_string)s'.") %
            {
                'id_string': xform.id_string,
            }, audit, request)
        return HttpResponseRedirect(reverse(show, kwargs={
            'username': username,
            'id_string': id_string
        }))
    return HttpResponseForbidden(_(u'Permission denied.'))
def download_media_data(request, username, id_string, data_id):
    """Serve (or, with ``?del``, delete) a media attachment of a form.

    With ``?del`` set, only the owner may delete the file and record.
    Otherwise the stored file is streamed back with its recorded mimetype.
    """
    xform = get_object_or_404(
        XForm, user__username=username, id_string=id_string)
    owner = xform.user
    data = get_object_or_404(MetaData, id=data_id)
    dfs = get_storage_class()()
    if request.GET.get('del', False):
        if username == request.user.username:
            try:
                # Remove the stored file and the database record together.
                dfs.delete(data.data_file.name)
                data.delete()
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Media download '%(filename)s' deleted from "
                      "'%(id_string)s'.") %
                    {
                        'id_string': xform.id_string,
                        'filename': os.path.basename(data.data_file.name)
                    }, audit, request)
                return HttpResponseRedirect(reverse(show, kwargs={
                    'username': username,
                    'id_string': id_string
                }))
            except Exception:
                return HttpResponseServerError()
    else:
        # NOTE(review): the owner/shared check is commented out below, so any
        # non-empty username allows the download -- confirm this is intended.
        if username:  # == request.user.username or xform.shared:
            file_path = data.data_file.name
            filename, extension = os.path.splitext(file_path.split('/')[-1])
            extension = extension.strip('.')
            if dfs.exists(file_path):
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_UPDATED, request.user, owner,
                    _("Media '%(filename)s' downloaded from "
                      "'%(id_string)s'.") %
                    {
                        'id_string': xform.id_string,
                        'filename': os.path.basename(file_path)
                    }, audit, request)
                response = response_with_mimetype_and_name(
                    data.data_file_type,
                    filename, extension=extension, show_date=False,
                    file_path=file_path)
                return response
            else:
                return HttpResponseNotFound()
    return HttpResponseForbidden(_(u'Permission denied.'))
def form_photos(request, username, id_string):
    """Render a gallery of all image attachments submitted to a form.

    For each image attachment, builds URLs for the four thumbnail sizes
    handled by the ``attachment_url`` view.
    """
    xform, owner = check_and_set_user_and_form(username, id_string, request)
    if not xform:
        return HttpResponseForbidden(_(u'Not shared.'))
    context = RequestContext(request)
    context.form_view = True
    context.content_user = owner
    context.xform = xform
    image_urls = []
    for instance in xform.surveys.all():
        for attachment in instance.attachments.all():
            # skip if not image e.g video or file
            if not attachment.mimetype.startswith('image'):
                continue
            # One dict per attachment, keyed by thumbnail size.
            data = {}
            for i in ['small', 'medium', 'large', 'original']:
                url = reverse(attachment_url, kwargs={'size': i})
                url = '%s?media_file=%s' % (url, attachment.media_file.name)
                data[i] = url
            image_urls.append(data)
    context.images = image_urls
    context.profile, created = UserProfile.objects.get_or_create(user=owner)
    return render_to_response('form_photos.html', context_instance=context)
@require_POST
def set_perm(request, username, id_string):
    """Grant/revoke per-user permissions, or toggle the public link, on a form.

    POST params: ``perm_type`` in {'edit', 'view', 'remove', 'link'} and
    ``for_user`` (a username, or 'all'/'none'/'toggle' when
    ``perm_type == 'link'``).  Responds with JSON for AJAX requests,
    otherwise redirects back to the form's `show` page.
    """
    xform = get_object_or_404(XForm,
                              user__username=username, id_string=id_string)
    owner = xform.user
    if username != request.user.username\
            and not has_permission(xform, username, request):
        return HttpResponseForbidden(_(u'Permission denied.'))
    try:
        perm_type = request.POST['perm_type']
        for_user = request.POST['for_user']
    except KeyError:
        return HttpResponseBadRequest()
    if perm_type in ['edit', 'view', 'remove']:
        try:
            user = User.objects.get(username=for_user)
        except User.DoesNotExist:
            # Unknown target user: flash an error, fall through to redirect.
            messages.add_message(
                request, messages.INFO,
                _(u"Wrong username <b>%s</b>." % for_user),
                extra_tags='alert-error')
        else:
            # Grants are audited and applied only if they change something.
            if perm_type == 'edit' and\
                    not user.has_perm('change_xform', xform):
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_PERMISSIONS_UPDATED, request.user, owner,
                    _("Edit permissions on '%(id_string)s' assigned to "
                      "'%(for_user)s'.") %
                    {
                        'id_string': xform.id_string,
                        'for_user': for_user
                    }, audit, request)
                assign_perm('change_xform', user, xform)
            elif perm_type == 'view' and\
                    not user.has_perm('view_xform', xform):
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_PERMISSIONS_UPDATED, request.user, owner,
                    _("View permissions on '%(id_string)s' "
                      "assigned to '%(for_user)s'.") %
                    {
                        'id_string': xform.id_string,
                        'for_user': for_user
                    }, audit, request)
                assign_perm('view_xform', user, xform)
            elif perm_type == 'remove':
                # 'remove' strips both permissions unconditionally.
                audit = {
                    'xform': xform.id_string
                }
                audit_log(
                    Actions.FORM_PERMISSIONS_UPDATED, request.user, owner,
                    _("All permissions on '%(id_string)s' "
                      "removed from '%(for_user)s'.") %
                    {
                        'id_string': xform.id_string,
                        'for_user': for_user
                    }, audit, request)
                remove_perm('change_xform', user, xform)
                remove_perm('view_xform', user, xform)
    elif perm_type == 'link':
        # Public-link handling: for_user is 'all'/'none'/'toggle' here.
        current = MetaData.public_link(xform)
        if for_user == 'all':
            MetaData.public_link(xform, True)
        elif for_user == 'none':
            MetaData.public_link(xform, False)
        elif for_user == 'toggle':
            MetaData.public_link(xform, not current)
        audit = {
            'xform': xform.id_string
        }
        audit_log(
            Actions.FORM_PERMISSIONS_UPDATED, request.user, owner,
            _("Public link on '%(id_string)s' %(action)s.") %
            {
                'id_string': xform.id_string,
                'action': "created"
                if for_user == "all" or
                (for_user == "toggle" and not current) else "removed"
            }, audit, request)
    if request.is_ajax():
        return HttpResponse(
            json.dumps(
                {'status': 'success'}), mimetype='application/json')
    return HttpResponseRedirect(reverse(show, kwargs={
        'username': username,
        'id_string': id_string
    }))
def show_submission(request, username, id_string, uuid):
    """Audit access to a single submission and redirect to its responses."""
    xform, is_owner, can_edit, can_view = get_xform_and_perms(
        username, id_string, request)
    owner = xform.user
    # Access requires shared data, view permission, or a valid public link.
    has_access = (xform.shared_data or can_view or
                  request.session.get('public_link') == xform.uuid)
    if not has_access:
        return HttpResponseRedirect(reverse(home))
    instance = get_object_or_404(Instance, uuid=uuid)
    audit_log(
        Actions.SUBMISSION_ACCESSED, request.user, owner,
        _("Submission '%(uuid)s' on '%(id_string)s' accessed.") %
        {
            'id_string': xform.id_string,
            'uuid': uuid
        }, {'xform': xform.id_string}, request)
    return HttpResponseRedirect(
        reverse(survey_responses, kwargs={'instance_id': instance.pk}))
@require_POST
@login_required
def delete_data(request, username=None, id_string=None):
    """Soft-delete one submission (by POSTed ``id``) and return JSON/JSONP."""
    xform, owner = check_and_set_user_and_form(username, id_string, request)
    if not xform:
        return HttpResponseForbidden(_(u'Not shared.'))
    data_id = request.POST.get('id')
    if not data_id:
        return HttpResponseBadRequest(_(u"id must be specified"))
    # Soft delete: marks the instance, does not purge it from the DB.
    Instance.set_deleted_at(data_id)
    audit_log(
        Actions.SUBMISSION_DELETED, request.user, owner,
        _("Deleted submission with id '%(record_id)s' "
          "on '%(id_string)s'.") %
        {
            'id_string': xform.id_string,
            'record_id': data_id
        }, {'xform': xform.id_string}, request)
    payload = json.dumps({"success": "Deleted data %s" % data_id})
    # JSONP: wrap the payload when a non-empty callback is supplied.
    callback = request.GET.get('callback')
    if callback:
        payload = "%s(%s)" % (callback, payload)
    return HttpResponse(payload, mimetype='application/json')
@require_POST
@is_owner
def link_to_bamboo(request, username, id_string):
    """(Re)create the bamboo dataset linked to a form.

    Deletes any existing dataset first, then creates a fresh one from all
    of the form's data and registers the REST service for it.
    """
    xform = get_object_or_404(XForm,
                              user__username=username, id_string=id_string)
    owner = xform.user
    # Imported locally -- presumably to avoid a hard dependency at import
    # time when bamboo is not configured (confirm).
    from utils.bamboo import (get_new_bamboo_dataset,
                              delete_bamboo_dataset, ensure_rest_service)
    audit = {
        'xform': xform.id_string
    }
    # try to delete the dataset first (in case it exists)
    if xform.bamboo_dataset and delete_bamboo_dataset(xform):
        xform.bamboo_dataset = u''
        xform.save()
        audit_log(
            Actions.BAMBOO_LINK_DELETED, request.user, owner,
            _("Bamboo link deleted on '%(id_string)s'.")
            % {'id_string': xform.id_string}, audit, request)
    # create a new one from all the data
    dataset_id = get_new_bamboo_dataset(xform)
    # update XForm
    xform.bamboo_dataset = dataset_id
    xform.save()
    ensure_rest_service(xform)
    audit_log(
        Actions.BAMBOO_LINK_CREATED, request.user, owner,
        _("Bamboo link created on '%(id_string)s'.") %
        {
            'id_string': xform.id_string,
        }, audit, request)
    return HttpResponseRedirect(reverse(show, kwargs={
        'username': username,
        'id_string': id_string
    }))
@require_POST
@is_owner
def update_xform(request, username, id_string):
    """Replace a published form's XLS definition with an uploaded file.

    Delegates the publish step (and its error handling) to ``publish_form``
    via the ``set_form`` closure, flashes the outcome, and redirects back
    to the form's `show` page.
    """
    xform = get_object_or_404(
        XForm, user__username=username, id_string=id_string)
    owner = xform.user
    def set_form():
        # Closure over request/xform: performs the actual publish and
        # returns the flash-message dict consumed below.
        form = QuickConverter(request.POST, request.FILES)
        survey = form.publish(request.user, id_string).survey
        enketo_webform_url = reverse(
            enter_data,
            kwargs={'username': username, 'id_string': survey.id_string}
        )
        audit = {
            'xform': xform.id_string
        }
        audit_log(
            Actions.FORM_XLS_UPDATED, request.user, owner,
            _("XLS for '%(id_string)s' updated.") %
            {
                'id_string': xform.id_string,
            }, audit, request)
        return {
            'type': 'alert-success',
            'text': _(u'Successfully published %(form_id)s.'
                      u' <a href="%(form_url)s">Enter Web Form</a>'
                      u' or <a href="#preview-modal" data-toggle="modal">'
                      u'Preview Web Form</a>')
            % {'form_id': survey.id_string,
               'form_url': enketo_webform_url}
        }
    message = publish_form(set_form)
    messages.add_message(
        request, messages.INFO, message['text'], extra_tags=message['type'])
    return HttpResponseRedirect(reverse(show, kwargs={
        'username': username,
        'id_string': id_string
    }))
@is_owner
def activity(request, username):
    """Render the audit-activity page for the given user."""
    page_owner = get_object_or_404(User, username=username)
    ctx = RequestContext(request)
    ctx.user = page_owner
    return render_to_response('activity.html', context_instance=ctx)
def activity_fields(request):
    """Describe the queryable columns of the activity grid as a JSON list.

    Each entry: ``id`` (mongo field name), ``label``, ``type``, whether it
    is ``searchable``, and optionally a fixed ``options`` list.
    """
    fields = [
        {
            'id': 'created_on',
            'label': _('Performed On'),
            'type': 'datetime',
            'searchable': False
        },
        {
            'id': 'action',
            'label': _('Action'),
            'type': 'string',
            'searchable': True,
            # Fixed choice list: every known audit action, sorted.
            'options': sorted([Actions[e] for e in Actions.enums])
        },
        {
            # NOTE(review): these two labels are not wrapped in _() unlike
            # the ones above -- confirm whether that is intentional.
            'id': 'user',
            'label': 'Performed By',
            'type': 'string',
            'searchable': True
        },
        {
            'id': 'msg',
            'label': 'Description',
            'type': 'string',
            'searchable': True
        },
    ]
    response_text = json.dumps(fields)
    return HttpResponse(response_text, mimetype='application/json')
@is_owner
def activity_api(request, username):
    """Query the mongo-backed audit log and return matching records as JSON.

    GET params ``query``/``fields``/``sort`` are JSON-encoded; ``start``,
    ``limit`` and ``count`` are integers.  Supports JSONP via ``callback``.
    """
    from bson.objectid import ObjectId
    def stringify_unknowns(obj):
        # json.dumps fallback: render ObjectIds and datetimes as strings;
        # anything else becomes null rather than raising.
        if isinstance(obj, ObjectId):
            return str(obj)
        if isinstance(obj, datetime):
            return obj.strftime(DATETIME_FORMAT)
        #raise TypeError
        return None
    try:
        query_args = {
            'username': username,
            'query': json.loads(request.GET.get('query'))
            if request.GET.get('query') else {},
            'fields': json.loads(request.GET.get('fields'))
            if request.GET.get('fields') else [],
            'sort': json.loads(request.GET.get('sort'))
            if request.GET.get('sort') else {}
        }
        if 'start' in request.GET:
            query_args["start"] = int(request.GET.get('start'))
        if 'limit' in request.GET:
            query_args["limit"] = int(request.GET.get('limit'))
        if 'count' in request.GET:
            query_args["count"] = True \
                if int(request.GET.get('count')) > 0 else False
        cursor = AuditLog.query_mongo(**query_args)
    except ValueError, e:
        # Bad JSON or non-integer start/limit/count.
        return HttpResponseBadRequest(e.__str__())
    records = list(record for record in cursor)
    response_text = json.dumps(records, default=stringify_unknowns)
    if 'callback' in request.GET and request.GET.get('callback') != '':
        callback = request.GET.get('callback')
        response_text = ("%s(%s)" % (callback, response_text))
    return HttpResponse(response_text, mimetype='application/json')
def qrcode(request, username, id_string):
    """Return an HTML fragment with a QR code linking to the enketo form.

    On failure returns a bootstrap-style error div with HTTP 400.
    """
    formhub_url = settings.SERVER_EXTERNAL_URL
    form_url = formhub_url + username
    if settings.TESTING_MODE:
        # Stable URL so tests do not depend on server configuration.
        form_url = "https://testserver.com/bob"
    results = _(u"Unexpected Error occured: No QRCODE generated")
    status = 200
    try:
        url = enketo_url(form_url, id_string)
    except Exception, e:
        error_msg = _(u"Error Generating QRCODE: %s" % e)
        results = """<div class="alert alert-error">%s</div>""" % error_msg
        status = 400
    else:
        if url:
            # generate_qrcode presumably returns an image src (path or data
            # URI) -- confirm against its definition.
            image = generate_qrcode(url)
            results = """<img class="qrcode" src="%s" alt="%s" />
                    </br><a href="%s" target="_blank">%s</a>""" \
                % (image, url, url, url)
        else:
            status = 400
    return HttpResponse(results, mimetype='text/html', status=status)
def enketo_preview(request, username, id_string):
    """Redirect to the enketo preview page for a form the user may view."""
    xform = get_object_or_404(
        XForm, user__username=username, id_string=id_string)
    owner = xform.user
    if not has_permission(xform, owner, request, xform.shared):
        return HttpResponseForbidden(_(u'Not shared.'))
    # Build <ENKETO_PREVIEW_URL>?server=<profile-url>&id=<id_string>.
    profile_url = request.build_absolute_uri(
        reverse(profile, kwargs={'username': owner.username}))
    preview_url = "%s?server=%s&id=%s" % (
        settings.ENKETO_PREVIEW_URL, profile_url, xform.id_string)
    return HttpResponseRedirect(preview_url)
@require_GET
@login_required
def username_list(request):
    """Autocomplete endpoint: active usernames starting with ``query``."""
    prefix = request.GET.get('query')
    matches = []
    if prefix:
        rows = User.objects.filter(
            username__startswith=prefix, is_active=True,
            pk__gte=0).values('username')
        matches = [row['username'] for row in rows]
    return HttpResponse(json.dumps(matches), mimetype='application/json')
| {
"content_hash": "b28996b913ae00f0f01df5d2ac40eede",
"timestamp": "",
"source": "github",
"line_count": 1322,
"max_line_length": 79,
"avg_line_length": 39.54841149773071,
"alnum_prop": 0.558996232044833,
"repo_name": "wesley1001/formhub",
"id": "b818df50e0edadb3afe084a320139201fc3e808d",
"size": "52283",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "main/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "286133"
},
{
"name": "HTML",
"bytes": "1669852"
},
{
"name": "JavaScript",
"bytes": "2294844"
},
{
"name": "Makefile",
"bytes": "8446"
},
{
"name": "Python",
"bytes": "1543287"
},
{
"name": "Shell",
"bytes": "11919"
}
],
"symlink_target": ""
} |
import scrapy
class StackOverflowSpider(scrapy.Spider):
    """Crawl StackOverflow's top-voted question list and scrape each question."""
    name = 'stackoverflow'
    start_urls = ['http://stackoverflow.com/questions?sort=votes']
    def parse(self, response):
        # Listing page: follow every question link found in the summaries.
        for href in response.css('.question-summary h3 a::attr(href)'):
            full_url = response.urljoin(href.extract())
            yield scrapy.Request(full_url, callback=self.parse_question)
    def parse_question(self, response):
        # Question page: emit one item with title, vote count, body HTML,
        # tag text and the canonical URL.
        yield {
            'title': response.css('h1 a::text').extract()[0],
            'votes': response.css('.question .vote-count-post::text').extract()[0],
            'body': response.css('.question .post-text').extract()[0],
            'tags': response.css('.question .post-tags::text').extract(),
            'link': response.url,
} | {
"content_hash": "35e9cd6c3544a00d8a92e4a599c9acc8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 40.78947368421053,
"alnum_prop": 0.6064516129032258,
"repo_name": "Akagi201/learning-python",
"id": "89bda90f74f1e8318fdf4625fb4999c0048a6576",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy/stackoverflow_spider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from cms.models import Blog
from cms.models import Post
class BlogAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Blog objects."""
    # Columns shown on the Blog list page.
    list_display = ('id', 'user', 'name', 'description',
                    'created_at', 'updated_at')
    # '^id' anchors the search to the start of the id column.
    search_fields = ['^id', 'name', 'description']
class PostAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Post objects."""
    list_display = ('id', 'blog_name', 'contents',
                    'created_at', 'updated_at')
    def blog_name(self, obj):
        """Computed column: name of the post's parent blog."""
        parent_blog = obj.blog
        return parent_blog.name
    # Japanese column header ("blog name") shown in the admin UI.
    blog_name.short_description = 'ブログ名'
# Register the Blog model with the default admin site.
admin.site.register(Blog, BlogAdmin)
admin.site.register(Post, PostAdmin) | {
"content_hash": "dea76f1e9e96b1038278b4964f01d303",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 50,
"avg_line_length": 18.771428571428572,
"alnum_prop": 0.5799086757990868,
"repo_name": "masaki-sato/django-sample",
"id": "5d8c5eb5db0634651b04de390bbec0b3ab2763dd",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/cms/admin.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import sys
import json
from random import randint
import StringIO
import socket
import uuid
import shutil
# Test command line usage:
#   python genimporter_solver.py <modulename> <quakeml> <station> <user_conf>
# scriptpath1 = "./resources/" + sys.argv[1] + ".py"
# (The two notes above were no-op string-literal statements in the original;
# converted to comments so they are not evaluated at import time.)
from specfemGlobeCompiler import *
class GeneralImporter():
    """Drive the specfemGlobeCompiler PE from command-line-style arguments.

    ``args`` is ``sys.argv``-like:
    [script, json_input, par_file, cmt_solution, stations, compiler].
    """
    def __init__(self, args):
        # Keep the raw argument list; individual items are read in runit().
        self.input = args
    def runit(self):
        """Configure/compile specfem and write + publish the provenance log.

        Reads the JSON produced by the inputGenerator PE (self.input[1]),
        builds the data/system dicts the PE expects, runs the compiler PE,
        and copies the provenance record to ``$PROV_PATH``.
        """
        # 'with' ensures the JSON input handle is closed (original leaked it).
        with open(self.input[1]) as json_file:
            jsonout = json.load(json_file)
        par_file = self.input[2]
        cmt_solution = self.input[3]
        stations = self.input[4]
        compiler = self.input[5]
        # NOTE(review): an unrecognised compiler leaves `configure` unbound
        # and raises NameError below -- preserved from the original.
        if compiler == "GNU":
            configure = "./configure FC=gfortran CC=gcc MPIFC=mpif90"
        if compiler == "Intel":
            configure = "./configure FC=ifort CC=icc CXX=icpc MPIFC=mpiifort"
        # 1- Data dict: the last item produced by the inputGenerator PE
        # contains the location of the needed input files.
        data = {"streams": [jsonout["streams"][len(jsonout["streams"]) - 1]]}
        # 2- System dict
        verce = {
            "inputrootpath": "./",
            "outputdest": "./",
            "username": jsonout["metadata"]["username"],
            "runId": jsonout["metadata"]["runId"],
            "outputid": str(uuid.uuid1()),
        }
        parameters = {"par_file": par_file, "cmt_solution": cmt_solution,
                      "stations": stations, "configure": configure}
        # Configure and compile the specfem source code via the PE.
        proc = specfemGlobeCompiler(name='specfemGlobeCompiler', input=data,
                                    params=parameters, vercejson=verce,
                                    stdoutredirect=False, caller=self)
        outputcom = proc.process()
        provenancebulk = [outputcom["metadata"]]
        prov_name = "provout_compile-" + verce["runId"]
        # 'with' guarantees the file is flushed AND closed before the copy;
        # the original only flushed and never closed the handle.
        with open(prov_name, "wb") as prov_file:
            prov_file.write(json.dumps(provenancebulk))
        shutil.copy(prov_name, os.environ["PROV_PATH"] + "/")
# Script entry point: command-line arguments are forwarded verbatim.
if __name__ == "__main__":
    proc = GeneralImporter(sys.argv)
    proc.runit()
"content_hash": "8bf62c7db07c7671f7f73ea5b867e96a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 148,
"avg_line_length": 33.705882352941174,
"alnum_prop": 0.6278359511343804,
"repo_name": "rafiqsaleh/VERCE",
"id": "57ba892dc50f5ff501efabdb448ff3a7ea2e02c5",
"size": "2292",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "verce-hpc-pe/src/run_specfemGlobe_compile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3310"
},
{
"name": "CSS",
"bytes": "205682"
},
{
"name": "HTML",
"bytes": "282663"
},
{
"name": "Java",
"bytes": "716231"
},
{
"name": "JavaScript",
"bytes": "10335428"
},
{
"name": "Makefile",
"bytes": "2325"
},
{
"name": "Python",
"bytes": "3426039"
},
{
"name": "Shell",
"bytes": "121346"
},
{
"name": "TeX",
"bytes": "278086"
}
],
"symlink_target": ""
} |
from ovirtcli.shell.cmdshell import CmdShell
class DisconnectCmdShell(CmdShell):
    """Interactive-shell binding for the 'disconnect' command."""
    NAME = 'disconnect'
    def __init__(self, context):
        CmdShell.__init__(self, context)
    def do_disconnect(self, args):
        # Re-dispatch as a full command line terminated by a newline.
        command_line = '%s %s\n' % (DisconnectCmdShell.NAME, args)
        return self.context.execute_string(command_line)
| {
"content_hash": "59cc00f30d3522593246b8959b47ee8b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 25.583333333333332,
"alnum_prop": 0.6677524429967426,
"repo_name": "oVirt/ovirt-engine-cli",
"id": "4f02b3be7267f948c8e830115b7ae1a47fda37e4",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ovirtcli/shell/disconnectcmdshell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "870"
},
{
"name": "Python",
"bytes": "365026"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
import os
import json
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from .menubar import MenuBar
from .statusbar import StatusBar
from .centerwindow import CenterWindow
from gui.uiconfig import windowsoptions
from gui.uiutil import set_skin, changebg
import gui.dialogs as dialogs
from .guiconfig import collectView, views
class MainWindow(QtWidgets.QMainWindow):
    """Top-level frameless main window; dragging/centering handled manually."""
    viewID = "MainWindow"
    @collectView
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint)  # frameless, with system menu, minimizable
        self.initMainWindow()
        self.createCenterWindow()
        self.createMenus()
        self.createToolbars()
        self.createStatusbar()
        self.setskin()
    def initMainWindow(self):
        """Apply title/icon/size settings from ``windowsoptions`` and center."""
        mainwindowSettings = windowsoptions['mainwindow']
        title = mainwindowSettings['title']
        minsize = mainwindowSettings['minsize']
        size = mainwindowSettings['size']
        windowicon = mainwindowSettings['icon']
        fullscreenflag = mainwindowSettings['fullscreenflag']
        desktopWidth = QtWidgets.QDesktopWidget().availableGeometry().width()
        desktopHeight = QtWidgets.QDesktopWidget().availableGeometry().height()
        self.setWindowTitle(title)
        self.setWindowIcon(QtGui.QIcon(windowicon))  # set the application icon
        self.resize(960, 760)
        self.moveCenter()  # pin the window to the center of the screen
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.fullscreenflag = fullscreenflag  # fullscreen state flag at init time
    def createCenterWindow(self):
        """Install the central widget that hosts all page content."""
        self.centeralwindow = CenterWindow(self)
        self.setCentralWidget(self.centeralwindow)
    def createMenus(self):
        menubar = MenuBar(self)
        self.setMenuBar(menubar)
    def createToolbars(self):
        # No toolbars yet; kept for symmetry with the other create* steps.
        pass
    def createStatusbar(self):
        statusbar = StatusBar(self)
        self.setStatusBar(statusbar)
    def setskin(self):
        """Load the QSS stylesheet and background for the main window."""
        set_skin(self, 'gui/skin/qss/main.qss')  # apply main-window stylesheet
        changebg(self, windowsoptions['frameqss'])
    def moveCenter(self):
        """Move the window so its frame is centered on the desktop."""
        qr = self.frameGeometry()
        cp = QtWidgets.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def keyPressEvent(self, event):
        # Esc closes; F11 maximizes via the custom title bar; F9/F8 toggle
        # the menu bar / status bar visibility.
        if event.key() == QtCore.Qt.Key_Escape:
            self.close()
        elif event.key() == QtCore.Qt.Key_F11:
            views['TitleBar'].actionMax()
        elif event.key() == QtCore.Qt.Key_F9:
            bar = self.menuBar()
            bar.setVisible(not bar.isVisible())
        elif event.key() == QtCore.Qt.Key_F8:
            bar = self.statusBar()
            bar.setVisible(not bar.isVisible())
    def mousePressEvent(self, event):
        # mouse press: remember offset so the frameless window can be dragged
        if event.button() == QtCore.Qt.LeftButton:
            self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
            event.accept()
    def mouseReleaseEvent(self, event):
        # mouse release: stop tracking the drag
        if hasattr(self, "dragPosition"):
            del self.dragPosition
    def mouseMoveEvent(self, event):
        # mouse move: while dragging, move the window with the cursor
        if hasattr(self, "dragPosition"):
            if event.buttons() == QtCore.Qt.LeftButton:
                self.move(event.globalPos() - self.dragPosition)
                event.accept()
    # def closeEvent(self, evt):
    #     flag, exitflag = dialogs.exit(windowsoptions['exitdialog'])
    #     if flag:
    #         for item in exitflag:
    #             if item == 'minRadio' and exitflag[item]:
    #                 self.showMinimized()
    #                 evt.ignore()
    #             elif item == 'exitRadio' and exitflag[item]:
    #                 evt.accept()
    #             elif item == 'exitsaveRadio' and exitflag[item]:
    #                 evt.accept()
    #                 self.saveoptions()
    #                 if not os.path.exists("options"):
    #                     os.mkdir("options")
    #                 with open("options\windowsoptions.json", 'w') as f:
    #                     json.dump(windowsoptions, f, indent=4)
    #     else:
    #         evt.ignore()
    def saveoptions(self):
        """Persist the current fullscreen state back into ``windowsoptions``."""
        from gui.uiconfig import windowsoptions
        windowsoptions['mainwindow']['fullscreenflag'] = self.isFullScreen()
| {
"content_hash": "4043605930e35b9f666e66252ce25b28",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 149,
"avg_line_length": 34.328125,
"alnum_prop": 0.6115157032316796,
"repo_name": "dragondjf/CloudSetuper",
"id": "ce1d472901a68477750866a7d11c11f330100a72",
"size": "4573",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setuper desktop app/gui/mainwindow/mainwindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1806"
},
{
"name": "C++",
"bytes": "3059"
},
{
"name": "CSS",
"bytes": "171046"
},
{
"name": "JavaScript",
"bytes": "801718"
},
{
"name": "Lua",
"bytes": "72652"
},
{
"name": "Objective-C",
"bytes": "342"
},
{
"name": "Python",
"bytes": "8361927"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Unit test cases for COT.edit_properties.COTEditProperties class."""
import logging
import os
import re
from COT.commands.tests.command_testcase import CommandTestCase
from COT.commands.edit_properties import COTEditProperties
from COT.data_validation import ValueUnsupportedError
class TestCOTEditProperties(CommandTestCase):
"""Unit tests for COTEditProperties command."""
command_class = COTEditProperties
    def setUp(self):
        """Test case setup function called automatically prior to each test."""
        super(TestCOTEditProperties, self).setUp()
        # Scratch counter available to individual tests; reset per test.
        self.counter = 0
    def test_not_ready_to_run_labels(self):
        """Test ready_to_run() failure scenarios involving the --label opt."""
        self.command.package = self.input_ovf
        # --label requires --properties
        self.command.labels = ["label1", "label2"]
        ready, reason = self.command.ready_to_run()
        self.assertFalse(ready)
        self.assertRegex(reason, r"--label.*requires.*--properties")
        # --label and --properties must have the same number of params
        # (here: 2 labels vs. 1 property -> still not ready)
        self.command.properties = ["foo=bar"]
        ready, reason = self.command.ready_to_run()
        self.assertFalse(ready)
        self.assertRegex(reason, r"--label.*\(2\).*--properties \(1\)")
    def test_not_ready_to_run_descriptions(self):
        """Test ready_to_run() failure scenarios involving the --desc opt."""
        self.command.package = self.input_ovf
        # --desc requires --properties
        self.command.descriptions = ["desc1", "desc2"]
        ready, reason = self.command.ready_to_run()
        self.assertFalse(ready)
        self.assertRegex(reason, r"--description.*requires.*--properties")
        # --desc and --properties must have the same number of params
        # (here: 2 descriptions vs. 1 property -> still not ready)
        self.command.properties = ["foo=bar"]
        ready, reason = self.command.ready_to_run()
        self.assertFalse(ready)
        self.assertRegex(reason, r"--description.*\(2\).*--properties \(1\)")
    def test_set_property_value(self):
        """Set the value of an existing property."""
        self.command.package = self.input_ovf
        self.command.properties = ["login-username=admin"]
        self.command.run()
        self.command.finished()
        # Expected OVF delta: only the ovf:value attribute changes.
        self.check_diff("""
     <ovf:Category>1. Bootstrap Properties</ovf:Category>
-      <ovf:Property ovf:key="login-username" ovf:qualifiers="MaxLen(64)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+      <ovf:Property ovf:key="login-username" ovf:qualifiers="MaxLen(64)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="admin">
       <ovf:Label>Login Username</ovf:Label>
""")
    def test_set_multiple_property_values(self):
        """Set the value of several existing properties."""
        self.command.package = self.input_ovf
        self.command.properties = [
            "login-username=admin",
            "login-password=cisco123",
            "enable-ssh-server=1"]
        self.command.run()
        self.command.finished()
        # All three ovf:value attributes change; note '1' is canonicalized
        # to 'true' for the boolean property.
        self.check_diff("""
     <ovf:Category>1. Bootstrap Properties</ovf:Category>
-      <ovf:Property ovf:key="login-username" ovf:qualifiers="MaxLen(64)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+      <ovf:Property ovf:key="login-username" ovf:qualifiers="MaxLen(64)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="admin">
       <ovf:Label>Login Username</ovf:Label>
...
       </ovf:Property>
-      <ovf:Property ovf:key="login-password" ovf:password="true" \
ovf:qualifiers="MaxLen(25)" ovf:type="string" ovf:userConfigurable="true" \
ovf:value="">
+      <ovf:Property ovf:key="login-password" ovf:password="true" \
ovf:qualifiers="MaxLen(25)" ovf:type="string" ovf:userConfigurable="true" \
ovf:value="cisco123">
       <ovf:Label>Login Password</ovf:Label>
...
     <ovf:Category>2. Features</ovf:Category>
-      <ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="false">
+      <ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="true">
       <ovf:Label>Enable SSH Login</ovf:Label>
""")
    def test_create_property(self):
        """Create new properties but do not set their values yet."""
        self.command.package = self.input_ovf
        self.command.properties = [
            "new-property-2=",  # default value is empty string
            "new-property-3",   # no default value
        ]
        self.command.run()
        self.command.finished()
        # New properties are appended; note only the '=' form gets ovf:value.
        self.check_diff("""
       </ovf:Property>
+      <ovf:Property ovf:key="new-property-2" ovf:type="string" ovf:value="" />
+      <ovf:Property ovf:key="new-property-3" ovf:type="string" />
     </ovf:ProductSection>
""")
    def test_create_and_set_property(self):
        """Create a new property and set its value."""
        self.command.package = self.input_ovf
        self.command.properties = ["new-property=hello"]
        self.command.run()
        self.command.finished()
        # A single new Property element with type defaulting to string.
        self.check_diff("""
       </ovf:Property>
+      <ovf:Property ovf:key="new-property" ovf:type="string" \
ovf:value="hello" />
     </ovf:ProductSection>
""")
    def test_create_property_variants(self):
        """Variant options for creating new properties."""
        self.command.package = self.input_ovf
        # Syntax exercised: bare key, key=value, key+type, key=value+type.
        self.command.properties = [
            "empty-property",
            "property-with-value=value",
            "prop-with-type+string",
            "prop-with-value-and-type=yes+boolean",
        ]
        self.command.run()
        self.command.finished()
        # 'yes' is canonicalized to 'true' for the boolean-typed property.
        self.check_diff("""
       </ovf:Property>
+      <ovf:Property ovf:key="empty-property" ovf:type="string" />
+      <ovf:Property ovf:key="property-with-value" ovf:type="string" \
ovf:value="value" />
+      <ovf:Property ovf:key="prop-with-type" ovf:type="string" />
+      <ovf:Property ovf:key="prop-with-value-and-type" ovf:type="boolean" \
ovf:value="true" />
     </ovf:ProductSection>
""")
    def test_change_type_existing_invalid(self):
        """Change the type of an existing property so that value is invalid."""
        self.command.package = self.invalid_ovf
        self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
        self.assertLogged(**self.NONEXISTENT_FILE)
        # Existing value cannot be coerced to boolean -> must raise.
        self.command.properties = ['jabberwock+boolean']
        with self.assertRaises(ValueUnsupportedError):
            self.command.run()
    def test_create_edit_and_user_configurable(self):
        """Create new props, edit existing, and set user-configable flag."""
        self.command.package = self.input_ovf
        self.command.properties = [
            'new-property=false+boolean',
            'domain-name=example.com',
            'another-new=yep!',
            'enable-https-server+string',
        ]
        # user_configurable=False should be applied to every touched property.
        self.command.user_configurable = False
        self.command.run()
        self.command.finished()
        self.check_diff("""
       </ovf:Property>
-      <ovf:Property ovf:key="enable-https-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="false">
+      <ovf:Property ovf:key="enable-https-server" ovf:type="string" \
ovf:userConfigurable="false" ovf:value="false">
       <ovf:Label>Enable HTTPS Server</ovf:Label>
...
       </ovf:Property>
-      <ovf:Property ovf:key="domain-name" ovf:qualifiers="MaxLen(238)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+      <ovf:Property ovf:key="domain-name" ovf:qualifiers="MaxLen(238)" \
ovf:type="string" ovf:userConfigurable="false" ovf:value="example.com">
       <ovf:Label>Domain Name</ovf:Label>
...
       </ovf:Property>
+      <ovf:Property ovf:key="new-property" ovf:type="boolean" \
ovf:userConfigurable="false" ovf:value="false" />
+      <ovf:Property ovf:key="another-new" ovf:type="string" \
ovf:userConfigurable="false" ovf:value="yep!" />
     </ovf:ProductSection>
""")
    def test_load_config_file(self):
        """Inject a sequence of properties from a config file."""
        self.command.package = self.input_ovf
        self.command.config_file = self.sample_cfg
        self.command.run()
        self.command.finished()
        # Each config line becomes a sequentially numbered config-NNNN prop.
        self.check_diff("""
       </ovf:Property>
+      <ovf:Property ovf:key="config-0001" ovf:type="string" \
ovf:value="interface GigabitEthernet0/0/0/0" />
+      <ovf:Property ovf:key="config-0002" ovf:type="string" \
ovf:value="no shutdown" />
+      <ovf:Property ovf:key="config-0003" ovf:type="string" \
ovf:value="interface Loopback0" />
+      <ovf:Property ovf:key="config-0004" ovf:type="string" ovf:value="end" />
     </ovf:ProductSection>
""")
    def test_load_config_file_relative_path(self):
        """Inject a sequence of properties from a relative-path config file."""
        # Work from the config file's directory so both paths are relative.
        os.chdir(os.path.dirname(self.sample_cfg))
        self.command.package = os.path.relpath(self.input_ovf)
        self.command.config_file = os.path.basename(self.sample_cfg)
        self.command.run()
        self.command.finished()
        self.check_diff("""
       </ovf:Property>
+      <ovf:Property ovf:key="config-0001" ovf:type="string" \
ovf:value="interface GigabitEthernet0/0/0/0" />
+      <ovf:Property ovf:key="config-0002" ovf:type="string" \
ovf:value="no shutdown" />
+      <ovf:Property ovf:key="config-0003" ovf:type="string" \
ovf:value="interface Loopback0" />
+      <ovf:Property ovf:key="config-0004" ovf:type="string" ovf:value="end" />
     </ovf:ProductSection>
""")
def test_combined(self):
    """Set individual properties AND add from a config file."""
    self.command.package = self.input_ovf
    self.command.config_file = self.sample_cfg
    self.command.properties = ["login-password=cisco123",
                               "enable-ssh-server=1"]
    self.command.user_configurable = True
    self.command.run()
    self.command.finished()
    # Existing properties are updated in place; config-file lines are
    # appended as new properties, all flagged userConfigurable="true"
    # because of the user_configurable setting above.
    self.check_diff("""
</ovf:Property>
- <ovf:Property ovf:key="login-password" ovf:password="true" \
ovf:qualifiers="MaxLen(25)" ovf:type="string" ovf:userConfigurable="true" \
ovf:value="">
+ <ovf:Property ovf:key="login-password" ovf:password="true" \
ovf:qualifiers="MaxLen(25)" ovf:type="string" ovf:userConfigurable="true" \
ovf:value="cisco123">
<ovf:Label>Login Password</ovf:Label>
...
<ovf:Category>2. Features</ovf:Category>
- <ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="false">
+ <ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="true">
<ovf:Label>Enable SSH Login</ovf:Label>
...
</ovf:Property>
+ <ovf:Property ovf:key="config-0001" ovf:type="string" \
ovf:userConfigurable="true" ovf:value="interface GigabitEthernet0/0/0/0" />
+ <ovf:Property ovf:key="config-0002" ovf:type="string" \
ovf:userConfigurable="true" ovf:value="no shutdown" />
+ <ovf:Property ovf:key="config-0003" ovf:type="string" \
ovf:userConfigurable="true" ovf:value="interface Loopback0" />
+ <ovf:Property ovf:key="config-0004" ovf:type="string" \
ovf:userConfigurable="true" ovf:value="end" />
</ovf:ProductSection>
""")
def test_qualifiers_maxlen(self):
    """Ensure property values are limited by MaxLen qualifiers."""
    self.command.package = self.input_ovf
    vm = self.command.vm
    # Well within the 25-character limit -- must be accepted.
    vm.set_property_value("login-password", "ababab")
    self.assertRaises(ValueUnsupportedError,
                      vm.set_property_value,
                      "login-password",
                      # max length 25 characters according to OVF
                      "abcdefghijklmnopqrstuvwxyz")
def test_qualifiers_minlen(self):
    """Ensure property values are limited by MinLen qualifiers."""
    self.command.package = self.invalid_ovf
    # Loading this intentionally invalid OVF produces known warnings.
    self.assertLogged(**self.UNRECOGNIZED_PRODUCT_CLASS)
    self.assertLogged(**self.NONEXISTENT_FILE)
    vm = self.command.vm
    # Long enough to satisfy the MinLen qualifier -- must be accepted.
    vm.set_property_value("jabberwock", "super duper alley-ooper scooper")
    self.assertRaises(ValueUnsupportedError,
                      vm.set_property_value,
                      "jabberwock",
                      "short")
def test_update_label_and_description(self):
    """Update label and description for existing properties."""
    self.command.package = self.input_ovf
    # The three lists are matched up positionally, property by property.
    self.command.properties = ["hostname", "enable-ssh-server"]
    self.command.labels = ["Hostname", "Enable Remote SSH Access"]
    self.command.descriptions = ["Enter the router hostname",
                                 "Enable <sshd>; disable <telnetd>"]
    self.command.run()
    self.command.finished()
    self.check_diff("""
<ovf:Property ovf:key="hostname" ovf:qualifiers="MaxLen(63)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="">
- <ovf:Label>Router Name</ovf:Label>
- <ovf:Description>Hostname of this router</ovf:Description>
+ <ovf:Label>Hostname</ovf:Label>
+ <ovf:Description>Enter the router hostname</ovf:Description>
</ovf:Property>
...
<ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="false">
- <ovf:Label>Enable SSH Login</ovf:Label>
- <ovf:Description>Enable remote login via SSH and disable remote \
login via telnet. Requires login-username and login-password to be \
set!</ovf:Description>
+ <ovf:Label>Enable Remote SSH Access</ovf:Label>
+ <ovf:Description>Enable <sshd>; disable \
<telnetd></ovf:Description>
</ovf:Property>
""")
def test_create_property_no_preexisting(self):
    """Set property values for an OVF that has none previously."""
    self.command.package = self.minimal_ovf
    self.command.properties = ["hello=world"]
    self.command.run()
    self.command.finished()
    # A whole new ProductSection must be created to hold the property.
    self.check_diff(file1=self.minimal_ovf, expected="""
</ovf:VirtualHardwareSection>
+ <ovf:ProductSection>
+ <ovf:Info>Product Information</ovf:Info>
+ <ovf:Property ovf:key="hello" ovf:type="string" ovf:value="world" />
+ </ovf:ProductSection>
</ovf:VirtualSystem>
""")
def test_create_property_no_preexisting_v09(self):
    """Set property for a v0.9 OVF with no pre-existing properties."""
    self.command.package = self.v09_ovf
    self.command.properties = ["hello=world"]
    # Creating properties is not implemented for OVF 0.9 packages.
    self.assertRaises(NotImplementedError, self.command.run)
def test_config_file_not_supported(self):
    """Platform doesn't support literal CLI configuration."""
    self.command.package = self.iosv_ovf
    self.command.config_file = self.sample_cfg
    # The IOSv platform rejects config-file injection, so run() must fail.
    self.assertRaises(NotImplementedError,
                      self.command.run)
def test_set_transport(self):
    """Set environment transport value."""
    self.command.package = self.input_ovf
    self.command.transports = ['ibm', 'iso', 'vmware']
    # Shorthand keywords are canonicalized to their full transport URIs.
    self.assertEqual(self.command.transports,
                     ["http://www.ibm.com/xmlns/ovf/transport/filesystem/"
                      "etc/ovf-transport", "iso", "com.vmware.guestInfo"])
    self.command.run()
    self.command.finished()
    # All three transports end up space-separated in ovf:transport.
    self.check_diff("""
</ovf:OperatingSystemSection>
- <ovf:VirtualHardwareSection ovf:transport="iso">
+ <ovf:VirtualHardwareSection ovf:transport="http://www.ibm.com/xmlns/ovf/\
transport/filesystem/etc/ovf-transport iso com.vmware.guestInfo">
<ovf:Info>Virtual hardware requirements</ovf:Info>
""")
# Expected log record (assertLogged kwargs) emitted when an unrecognized
# transport keyword is supplied; used by test_set_transport_unknown.
UNKNOWN_TRANSPORT = {
    'levelname': 'WARNING',
    'msg': "Unknown transport value '%s'. .*",
    'args': ('foobar', ),
}
def test_set_transport_unknown(self):
    """Setting the transport to an unknown value is OK but warned about."""
    self.command.package = self.input_ovf
    self.command.transports = ['com.vmware.guestInfo', 'foobar']
    self.assertLogged(**self.UNKNOWN_TRANSPORT)
    # The unknown value is kept verbatim rather than rejected.
    self.assertEqual(self.command.transports,
                     ['com.vmware.guestInfo', 'foobar'])
    self.command.run()
    self.command.finished()
    self.check_diff("""
</ovf:OperatingSystemSection>
- <ovf:VirtualHardwareSection ovf:transport="iso">
+ <ovf:VirtualHardwareSection ovf:transport="com.vmware.guestInfo foobar">
<ovf:Info>Virtual hardware requirements</ovf:Info>
""")
def test_set_transport_v09(self):
    """Set the transport method for a v0.9 OVF."""
    self.command.package = self.v09_ovf
    self.command.transports = ['iso']
    # Transport editing is not implemented for OVF 0.9 packages.
    self.assertRaises(NotImplementedError, self.command.run)
def test_edit_interactive(self):
    """Exercise the interactive CLI for COT edit-properties."""
    menu_prompt = """
Please choose a property to edit:
1) login-username "Login Username"
2) login-password "Login Password"
3) mgmt-ipv4-addr "Management IPv4 Address/Mask"
4) mgmt-ipv4-gateway "Management IPv4 Default Gateway"
5) hostname "Router Name"
6) enable-ssh-server "Enable SSH Login"
7) enable-http-server "Enable HTTP Server"
8) enable-https-server "Enable HTTPS Server"
9) privilege-password "Enable Password"
10) domain-name "Domain Name"
Enter property key or number to edit, or 'q' to write changes and quit
""".strip()
    username_edit_prompt = """
Key: "login-username"
Label: "Login Username"
Description: "Username for remote login"
Type: "string"
Qualifiers: "MaxLen(64)"
Current Value: ""
Enter new value for this property
""".strip()
    ssh_edit_prompt = """
Key: "enable-ssh-server"
Label: "Enable SSH Login"
Description: "Enable remote login via SSH and disable remote login
via telnet. Requires login-username and login-
password to be set!"
Type: "boolean"
Qualifiers: ""
Current Value: "false"
Enter new value for this property
""".strip()
    # List of tuples:
    # (expected_prompt, input_to_provide, expected_log)
    prompt_idx = 0
    input_idx = 1
    msgs_idx = 2
    # Scripted interactive session: custom_input (below) walks this list,
    # checking each prompt and feeding back the canned response.
    expected = [
        # select by name prefix
        (menu_prompt, "login-u", None),
        # unchanged value, return to menu
        (username_edit_prompt, "", {'levelname': 'INFO',
                                    'msg': 'Value.*unchanged', }),
        # select by number
        (menu_prompt, "1", None),
        # invalid value
        (username_edit_prompt,
         ("thisiswaytoolongofastringtouseforausername"
          "whatamipossiblythinking!"),
         {'levelname': 'ERROR',
          'msg': 'Unsupported value.*login-username.*64 characters', }),
        # valid value, update and return to menu
        (username_edit_prompt, "hello",
         {'levelname': 'INFO',
          'msg': 'Successfully updated property', }),
        # out of range menu selection
        (menu_prompt, "27",
         {'levelname': 'ERROR', 'msg': 'Invalid input', }),
        # select by number
        (menu_prompt, "1", None),
        # valid value, return to menu
        (re.sub('Value: ""', 'Value: "hello"', username_edit_prompt),
         "goodbye",
         {'levelname': 'INFO', 'msg': 'Successfully updated property', }),
        # ambiguous selection
        (menu_prompt, "enable-",
         {'levelname': 'ERROR', 'msg': 'Invalid input', }),
        # unambiguous selection
        (menu_prompt, "enable-ssh", None),
        # value to be munged, no change, return
        (ssh_edit_prompt, "n",
         {'levelname': 'INFO', 'msg': 'Successfully updated property', }),
        # unambiguous selection
        (menu_prompt, "enable-ssh", None),
        # not a valid boolean
        (ssh_edit_prompt, "nope",
         {'levelname': 'ERROR',
          'msg': 'Unsupported value.*enable-ssh-server.*boolean', }),
        # valid boolean, update and return to menu
        (ssh_edit_prompt, "true",
         {'levelname': 'INFO', 'msg': 'Successfully updated property', }),
        # done
        (menu_prompt, "q", None),
    ]

    def custom_input(prompt,
                     default_value):  # pylint: disable=unused-argument
        """Mock for :meth:`COT.ui.UI.get_input`.

        For the parameters, see get_input.
        """
        # NOTE(review): relies on self.counter being initialized elsewhere
        # (not visible in this excerpt); it tracks our position in
        # the `expected` script above -- confirm it starts at 0.
        if self.counter > 0:
            # First, verify the log output from the *previous* step.
            log = expected[self.counter-1][msgs_idx]
            if log is not None:
                self.assertLogged(info='After step {0}, '
                                  .format(self.counter - 1),
                                  **log)  # pylint: disable=not-a-mapping
            else:
                self.assertNoLogsOver(logging.INFO,
                                      info='After step {0}, '
                                      .format(self.counter - 1))
        # Get output and flush it
        # Make sure it matches expectations
        self.maxDiff = None
        self.assertMultiLineEqual(
            expected[self.counter][prompt_idx], prompt,
            "failed at index {0}! Expected:\n{1}\nActual:\n{2}".format(
                self.counter, expected[self.counter][prompt_idx], prompt))
        # Return our canned input
        canned_input = expected[self.counter][input_idx]
        self.counter += 1
        return canned_input

    _input = self.command.ui.get_input
    try:
        # Swap in the scripted input function for the duration of run().
        self.command.ui.get_input = custom_input
        self.command.package = self.input_ovf
        self.command.run()
        # Verify the log output (if any) from the final scripted step.
        log = expected[self.counter - 1][msgs_idx]
        if log is not None:
            self.assertLogged(**log)  # pylint: disable=not-a-mapping
    finally:
        # Always restore the real input function, even on test failure.
        self.command.ui.get_input = _input
    self.command.finished()
    self.check_diff("""
<ovf:Category>1. Bootstrap Properties</ovf:Category>
- <ovf:Property ovf:key="login-username" ovf:qualifiers="MaxLen(64)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="">
+ <ovf:Property ovf:key="login-username" ovf:qualifiers="MaxLen(64)" \
ovf:type="string" ovf:userConfigurable="true" ovf:value="goodbye">
<ovf:Label>Login Username</ovf:Label>
...
<ovf:Category>2. Features</ovf:Category>
- <ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="false">
+ <ovf:Property ovf:key="enable-ssh-server" ovf:type="boolean" \
ovf:userConfigurable="true" ovf:value="true">
<ovf:Label>Enable SSH Login</ovf:Label>
""")
| {
"content_hash": "2c4c3c2298a5cb43ec1058aa50a7d3ca",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 79,
"avg_line_length": 41.75317604355717,
"alnum_prop": 0.6076675649830479,
"repo_name": "glennmatthews/cot",
"id": "a73526a1e1d12b18ccef364a9f53791cb75b137a",
"size": "23743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "COT/commands/tests/test_edit_properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1293158"
},
{
"name": "Roff",
"bytes": "37442"
},
{
"name": "Shell",
"bytes": "3840"
}
],
"symlink_target": ""
} |
import six
import time
import sqlalchemy.exc
from sqlalchemy import create_engine, MetaData, Table, Column, String, Float, Text
from sqlalchemy.engine.url import make_url
from pyspider.libs import utils
from pyspider.database.base.projectdb import ProjectDB as BaseProjectDB
from .sqlalchemybase import result2dict
# Conversion applied to values compared in WHERE clauses (e.g. the project
# name). NOTE(review): utils.utf8 / utils.text presumably encode to bytes /
# decode to text respectively, matching how _stringify stores values on
# each Python version -- confirm against pyspider.libs.utils.
if six.PY3:
    where_type = utils.utf8
else:
    where_type = utils.text
class ProjectDB(BaseProjectDB):
    """SQLAlchemy-backed implementation of pyspider's project database.

    Stores one row per project in the ``projectdb`` table. All public
    methods implement the BaseProjectDB interface; rows are returned as
    plain dicts with text keys/values (decoded via _parse on Python 3).
    """

    __tablename__ = 'projectdb'

    def __init__(self, url):
        """Connect to *url*, creating the database and table if needed."""
        self.table = Table(self.__tablename__, MetaData(),
                           Column('name', String(64)),
                           Column('group', String(64)),
                           Column('status', String(16)),
                           Column('script', Text),
                           Column('comments', String(1024)),
                           Column('rate', Float(11)),
                           Column('burst', Float(11)),
                           Column('updatetime', Float(32)),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8'
                           )

        self.url = make_url(url)
        if self.url.database:
            database = self.url.database
            self.url.database = None
            try:
                # Connect without a database selected so we can create it
                # first. CREATE DATABASE IF NOT EXISTS is MySQL syntax;
                # other backends raise OperationalError, which we ignore
                # (best-effort creation).
                engine = create_engine(self.url, convert_unicode=False)
                engine.execute("CREATE DATABASE IF NOT EXISTS %s" % database)
            except sqlalchemy.exc.OperationalError:
                pass
            self.url.database = database
        self.engine = create_engine(url, convert_unicode=False)
        self.table.create(self.engine, checkfirst=True)

    @staticmethod
    def _parse(data):
        """Decode bytes keys/values to text (Python 3 only); mutates *data*."""
        if six.PY3:
            for key, value in list(six.iteritems(data)):
                if isinstance(value, six.binary_type):
                    data[utils.text(key)] = utils.text(value)
                else:
                    data[utils.text(key)] = value
        return data

    @staticmethod
    def _stringify(data):
        """Encode text values to bytes before writing (Python 3 only)."""
        if six.PY3:
            for key, value in list(six.iteritems(data)):
                if isinstance(value, six.string_types):
                    data[key] = utils.utf8(value)
        return data

    def insert(self, name, obj=None):
        """Insert a new project row named *name*.

        *obj* is an optional mapping of column values; ``name`` and
        ``updatetime`` are filled in automatically.
        """
        # BUG FIX: previously used a mutable default argument (obj={});
        # harmless here only because obj was copied, but the idiom invites
        # shared-state bugs. None-with-fallback is the safe equivalent.
        obj = dict(obj or {})
        obj['name'] = name
        obj['updatetime'] = time.time()
        return self.engine.execute(self.table.insert()
                                   .values(**self._stringify(obj)))

    def update(self, name, obj=None, **kwargs):
        """Update project *name* with columns from *obj* and/or kwargs."""
        obj = dict(obj or {})  # see insert(): avoid mutable default
        obj.update(kwargs)
        obj['updatetime'] = time.time()
        return self.engine.execute(self.table.update()
                                   .where(self.table.c.name == where_type(name))
                                   .values(**self._stringify(obj)))

    def get_all(self, fields=None):
        """Yield every project as a dict, restricted to *fields* if given."""
        # getattr(..., f, f) falls back to the raw string for unknown fields.
        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for task in self.engine.execute(self.table.select()
                                        .with_only_columns(columns)):
            yield self._parse(result2dict(columns, task))

    def get(self, name, fields=None):
        """Return project *name* as a dict, or None if it does not exist."""
        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for task in self.engine.execute(self.table.select()
                                        .where(self.table.c.name == where_type(name))
                                        .limit(1)
                                        .with_only_columns(columns)):
            return self._parse(result2dict(columns, task))

    def drop(self, name):
        """Delete the project row named *name*."""
        return self.engine.execute(self.table.delete()
                                   .where(self.table.c.name == where_type(name)))

    def check_update(self, timestamp, fields=None):
        """Yield projects whose ``updatetime`` is >= *timestamp*."""
        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for task in self.engine.execute(self.table.select()
                                        .with_only_columns(columns)
                                        .where(self.table.c.updatetime >= timestamp)):
            yield self._parse(result2dict(columns, task))
| {
"content_hash": "2a0332515bea899ffc576a05a183644b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 91,
"avg_line_length": 40.66019417475728,
"alnum_prop": 0.5267430754536772,
"repo_name": "jjyycchh/pyspider",
"id": "83e3e138d5267fb94e0ce1aac38cd159ff31bd93",
"size": "4375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspider/database/sqlalchemy/projectdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24952"
},
{
"name": "HTML",
"bytes": "24503"
},
{
"name": "JavaScript",
"bytes": "50223"
},
{
"name": "Python",
"bytes": "456565"
}
],
"symlink_target": ""
} |
"""Read pressure, temperature and humidity from a Raspberry Pi Sense HAT."""
from sense_hat import SenseHat

sense = SenseHat()


def name():
    """Return the current barometric pressure (millibars) as a string.

    NOTE(review): despite its name this reads *pressure*; the name is kept
    for backward compatibility with existing callers (e.g. the web layer).
    """
    pressure = sense.get_pressure()
    return str(round(pressure, 1))


def temp():
    """Return the current temperature (degrees Celsius) as a string."""
    # Distinct local name so we don't shadow the temp() function itself.
    reading = sense.get_temperature()
    return str(round(reading, 1))


def hum():
    """Return the current relative humidity (percent) as a string."""
    reading = sense.get_humidity()  # was `hum`, shadowing this function
    return str(round(reading, 1))


print(name())
print(temp())
print(hum())
| {
"content_hash": "82da3e1a86fd59eecc2959b5dd0b625d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 32,
"avg_line_length": 16.36842105263158,
"alnum_prop": 0.6784565916398714,
"repo_name": "rbontekoe/raspi",
"id": "4146243a7570669f5130f5996adcee952fbfcdc4",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/myhat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "Python",
"bytes": "1040"
}
],
"symlink_target": ""
} |
"""Defines input readers for MapReduce."""
__all__ = [
"AbstractDatastoreInputReader",
"ALLOW_CHECKPOINT",
"BadReaderParamsError",
"BlobstoreLineInputReader",
"BlobstoreZipInputReader",
"BlobstoreZipLineInputReader",
"COUNTER_IO_READ_BYTES",
"COUNTER_IO_READ_MSEC",
"ConsistentKeyReader",
"DatastoreEntityInputReader",
"DatastoreInputReader",
"DatastoreKeyInputReader",
"RandomStringInputReader",
"Error",
"InputReader",
"LogInputReader",
"NamespaceInputReader",
"RecordsReader",
]
import base64
import copy
import logging
import random
import string
import StringIO
import time
import zipfile
from google.net.proto import ProtocolBuffer
from google.appengine.api import datastore
from google.appengine.api import files
from google.appengine.api import logservice
from google.appengine.api.files import records
from google.appengine.api.logservice import log_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import key_range
from google.appengine.ext.db import metadata
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import errors
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import namespace_range
from google.appengine.ext.mapreduce import operation
from google.appengine.ext.mapreduce import util
# Re-exported aliases so users of this module need not import errors directly.
Error = errors.Error
BadReaderParamsError = errors.BadReaderParamsError
# Counter names under which input readers report bytes read and read time.
COUNTER_IO_READ_BYTES = "io-read-bytes"
COUNTER_IO_READ_MSEC = "io-read-msec"
# Sentinel a reader may yield instead of a real value; per its name (and its
# use between namespaces in _iter_ns_range) it marks a point where
# checkpointing is allowed without any input to process.
ALLOW_CHECKPOINT = object()
class InputReader(model.JsonMixin):
  """Base contract shared by every input reader.

  An input reader is created via split_input() (one reader per shard),
  iterated via the iterator protocol to feed values to the mapper, and
  checkpointed/resumed through the to_json()/from_json() round trip.
  Subclasses may also implement __str__ for a user-readable description.
  """

  # Whether each yielded value expands into multiple mapper inputs.
  # NOTE(review): semantics inferred from the name -- not exercised here.
  expand_parameters = False

  # Well-known mapper-parameter keys shared by the concrete readers.
  _APP_PARAM = "_app"
  NAMESPACE_PARAM = "namespace"
  NAMESPACES_PARAM = "namespaces"

  def __iter__(self):
    """An input reader is its own iterator."""
    return self

  def next(self):
    """Return the next input value; concrete readers must override."""
    message = "next() not implemented in %s" % self.__class__
    raise NotImplementedError(message)

  @classmethod
  def from_json(cls, input_shard_state):
    """Rebuild a reader from serialized shard state; must override."""
    message = "from_json() not implemented in %s" % cls
    raise NotImplementedError(message)

  def to_json(self):
    """Serialize the remaining input state; must override."""
    message = "to_json() not implemented in %s" % self.__class__
    raise NotImplementedError(message)

  @classmethod
  def split_input(cls, mapper_spec):
    """Return a list of readers covering mapper_spec; must override."""
    message = "split_input() not implemented in %s" % cls
    raise NotImplementedError(message)

  @classmethod
  def validate(cls, mapper_spec):
    """Validate mapper spec and all reader parameters; must override.

    Reader parameters are expected in the "input_reader" subdictionary
    of mapper_spec.params (see _get_params, which also tolerates the
    legacy flat layout with a warning).

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    message = "validate() not implemented in %s" % cls
    raise NotImplementedError(message)
def _get_params(mapper_spec, allowed_keys=None):
  """Obtain input reader parameters.

  Utility function for input readers implementation. Fetches parameters
  from mapreduce specification giving appropriate usage warnings.

  Args:
    mapper_spec: The MapperSpec for the job
    allowed_keys: set of all allowed keys in parameters as strings. If it is
      not None, then parameters are expected to be in a separate
      "input_reader" subdictionary of mapper_spec parameters.

  Returns:
    mapper parameters as dict (all keys coerced to str)

  Raises:
    BadReaderParamsError: if parameters are invalid/missing or not allowed.
  """
  # CONSISTENCY FIX: raise errors.BadReaderParamsError everywhere (one of
  # the three raise sites previously used the bare module alias).
  if "input_reader" not in mapper_spec.params:
    message = ("Input reader's parameters should be specified in "
               "input_reader subdictionary.")
    if allowed_keys:
      # Strict mode: with an explicit whitelist the legacy flat layout
      # cannot be validated, so reject it outright.
      raise errors.BadReaderParamsError(message)
    else:
      # Legacy flat layout: accept with a warning.
      logging.warning(message)
    params = mapper_spec.params
    params = dict((str(n), v) for n, v in params.iteritems())
  else:
    if not isinstance(mapper_spec.params.get("input_reader"), dict):
      raise errors.BadReaderParamsError(
          "Input reader parameters should be a dictionary")
    params = mapper_spec.params.get("input_reader")
    params = dict((str(n), v) for n, v in params.iteritems())
    if allowed_keys:
      params_diff = set(params.keys()) - allowed_keys
      if params_diff:
        raise errors.BadReaderParamsError(
            "Invalid input_reader parameters: %s" % ",".join(params_diff))
  return params
class AbstractDatastoreInputReader(InputReader):
  """Abstract base class for classes that iterate over datastore entities.

  Concrete subclasses must implement _iter_key_range(self, k_range). See the
  docstring for that method for details.
  """

  # Number of entities fetched per underlying datastore query batch.
  _BATCH_SIZE = 50
  # Ceiling on shard count. NOTE(review): not referenced within this
  # excerpt -- presumably enforced by callers; confirm.
  _MAX_SHARD_COUNT = 256
  # When sampling __scatter__ keys to pick split points, fetch this many
  # times more keys than shards (see _split_input_from_namespace).
  _OVERSAMPLING_FACTOR = 32
  # With at most this many namespaces, shard by key range per namespace;
  # above it, split_input shards by namespace range instead.
  MAX_NAMESPACES_FOR_KEY_SHARD = 10
  # Mapper-parameter / serialized-state key names.
  ENTITY_KIND_PARAM = "entity_kind"
  KEYS_ONLY_PARAM = "keys_only"
  BATCH_SIZE_PARAM = "batch_size"
  KEY_RANGE_PARAM = "key_range"
  NAMESPACE_RANGE_PARAM = "namespace_range"
  CURRENT_KEY_RANGE_PARAM = "current_key_range"
  FILTERS_PARAM = "filters"
def __init__(self,
             entity_kind,
             key_ranges=None,
             ns_range=None,
             batch_size=_BATCH_SIZE,
             current_key_range=None,
             filters=None):
  """Create new AbstractDatastoreInputReader object.

  This is internal constructor. Use split_query in a concrete class instead.

  Args:
    entity_kind: entity kind as string.
    key_ranges: a sequence of key_range.KeyRange instances to process. Only
      one of key_ranges or ns_range can be non-None.
    ns_range: a namespace_range.NamespaceRange to process. Only one of
      key_ranges or ns_range can be non-None.
    batch_size: size of read batch as int.
    current_key_range: the current key_range.KeyRange being processed.
    filters: optional list of filters to apply to the query. Each filter is
      a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
      User filters are applied first.
  """
  assert key_ranges is not None or ns_range is not None, (
      "must specify one of 'key_ranges' or 'ns_range'")
  assert key_ranges is None or ns_range is None, (
      "can't specify both 'key_ranges ' and 'ns_range'")

  self._entity_kind = entity_kind
  # Stored reversed so that _iter_key_ranges() can pop() from the end
  # and still consume the ranges in their original order.
  self._key_ranges = key_ranges and list(reversed(key_ranges))
  self._ns_range = ns_range
  self._batch_size = int(batch_size)
  self._current_key_range = current_key_range
  self._filters = filters
@classmethod
def _get_raw_entity_kind(cls, entity_kind):
  """Return *entity_kind* unchanged, warning if it contains a dot.

  A dotted value may have been intended as a module-qualified model path;
  this reader treats the entire string, dot included, as the kind name,
  so the developer is warned about the assumption being made.
  """
  if "." not in entity_kind:
    return entity_kind
  logging.warning(
      ". detected in entity kind %s specified for reader %s."
      "Assuming entity kind contains the dot.",
      entity_kind, cls.__name__)
  return entity_kind
def __iter__(self):
  """Iterates over the given KeyRanges or NamespaceRange.

  This method iterates over the given KeyRanges or NamespaceRange and sets
  the self._current_key_range to the KeyRange currently being processed. It
  then delegates to the _iter_key_range method to yield that actual
  results.

  Yields:
    Forwards the objects yielded by the subclasses concrete _iter_key_range()
    method. The caller must consume the result yielded because self.to_json()
    will not include it.
  """
  if self._key_ranges is not None:
    for o in self._iter_key_ranges():
      yield o
  elif self._ns_range is not None:
    for o in self._iter_ns_range():
      yield o
  else:
    # __init__ asserts that exactly one of the two is set.
    assert False, "self._key_ranges and self._ns_range are both None"
def _iter_key_ranges(self):
  """Iterates over self._key_ranges, delegating to self._iter_key_range()."""
  while True:
    if self._current_key_range is None:
      if self._key_ranges:
        # Pop the next range; it may be a None placeholder (padding from
        # _split_input_from_namespace), in which case the next loop
        # iteration simply pops again.
        self._current_key_range = self._key_ranges.pop()
        continue
      else:
        break

    for key, o in self._iter_key_range(
        copy.deepcopy(self._current_key_range)):
      # Record progress so that a checkpoint resumes after `key`.
      self._current_key_range.advance(key)
      yield o
    self._current_key_range = None
def _iter_ns_range(self):
  """Iterates over self._ns_range, delegating to self._iter_key_range()."""
  while True:
    if self._current_key_range is None:
      # Find the first namespace in the remaining range; none means done.
      query = self._ns_range.make_datastore_query()
      namespace_result = query.Get(1)
      if not namespace_result:
        break

      namespace = namespace_result[0].name() or ""
      self._current_key_range = key_range.KeyRange(
          namespace=namespace, _app=self._ns_range.app)
      # No real value to report yet -- yield the checkpoint sentinel.
      yield ALLOW_CHECKPOINT

    for key, o in self._iter_key_range(
        copy.deepcopy(self._current_key_range)):
      self._current_key_range.advance(key)
      yield o

    if (self._ns_range.is_single_namespace or
        self._current_key_range.namespace == self._ns_range.namespace_end):
      break
    # Advance to the namespace after the one just finished.
    self._ns_range = self._ns_range.with_start_after(
        self._current_key_range.namespace)
    self._current_key_range = None
def _iter_key_range(self, k_range):
  """Abstract hook: iterate over a single key_range.KeyRange.

  Subclasses must yield 2-tuples of (last db.Key processed, value for
  __iter__ to forward). The yielded db.Key is fed back into
  self._current_key_range.advance() to track the reader's position.

  Args:
    k_range: The key_range.KeyRange to iterate over.
  """
  message = "_iter_key_range() not implemented in %s" % self.__class__
  raise NotImplementedError(message)
def __str__(self):
  """User-readable description: whichever range set this reader holds."""
  return (repr(self._ns_range) if self._ns_range is not None
          else repr(self._key_ranges))
@classmethod
def _choose_split_points(cls, sorted_keys, shard_count):
  """Pick (shard_count - 1) evenly spaced keys out of *sorted_keys*."""
  assert len(sorted_keys) >= shard_count
  stride = len(sorted_keys) / float(shard_count)
  points = []
  for shard_index in range(1, shard_count):
    points.append(sorted_keys[int(round(stride * shard_index))])
  return points
@classmethod
def _split_input_from_namespace(cls, app, namespace, entity_kind,
                                shard_count):
  """Return KeyRange objects. Helper for _split_input_from_params.

  If there are not enough Entities to make all of the given shards, the
  returned list of KeyRanges will include Nones. The returned list will
  contain KeyRanges ordered lexographically with any Nones appearing at the
  end.
  """
  raw_entity_kind = cls._get_raw_entity_kind(entity_kind)
  if shard_count == 1:
    # A single shard covers the entire namespace; no split points needed.
    return [key_range.KeyRange(namespace=namespace, _app=app)]

  # Sample keys ordered by the __scatter__ property to find approximately
  # evenly distributed split points.
  ds_query = datastore.Query(kind=raw_entity_kind,
                             namespace=namespace,
                             _app=app,
                             keys_only=True)
  ds_query.Order("__scatter__")
  random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)

  if not random_keys:
    # No scatter keys at all: one all-covering range plus None padding.
    return ([key_range.KeyRange(namespace=namespace, _app=app)] +
            [None] * (shard_count - 1))

  random_keys.sort()

  if len(random_keys) >= shard_count:
    # Plenty of samples: thin them down to (shard_count - 1) split points.
    random_keys = cls._choose_split_points(random_keys, shard_count)

  key_ranges = []

  # First range: everything up to (excluding) the first split key.
  key_ranges.append(key_range.KeyRange(
      key_start=None,
      key_end=random_keys[0],
      direction=key_range.KeyRange.ASC,
      include_start=False,
      include_end=False,
      namespace=namespace,
      _app=app))

  # Interior ranges: [split[i], split[i+1]).
  for i in range(0, len(random_keys) - 1):
    key_ranges.append(key_range.KeyRange(
        key_start=random_keys[i],
        key_end=random_keys[i+1],
        direction=key_range.KeyRange.ASC,
        include_start=True,
        include_end=False,
        namespace=namespace,
        _app=app))

  # Last range: everything from the final split key onward.
  key_ranges.append(key_range.KeyRange(
      key_start=random_keys[-1],
      key_end=None,
      direction=key_range.KeyRange.ASC,
      include_start=True,
      include_end=False,
      namespace=namespace,
      _app=app))

  if len(key_ranges) < shard_count:
    # Pad with Nones so the caller always gets shard_count entries.
    key_ranges = key_ranges + [None] * (shard_count - len(key_ranges))
  return key_ranges
@classmethod
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
                             params, shard_count):
  """Return input reader objects. Helper for split_input."""
  key_ranges = []  # KeyRanges for all namespaces, shard_count per namespace
  for namespace in namespaces:
    key_ranges.extend(
        cls._split_input_from_namespace(app,
                                        namespace,
                                        entity_kind_name,
                                        shard_count))

  # Round-robin the KeyRanges into shard_count buckets so each shard gets
  # roughly one range per namespace.
  shared_ranges = [[] for _ in range(shard_count)]
  for i, k_range in enumerate(key_ranges):
    shared_ranges[i % shard_count].append(k_range)
  batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))

  # Buckets that received no ranges at all are dropped.
  return [cls(entity_kind_name,
              key_ranges=key_ranges,
              ns_range=None,
              batch_size=batch_size)
          for key_ranges in shared_ranges if key_ranges]
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise BadReaderParamsError(
"Expected a single namespace string")
if cls.NAMESPACES_PARAM in params:
raise BadReaderParamsError("Multiple namespaces are no longer supported")
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, tuple):
raise BadReaderParamsError("Filter should be a tuple: %s", f)
if len(f) != 3:
raise BadReaderParamsError("Filter should be a 3-tuple: %s", f)
if not isinstance(f[0], basestring):
raise BadReaderParamsError("First element should be string: %s", f)
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f)
@classmethod
def split_input(cls, mapper_spec):
  """Splits query into shards without fetching query results.

  Tries as best as it can to split the whole query result set into equal
  shards. Due to difficulty of making the perfect split, resulting shards'
  sizes might differ significantly from each other.

  Args:
    mapper_spec: MapperSpec with params containing 'entity_kind'.
      May have 'namespace' in the params as a string containing a single
      namespace. If specified then the input reader will only yield values
      in the given namespace. If 'namespace' is not given then values from
      all namespaces will be yielded. May also have 'batch_size' in the params
      to specify the number of entities to process in each batch.

  Returns:
    A list of InputReader objects. If the query results are empty then the
    empty list will be returned. Otherwise, the list will always have a length
    equal to number_of_shards but may be padded with Nones if there are too
    few results for effective sharding.
  """
  params = _get_params(mapper_spec)
  entity_kind_name = params[cls.ENTITY_KIND_PARAM]
  batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
  shard_count = mapper_spec.shard_count
  namespace = params.get(cls.NAMESPACE_PARAM)
  app = params.get(cls._APP_PARAM)
  filters = params.get(cls.FILTERS_PARAM)

  if namespace is None:
    # Start the system with an app that has an unset namespace:
    # fetch one namespace more than the key-shard limit so "many
    # namespaces" can be distinguished from "exactly at the limit".
    namespace_query = datastore.Query("__namespace__",
                                      keys_only=True,
                                      _app=app)
    namespace_keys = namespace_query.Get(
        limit=cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)

    if len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD:
      # Too many namespaces to shard by key: shard by namespace range.
      ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
                                                       contiguous=True,
                                                       _app=app)
      return [cls(entity_kind_name,
                  key_ranges=None,
                  ns_range=ns_range,
                  batch_size=batch_size,
                  filters=filters)
              for ns_range in ns_ranges]
    elif not namespace_keys:
      # BUG FIX: previously passed batch_size=shard_count here -- an
      # apparent copy-paste slip; use the configured batch_size like the
      # other two branches do.
      return [cls(entity_kind_name,
                  key_ranges=None,
                  ns_range=namespace_range.NamespaceRange(),
                  batch_size=batch_size,
                  filters=filters)]
    else:
      namespaces = [namespace_key.name() or ""
                    for namespace_key in namespace_keys]
  else:
    namespaces = [namespace]

  readers = cls._split_input_from_params(
      app, namespaces, entity_kind_name, params, shard_count)
  if filters:
    # _split_input_from_params does not accept filters; attach them here.
    for reader in readers:
      reader._filters = filters
  return readers
def to_json(self):
  """Serializes all the data in this query range into json form.

  Returns:
    all the data in json-compatible map.
  """
  # Each optional component serializes to None when absent so that
  # from_json can round-trip the reader state exactly.
  serialized_key_ranges = None
  if self._key_ranges is not None:
    serialized_key_ranges = [r.to_json() if r else None
                             for r in self._key_ranges]

  serialized_ns_range = None
  if self._ns_range is not None:
    serialized_ns_range = self._ns_range.to_json_object()

  serialized_current = None
  if self._current_key_range is not None:
    serialized_current = self._current_key_range.to_json()

  return {self.KEY_RANGE_PARAM: serialized_key_ranges,
          self.NAMESPACE_RANGE_PARAM: serialized_ns_range,
          self.CURRENT_KEY_RANGE_PARAM: serialized_current,
          self.ENTITY_KIND_PARAM: self._entity_kind,
          self.BATCH_SIZE_PARAM: self._batch_size,
          self.FILTERS_PARAM: self._filters}
@classmethod
def from_json(cls, json):
  """Create new DatastoreInputReader from the json, encoded by to_json.

  Args:
    json: json map representation of DatastoreInputReader.

  Returns:
    an instance of DatastoreInputReader with all data deserialized from json.
  """
  # Mirror of to_json: each component was serialized as None when absent.
  raw_key_ranges = json[cls.KEY_RANGE_PARAM]
  key_ranges = None
  if raw_key_ranges is not None:
    key_ranges = [key_range.KeyRange.from_json(raw) if raw else None
                  for raw in raw_key_ranges]

  raw_ns_range = json[cls.NAMESPACE_RANGE_PARAM]
  ns_range = None
  if raw_ns_range is not None:
    ns_range = namespace_range.NamespaceRange.from_json_object(raw_ns_range)

  raw_current = json[cls.CURRENT_KEY_RANGE_PARAM]
  current_key_range = None
  if raw_current is not None:
    current_key_range = key_range.KeyRange.from_json(raw_current)

  # FILTERS_PARAM may be missing from state written by older versions,
  # hence .get() rather than indexing.
  return cls(
      json[cls.ENTITY_KIND_PARAM],
      key_ranges,
      ns_range,
      json[cls.BATCH_SIZE_PARAM],
      current_key_range,
      filters=json.get(cls.FILTERS_PARAM))
class DatastoreInputReader(AbstractDatastoreInputReader):
"""Represents a range in query results.
DatastoreInputReader yields model instances from the entities in a given key
range. Iterating over DatastoreInputReader changes its range past consumed
entries.
The class shouldn't be instantiated directly. Use the split_input class method
instead.
"""
def _iter_key_range(self, k_range):
cursor = None
while True:
query = k_range.make_ascending_query(
util.for_name(self._entity_kind),
filters=self._filters)
if isinstance(query, db.Query):
if cursor:
query.with_cursor(cursor)
results = query.fetch(limit=self._batch_size)
if not results:
break
for model_instance in results:
key = model_instance.key()
yield key, model_instance
cursor = query.cursor()
else:
results, cursor, more = query.fetch_page(self._batch_size,
start_cursor=cursor)
for model_instance in results:
key = model_instance.key
yield key, model_instance
if not more:
break
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
super(DatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
keys_only = util.parse_bool(params.get(cls.KEYS_ONLY_PARAM, False))
if keys_only:
raise BadReaderParamsError("The keys_only parameter is obsolete. "
"Use DatastoreKeyInputReader instead.")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
try:
util.for_name(entity_kind_name)
except ImportError, e:
raise BadReaderParamsError("Bad entity kind: %s" % e)
@classmethod
def _get_raw_entity_kind(cls, entity_kind):
"""Returns an entity kind to use with datastore calls."""
entity_type = util.for_name(entity_kind)
if isinstance(entity_kind, db.Model):
return entity_type.kind()
else:
return util.get_short_name(entity_kind)
class DatastoreKeyInputReader(AbstractDatastoreInputReader):
  """An input reader which takes a Kind and yields Keys for that kind."""

  def _iter_key_range(self, k_range):
    """Yields (key, key) pairs for every entity key in k_range."""
    kind = self._get_raw_entity_kind(self._entity_kind)
    keys_query = k_range.make_ascending_datastore_query(
        kind, keys_only=True, filters=self._filters)
    query_options = datastore_query.QueryOptions(batch_size=self._batch_size)
    # Keys double as both the "key" and "value" of each input record.
    for entity_key in keys_query.Run(config=query_options):
      yield entity_key, entity_key
class DatastoreEntityInputReader(AbstractDatastoreInputReader):
  """An input reader which yields low level datastore entities for a kind."""

  def _iter_key_range(self, k_range):
    """Yields (key, entity) pairs for every entity in k_range."""
    raw_entity_kind = self._get_raw_entity_kind(self._entity_kind)
    # Bug fix: filters was previously passed positionally, binding it to the
    # keys_only parameter of make_ascending_datastore_query (signature:
    # kind, keys_only=False, filters=None). Any non-empty filters therefore
    # turned this into a keys-only query and the filters were dropped.
    # Pass filters by keyword, as DatastoreKeyInputReader does.
    query = k_range.make_ascending_datastore_query(
        raw_entity_kind, filters=self._filters)
    for entity in query.Run(
        config=datastore_query.QueryOptions(batch_size=self._batch_size)):
      yield entity.key(), entity
class BlobstoreLineInputReader(InputReader):
  """Input reader for a newline delimited blob in Blobstore."""

  # Number of bytes the underlying BlobReader buffers per Blobstore fetch.
  _BLOB_BUFFER_SIZE = 64000

  # Maximum number of shards to allow.
  _MAX_SHARD_COUNT = 256

  # Maximum number of blob keys to allow.
  _MAX_BLOB_KEYS_COUNT = 246

  # Mapper parameter name.
  BLOB_KEYS_PARAM = "blob_keys"

  # Serialization parameter names (see to_json / from_json).
  INITIAL_POSITION_PARAM = "initial_position"
  END_POSITION_PARAM = "end_position"
  BLOB_KEY_PARAM = "blob_key"

  def __init__(self, blob_key, start_position, end_position):
    """Initializes this instance with the given blob key and character range.

    This BlobstoreInputReader will read from the first record starting
    strictly after start_position until the first record ending at or after
    end_position (exclusive). As an exception, if start_position is 0, then
    this InputReader starts reading at the first record.

    Args:
      blob_key: the BlobKey that this input reader is processing.
      start_position: the position to start reading at.
      end_position: a position in the last record to read.
    """
    self._blob_key = blob_key
    self._blob_reader = blobstore.BlobReader(blob_key,
                                             self._BLOB_BUFFER_SIZE,
                                             start_position)
    self._end_position = end_position
    self._has_iterated = False
    # When starting mid-blob, the (possibly partial) line we landed in
    # belongs to the previous shard and must be skipped; see next().
    self._read_before_start = bool(start_position)

  def next(self):
    """Returns the next input from as an (offset, line) tuple.

    Raises:
      StopIteration: past end_position or out of data in the blob.
    """
    self._has_iterated = True

    if self._read_before_start:
      # Discard the remainder of the line the previous shard owns.
      self._blob_reader.readline()
      self._read_before_start = False
    start_position = self._blob_reader.tell()

    if start_position > self._end_position:
      raise StopIteration()

    line = self._blob_reader.readline()

    if not line:
      raise StopIteration()

    return start_position, line.rstrip("\n")

  def to_json(self):
    """Returns an json-compatible input shard spec for remaining inputs."""
    new_pos = self._blob_reader.tell()
    if self._has_iterated:
      # Back up one byte so the deserialized reader sees a non-zero start
      # position and re-skips to the beginning of the next full line.
      new_pos -= 1
    return {self.BLOB_KEY_PARAM: self._blob_key,
            self.INITIAL_POSITION_PARAM: new_pos,
            self.END_POSITION_PARAM: self._end_position}

  def __str__(self):
    """Returns the string representation of this BlobstoreLineInputReader."""
    return "blobstore.BlobKey(%r):[%d, %d]" % (
        self._blob_key, self._blob_reader.tell(), self._end_position)

  @classmethod
  def from_json(cls, json):
    """Instantiates an instance of this InputReader for the given shard spec."""
    return cls(json[cls.BLOB_KEY_PARAM],
               json[cls.INITIAL_POSITION_PARAM],
               json[cls.END_POSITION_PARAM])

  @classmethod
  def validate(cls, mapper_spec):
    """Validates mapper spec and all mapper parameters.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise BadReaderParamsError("Mapper input reader class mismatch")
    params = _get_params(mapper_spec)
    if cls.BLOB_KEYS_PARAM not in params:
      raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
      # Multiple comma-free blob keys may be packed into one string.
      blob_keys = blob_keys.split(",")
    if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
      raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
    if not blob_keys:
      raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
    for blob_key in blob_keys:
      blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
      if not blob_info:
        raise BadReaderParamsError("Could not find blobinfo for key %s" %
                                   blob_key)

  @classmethod
  def split_input(cls, mapper_spec):
    """Returns a list of shard_count input_spec_shards for input_spec.

    Each blob is split into roughly equal-size byte ranges; the per-shard
    skip-first-partial-line behavior (see __init__) makes the split safe
    at arbitrary byte offsets.

    Args:
      mapper_spec: The mapper specification to split from. Must contain
          'blob_keys' parameter with one or more blob keys.

    Returns:
      A list of BlobstoreInputReaders corresponding to the specified shards.
    """
    params = _get_params(mapper_spec)
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
      blob_keys = blob_keys.split(",")

    blob_sizes = {}
    for blob_key in blob_keys:
      blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
      blob_sizes[blob_key] = blob_info.size

    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
    shards_per_blob = shard_count // len(blob_keys)
    if shards_per_blob == 0:
      # Always produce at least one shard per blob.
      shards_per_blob = 1

    chunks = []
    for blob_key, blob_size in blob_sizes.items():
      blob_chunk_size = blob_size // shards_per_blob
      for i in xrange(shards_per_blob - 1):
        chunks.append(BlobstoreLineInputReader.from_json(
            {cls.BLOB_KEY_PARAM: blob_key,
             cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
             cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
      # The final chunk absorbs any remainder bytes from integer division.
      chunks.append(BlobstoreLineInputReader.from_json(
          {cls.BLOB_KEY_PARAM: blob_key,
           cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
           cls.END_POSITION_PARAM: blob_size}))

    return chunks
class BlobstoreZipInputReader(InputReader):
  """Input reader for files from a zip archive stored in the Blobstore.

  Each instance of the reader will read the TOC, from the end of the zip file,
  and then only the contained files which it is responsible for.
  """

  # Maximum number of shards to allow.
  _MAX_SHARD_COUNT = 256

  # Mapper / serialization parameter names.
  BLOB_KEY_PARAM = "blob_key"
  START_INDEX_PARAM = "start_index"
  END_INDEX_PARAM = "end_index"

  def __init__(self, blob_key, start_index, end_index,
               _reader=blobstore.BlobReader):
    """Initializes this instance with the given blob key and file range.

    This BlobstoreZipInputReader will read from the file with index start_index
    up to but not including the file with index end_index.

    Args:
      blob_key: the BlobKey that this input reader is processing.
      start_index: the index of the first file to read.
      end_index: the index of the first file that will not be read.
      _reader: a callable that returns a file-like object for reading blobs.
        Used for dependency injection.
    """
    self._blob_key = blob_key
    self._start_index = start_index
    self._end_index = end_index
    self._reader = _reader
    # Created lazily on first call to next(); see below.
    self._zip = None
    self._entries = None

  def next(self):
    """Returns the next input from this input reader as (ZipInfo, opener) tuple.

    Returns:
      The next input from this input reader, in the form of a 2-tuple.
      The first element of the tuple is a zipfile.ZipInfo object.
      The second element of the tuple is a zero-argument function that, when
      called, returns the complete body of the file.
    """
    if not self._zip:
      self._zip = zipfile.ZipFile(self._reader(self._blob_key))
      # Reversed so the next entry to serve can be pop()ed off the end.
      self._entries = self._zip.infolist()[self._start_index:self._end_index]
      self._entries.reverse()
    if not self._entries:
      raise StopIteration()
    entry = self._entries.pop()
    # Advance so a serialized/resumed reader starts at the next entry.
    self._start_index += 1
    return (entry, lambda: self._read(entry))

  def _read(self, entry):
    """Read entry content.

    Args:
      entry: zip file entry as zipfile.ZipInfo.

    Returns:
      Entry content as string.
    """
    start_time = time.time()
    content = self._zip.read(entry.filename)

    ctx = context.get()
    if ctx:
      operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
      operation.counters.Increment(
          COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)

    return content

  @classmethod
  def from_json(cls, json):
    """Creates an instance of the InputReader for the given input shard state.

    Args:
      json: The InputReader state as a dict-like object.

    Returns:
      An instance of the InputReader configured using the values of json.
    """
    return cls(json[cls.BLOB_KEY_PARAM],
               json[cls.START_INDEX_PARAM],
               json[cls.END_INDEX_PARAM])

  def to_json(self):
    """Returns an input shard state for the remaining inputs.

    Returns:
      A json-izable version of the remaining InputReader.
    """
    return {self.BLOB_KEY_PARAM: self._blob_key,
            self.START_INDEX_PARAM: self._start_index,
            self.END_INDEX_PARAM: self._end_index}

  def __str__(self):
    """Returns the string representation of this BlobstoreZipInputReader."""
    return "blobstore.BlobKey(%r):[%d, %d]" % (
        self._blob_key, self._start_index, self._end_index)

  @classmethod
  def validate(cls, mapper_spec):
    """Validates mapper spec and all mapper parameters.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise BadReaderParamsError("Mapper input reader class mismatch")
    params = _get_params(mapper_spec)
    if cls.BLOB_KEY_PARAM not in params:
      raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
    blob_key = params[cls.BLOB_KEY_PARAM]
    blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
    if not blob_info:
      raise BadReaderParamsError("Could not find blobinfo for key %s" %
                                 blob_key)

  @classmethod
  def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
    """Returns a list of input shard states for the input spec.

    Args:
      mapper_spec: The MapperSpec for this InputReader. Must contain
          'blob_key' parameter with one blob key.
      _reader: a callable that returns a file-like object for reading blobs.
        Used for dependency injection.

    Returns:
      A list of InputReaders spanning files within the zip.
    """
    params = _get_params(mapper_spec)
    blob_key = params[cls.BLOB_KEY_PARAM]
    zip_input = zipfile.ZipFile(_reader(blob_key))
    files = zip_input.infolist()
    total_size = sum(x.file_size for x in files)
    num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
    size_per_shard = total_size // num_shards

    # Break the list of files into contiguous index ranges, each holding
    # approximately size_per_shard bytes of uncompressed content.
    shard_start_indexes = [0]
    current_shard_size = 0
    for i, fileinfo in enumerate(files):
      current_shard_size += fileinfo.file_size
      if current_shard_size >= size_per_shard:
        shard_start_indexes.append(i + 1)
        current_shard_size = 0

    # Make sure the trailing partial shard, if any, is included.
    if shard_start_indexes[-1] != len(files):
      shard_start_indexes.append(len(files))

    return [cls(blob_key, start_index, end_index, _reader)
            for start_index, end_index
            in zip(shard_start_indexes, shard_start_indexes[1:])]
class BlobstoreZipLineInputReader(InputReader):
  """Input reader for newline delimited files in zip archives from Blobstore.

  This has the same external interface as the BlobstoreLineInputReader, in that
  it takes a list of blobs as its input and yields lines to the reader.
  However the blobs themselves are expected to be zip archives of line delimited
  files instead of the files themselves.

  This is useful as many line delimited files gain greatly from compression.
  """

  # Maximum number of shards to allow.
  _MAX_SHARD_COUNT = 256

  # Maximum number of blob keys to allow.
  _MAX_BLOB_KEYS_COUNT = 246

  # Mapper parameter name.
  BLOB_KEYS_PARAM = "blob_keys"

  # Serialization parameter names (see to_json / from_json).
  BLOB_KEY_PARAM = "blob_key"
  START_FILE_INDEX_PARAM = "start_file_index"
  END_FILE_INDEX_PARAM = "end_file_index"
  OFFSET_PARAM = "offset"

  def __init__(self, blob_key, start_file_index, end_file_index, offset,
               _reader=blobstore.BlobReader):
    """Initializes this instance with the given blob key and file range.

    This BlobstoreZipLineInputReader will read from the file with index
    start_file_index up to but not including the file with index end_file_index.
    It will return lines starting at offset within file[start_file_index]

    Args:
      blob_key: the BlobKey that this input reader is processing.
      start_file_index: the index of the first file to read within the zip.
      end_file_index: the index of the first file that will not be read.
      offset: the byte offset within blob_key.zip[start_file_index] to start
        reading. The reader will continue to the end of the file.
      _reader: a callable that returns a file-like object for reading blobs.
        Used for dependency injection.
    """
    self._blob_key = blob_key
    self._start_file_index = start_file_index
    self._end_file_index = end_file_index
    self._initial_offset = offset
    self._reader = _reader
    # All three are created lazily as files are opened; see next().
    self._zip = None
    self._entries = None
    self._filestream = None

  @classmethod
  def validate(cls, mapper_spec):
    """Validates mapper spec and all mapper parameters.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise BadReaderParamsError("Mapper input reader class mismatch")
    params = _get_params(mapper_spec)
    if cls.BLOB_KEYS_PARAM not in params:
      raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
      # Multiple comma-free blob keys may be packed into one string.
      blob_keys = blob_keys.split(",")
    if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
      raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
    if not blob_keys:
      raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
    for blob_key in blob_keys:
      blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
      if not blob_info:
        raise BadReaderParamsError("Could not find blobinfo for key %s" %
                                   blob_key)

  @classmethod
  def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
    """Returns a list of input readers for the input spec.

    Args:
      mapper_spec: The MapperSpec for this InputReader. Must contain
          'blob_keys' parameter with one or more blob keys.
      _reader: a callable that returns a file-like object for reading blobs.
        Used for dependency injection.

    Returns:
      A list of InputReaders spanning the subfiles within the blobs.
      There will be at least one reader per blob, but it will otherwise
      attempt to keep the expanded size even.
    """
    params = _get_params(mapper_spec)
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
      blob_keys = blob_keys.split(",")

    blob_files = {}
    total_size = 0
    for blob_key in blob_keys:
      zip_input = zipfile.ZipFile(_reader(blob_key))
      blob_files[blob_key] = zip_input.infolist()
      total_size += sum(x.file_size for x in blob_files[blob_key])

    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)

    # Group contiguous runs of zip entries into shards of roughly
    # size_per_shard uncompressed bytes; shards never cross blobs.
    size_per_shard = total_size // shard_count

    readers = []
    for blob_key in blob_keys:
      files = blob_files[blob_key]
      current_shard_size = 0
      start_file_index = 0
      next_file_index = 0
      for fileinfo in files:
        next_file_index += 1
        current_shard_size += fileinfo.file_size
        if current_shard_size >= size_per_shard:
          readers.append(cls(blob_key, start_file_index, next_file_index, 0,
                             _reader))
          current_shard_size = 0
          start_file_index = next_file_index
      # Trailing partial shard for this blob, if any.
      if current_shard_size != 0:
        readers.append(cls(blob_key, start_file_index, next_file_index, 0,
                           _reader))

    return readers

  def next(self):
    """Returns the next line from this input reader as (lineinfo, line) tuple.

    Returns:
      The next input from this input reader, in the form of a 2-tuple.
      The first element of the tuple describes the source, it is itself
        a tuple (blobkey, filenumber, byteoffset).
      The second element of the tuple is the line found at that offset.
    """
    if not self._filestream:
      if not self._zip:
        self._zip = zipfile.ZipFile(self._reader(self._blob_key))
        # Reversed so the next entry to open can be pop()ed off the end.
        self._entries = self._zip.infolist()[self._start_file_index:
                                             self._end_file_index]
        self._entries.reverse()
      if not self._entries:
        raise StopIteration()
      entry = self._entries.pop()
      value = self._zip.read(entry.filename)
      self._filestream = StringIO.StringIO(value)
      if self._initial_offset:
        # Resume mid-file: seek to the stored offset and discard the
        # (possibly partial) line we landed in; see _next_offset().
        self._filestream.seek(self._initial_offset)
        self._filestream.readline()

    start_position = self._filestream.tell()
    line = self._filestream.readline()

    if not line:
      # Current file exhausted: close it and recurse into the next one.
      self._filestream.close()
      self._filestream = None
      self._start_file_index += 1
      self._initial_offset = 0
      return self.next()

    return ((self._blob_key, self._start_file_index, start_position),
            line.rstrip("\n"))

  def _next_offset(self):
    """Return the offset of the next line to read."""
    if self._filestream:
      offset = self._filestream.tell()
      if offset:
        # Back up one byte so a resumed reader re-skips to the start of
        # the next full line (mirrors BlobstoreLineInputReader.to_json).
        offset -= 1
    else:
      offset = self._initial_offset

    return offset

  def to_json(self):
    """Returns an input shard state for the remaining inputs.

    Returns:
      A json-izable version of the remaining InputReader.
    """
    return {self.BLOB_KEY_PARAM: self._blob_key,
            self.START_FILE_INDEX_PARAM: self._start_file_index,
            self.END_FILE_INDEX_PARAM: self._end_file_index,
            self.OFFSET_PARAM: self._next_offset()}

  @classmethod
  def from_json(cls, json, _reader=blobstore.BlobReader):
    """Creates an instance of the InputReader for the given input shard state.

    Args:
      json: The InputReader state as a dict-like object.
      _reader: For dependency injection.

    Returns:
      An instance of the InputReader configured using the values of json.
    """
    return cls(json[cls.BLOB_KEY_PARAM],
               json[cls.START_FILE_INDEX_PARAM],
               json[cls.END_FILE_INDEX_PARAM],
               json[cls.OFFSET_PARAM],
               _reader)

  def __str__(self):
    """Returns the string representation of this reader.

    Returns:
      string blobkey:[start file num, end file num]:current offset.
    """
    return "blobstore.BlobKey(%r):[%d, %d]:%d" % (
        self._blob_key, self._start_file_index, self._end_file_index,
        self._next_offset())
class RandomStringInputReader(InputReader):
  """RandomStringInputReader generates random strings as output.

  Primary usage is to populate output with testing entries.
  """

  # Mapper parameter names.
  COUNT = "count"
  STRING_LENGTH = "string_length"

  DEFAULT_STRING_LENGTH = 10

  def __init__(self, count, string_length):
    """Initialize input reader.

    Args:
      count: number of entries this shard should generate.
      string_length: the length of generated random strings.
    """
    self._count = count
    self._string_length = string_length

  def __iter__(self):
    """Yields self._count random lowercase-ascii strings."""
    ctx = context.get()

    while self._count:
      self._count -= 1
      start_time = time.time()
      content = "".join(random.choice(string.ascii_lowercase)
                        for _ in range(self._string_length))
      if ctx:
        operation.counters.Increment(
            COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
        operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
      yield content

  @classmethod
  def split_input(cls, mapper_spec):
    """Divides the requested count evenly across shard_count readers.

    Any remainder after integer division is produced by one extra reader.
    """
    params = _get_params(mapper_spec)
    count = params[cls.COUNT]
    string_length = cls.DEFAULT_STRING_LENGTH
    if cls.STRING_LENGTH in params:
      string_length = params[cls.STRING_LENGTH]

    shard_count = mapper_spec.shard_count
    count_per_shard = count // shard_count

    mr_input_readers = [
        cls(count_per_shard, string_length) for _ in range(shard_count)]

    left = count - count_per_shard*shard_count
    if left > 0:
      mr_input_readers.append(cls(left, string_length))

    return mr_input_readers

  @classmethod
  def validate(cls, mapper_spec):
    """Validates mapper spec and all mapper parameters.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise BadReaderParamsError("Mapper input reader class mismatch")

    params = _get_params(mapper_spec)
    if cls.COUNT not in params:
      raise BadReaderParamsError("Must specify %s" % cls.COUNT)
    if not isinstance(params[cls.COUNT], int):
      raise BadReaderParamsError("%s should be an int but is %s" %
                                 (cls.COUNT, type(params[cls.COUNT])))
    if params[cls.COUNT] <= 0:
      # Bug fix: the original raised the literal string
      # "%s should be a positive int" without applying the format
      # argument, so users saw a bare "%s" in the error message.
      raise BadReaderParamsError("%s should be a positive int" % cls.COUNT)
    if cls.STRING_LENGTH in params and not (
        isinstance(params[cls.STRING_LENGTH], int) and
        params[cls.STRING_LENGTH] > 0):
      raise BadReaderParamsError("%s should be a positive int but is %s" %
                                 (cls.STRING_LENGTH, params[cls.STRING_LENGTH]))
    if (not isinstance(mapper_spec.shard_count, int) or
        mapper_spec.shard_count <= 0):
      raise BadReaderParamsError(
          "shard_count should be a positive int but is %s" %
          mapper_spec.shard_count)

  @classmethod
  def from_json(cls, json):
    """Creates a reader from its to_json() representation."""
    return cls(json[cls.COUNT], json[cls.STRING_LENGTH])

  def to_json(self):
    """Serializes remaining reader state as a json-compatible map."""
    return {self.COUNT: self._count, self.STRING_LENGTH: self._string_length}
class ConsistentKeyReader(DatastoreKeyInputReader):
"""A key reader which reads consistent data from datastore.
Datastore might have entities which were written, but not visible through
queries for some time. Typically these entities can be only read inside
transaction until they are 'applied'.
This reader reads all keys even if they are not visible. It might take
significant time to start yielding some data because it has to apply all
modifications created before its start.
"""
START_TIME_US_PARAM = "start_time_us"
UNAPPLIED_LOG_FILTER = "__unapplied_log_timestamp_us__ <"
DUMMY_KIND = "DUMMY_KIND"
DUMMY_ID = 106275677020293L
UNAPPLIED_QUERY_DEADLINE = 270
def _get_unapplied_jobs_accross_namespaces(self,
namespace_start,
namespace_end,
app):
filters = {"__key__ >=": db.Key.from_path("__namespace__",
namespace_start or 1,
_app=app),
"__key__ <=": db.Key.from_path("__namespace__",
namespace_end or 1,
_app=app),
self.UNAPPLIED_LOG_FILTER: self.start_time_us}
unapplied_query = datastore.Query(filters=filters, keys_only=True, _app=app)
return unapplied_query.Get(
limit=self._batch_size,
config=datastore_rpc.Configuration(
deadline=self.UNAPPLIED_QUERY_DEADLINE))
def _iter_ns_range(self):
while True:
unapplied_jobs = self._get_unapplied_jobs_accross_namespaces(
self._ns_range.namespace_start,
self._ns_range.namespace_end,
self._ns_range.app)
if not unapplied_jobs:
break
self._apply_jobs(unapplied_jobs)
for o in super(ConsistentKeyReader, self)._iter_ns_range():
yield o
def _iter_key_range(self, k_range):
assert hasattr(self, "start_time_us"), "start_time_us property was not set"
if self._ns_range is None:
self._apply_key_range(k_range)
for o in super(ConsistentKeyReader, self)._iter_key_range(k_range):
yield o
def _apply_key_range(self, k_range):
"""Apply all jobs in the given KeyRange."""
apply_range = copy.deepcopy(k_range)
while True:
unapplied_query = self._make_unapplied_query(apply_range)
unapplied_jobs = unapplied_query.Get(
limit=self._batch_size,
config=datastore_rpc.Configuration(
deadline=self.UNAPPLIED_QUERY_DEADLINE))
if not unapplied_jobs:
break
self._apply_jobs(unapplied_jobs)
apply_range.advance(unapplied_jobs[-1])
def _make_unapplied_query(self, k_range):
"""Returns a datastore.Query that finds the unapplied keys in k_range."""
unapplied_query = k_range.make_ascending_datastore_query(
kind=None, keys_only=True)
unapplied_query[
ConsistentKeyReader.UNAPPLIED_LOG_FILTER] = self.start_time_us
return unapplied_query
def _apply_jobs(self, unapplied_jobs):
"""Apply all jobs implied by the given keys."""
keys_to_apply = []
for key in unapplied_jobs:
path = key.to_path() + [ConsistentKeyReader.DUMMY_KIND,
ConsistentKeyReader.DUMMY_ID]
keys_to_apply.append(
db.Key.from_path(_app=key.app(), namespace=key.namespace(), *path))
db.get(keys_to_apply, config=datastore_rpc.Configuration(
deadline=self.UNAPPLIED_QUERY_DEADLINE,
read_policy=datastore_rpc.Configuration.APPLY_ALL_JOBS_CONSISTENCY))
@classmethod
def _split_input_from_namespace(cls,
app,
namespace,
entity_kind_name,
shard_count):
key_ranges = super(ConsistentKeyReader, cls)._split_input_from_namespace(
app, namespace, entity_kind_name, shard_count)
assert len(key_ranges) == shard_count
try:
last_key_range_index = key_ranges.index(None) - 1
except ValueError:
last_key_range_index = shard_count - 1
if last_key_range_index != -1:
key_ranges[0].key_start = None
key_ranges[0].include_start = False
key_ranges[last_key_range_index].key_end = None
key_ranges[last_key_range_index].include_end = False
return key_ranges
@classmethod
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
params, shard_count):
readers = super(ConsistentKeyReader, cls)._split_input_from_params(
app,
namespaces,
entity_kind_name,
params,
shard_count)
if not readers:
readers = [cls(entity_kind_name,
key_ranges=None,
ns_range=namespace_range.NamespaceRange(),
batch_size=shard_count)]
return readers
@classmethod
def split_input(cls, mapper_spec):
"""Splits input into key ranges."""
readers = super(ConsistentKeyReader, cls).split_input(mapper_spec)
start_time_us = _get_params(mapper_spec).get(
cls.START_TIME_US_PARAM, long(time.time() * 1e6))
for reader in readers:
reader.start_time_us = start_time_us
return readers
def to_json(self):
"""Serializes all the data in this reader into json form.
Returns:
all the data in json-compatible map.
"""
json_dict = super(DatastoreKeyInputReader, self).to_json()
json_dict[self.START_TIME_US_PARAM] = self.start_time_us
return json_dict
@classmethod
def from_json(cls, json):
"""Create new ConsistentKeyReader from the json, encoded by to_json.
Args:
json: json map representation of ConsistentKeyReader.
Returns:
an instance of ConsistentKeyReader with all data deserialized from json.
"""
reader = super(ConsistentKeyReader, cls).from_json(json)
reader.start_time_us = json[cls.START_TIME_US_PARAM]
return reader
class NamespaceInputReader(InputReader):
  """An input reader to iterate over namespaces.

  This reader yields namespace names as string.
  split_input produces one reader per contiguous namespace range, up to the
  requested shard count.
  """

  # Serialization parameter names.
  NAMESPACE_RANGE_PARAM = "namespace_range"
  BATCH_SIZE_PARAM = "batch_size"
  # Default number of namespace keys fetched per datastore query.
  _BATCH_SIZE = 10

  def __init__(self, ns_range, batch_size = _BATCH_SIZE):
    # ns_range: namespace_range.NamespaceRange still to be read; advanced
    # in __iter__ as namespaces are consumed.
    self.ns_range = ns_range
    self._batch_size = batch_size

  def to_json(self):
    """Serializes all the data in this query range into json form.

    Returns:
      all the data in json-compatible map.
    """
    return {self.NAMESPACE_RANGE_PARAM: self.ns_range.to_json_object(),
            self.BATCH_SIZE_PARAM: self._batch_size}

  @classmethod
  def from_json(cls, json):
    """Create new NamespaceInputReader from the json, encoded by to_json.

    Args:
      json: json map representation of NamespaceInputReader.

    Returns:
      an instance of NamespaceInputReader with all data deserialized from json.
    """
    return cls(
        namespace_range.NamespaceRange.from_json_object(
            json[cls.NAMESPACE_RANGE_PARAM]),
        json[cls.BATCH_SIZE_PARAM])

  @classmethod
  def validate(cls, mapper_spec):
    """Validates mapper spec.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise BadReaderParamsError("Input reader class mismatch")
    params = _get_params(mapper_spec)
    if cls.BATCH_SIZE_PARAM in params:
      try:
        batch_size = int(params[cls.BATCH_SIZE_PARAM])
        if batch_size < 1:
          raise BadReaderParamsError("Bad batch size: %s" % batch_size)
      except ValueError, e:
        raise BadReaderParamsError("Bad batch size: %s" % e)

  @classmethod
  def split_input(cls, mapper_spec):
    """Returns a list of input readers for the input spec.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Returns:
      A list of InputReaders, one per contiguous namespace range.
    """
    batch_size = int(_get_params(mapper_spec).get(
        cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
    shard_count = mapper_spec.shard_count
    namespace_ranges = namespace_range.NamespaceRange.split(shard_count,
                                                            contiguous=True)
    return [NamespaceInputReader(ns_range, batch_size)
            for ns_range in namespace_ranges]

  def __iter__(self):
    while True:
      keys = self.ns_range.make_datastore_query().Get(limit=self._batch_size)
      if not keys:
        break

      for key in keys:
        namespace = metadata.Namespace.key_to_namespace(key)
        # Advance the range past each yielded namespace so that serializing
        # this reader mid-iteration resumes after the last one consumed.
        self.ns_range = self.ns_range.with_start_after(namespace)
        yield namespace

  def __str__(self):
    return repr(self.ns_range)
class RecordsReader(InputReader):
  """Reader to read a list of Files API files in records format.

  The number of input shards can be specified by the SHARDS_PARAM
  mapper parameter. Input files cannot be split, so there will be at most
  one shard per file. Also the number of shards will not be reduced based on
  the number of input files, so shards in always equals shards out.
  """

  # Mapper parameter names: a single file, or a list (or comma-joined
  # string) of files.
  FILE_PARAM = "file"
  FILES_PARAM = "files"

  def __init__(self, filenames, position):
    """Constructor.

    Args:
      filenames: list of filenames.
      position: file position to start reading from as int.
    """
    self._filenames = filenames
    if self._filenames:
      self._reader = records.RecordsReader(
          files.BufferedFile(self._filenames[0]))
      self._reader.seek(position)
    else:
      self._reader = None

  def __iter__(self):
    """Iterate over records in file.

    Yields records as strings.
    """
    ctx = context.get()

    while self._reader:
      try:
        start_time = time.time()
        record = self._reader.read()
        if ctx:
          operation.counters.Increment(
              COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
          operation.counters.Increment(COUNTER_IO_READ_BYTES, len(record))(ctx)
        yield record
      except (files.ExistenceError), e:
        # Input file disappeared: the whole job cannot succeed.
        raise errors.FailJobError("ExistenceError: %s" % e)
      except (files.UnknownError), e:
        # Transient Files API failure: retry only the current slice.
        raise errors.RetrySliceError("UnknownError: %s" % e)
      except EOFError:
        # Finished the current file; advance to the next one, if any.
        self._filenames.pop(0)
        if not self._filenames:
          self._reader = None
        else:
          self._reader = records.RecordsReader(
              files.BufferedFile(self._filenames[0]))

  @classmethod
  def from_json(cls, json):
    """Creates an instance of the InputReader for the given input shard state.

    Args:
      json: The InputReader state as a dict-like object.

    Returns:
      An instance of the InputReader configured using the values of json.
    """
    return cls(json["filenames"], json["position"])

  def to_json(self):
    """Returns an input shard state for the remaining inputs.

    Returns:
      A json-izable version of the remaining InputReader.
    """
    result = {
        "filenames": self._filenames,
        "position": 0,
        }
    if self._reader:
      result["position"] = self._reader.tell()
    return result

  @classmethod
  def split_input(cls, mapper_spec):
    """Returns a list of input readers for the input spec.

    Files are distributed round-robin across shard_count readers; readers
    are then ordered by descending batch size.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Returns:
      A list of InputReaders.
    """
    params = _get_params(mapper_spec)
    shard_count = mapper_spec.shard_count

    if cls.FILES_PARAM in params:
      filenames = params[cls.FILES_PARAM]
      if isinstance(filenames, basestring):
        filenames = filenames.split(",")
    else:
      filenames = [params[cls.FILE_PARAM]]

    batch_list = [[] for _ in xrange(shard_count)]
    for index, filename in enumerate(filenames):
      # Simplest round robin so we don't have any short shards.
      batch_list[index % shard_count].append(filenames[index])

    # Sort from most shards to least shards so the short shard is last.
    batch_list.sort(reverse=True, key=lambda x: len(x))
    return [cls(batch, 0) for batch in batch_list]

  @classmethod
  def validate(cls, mapper_spec):
    """Validates mapper spec and all mapper parameters.

    Args:
      mapper_spec: The MapperSpec for this InputReader.

    Raises:
      BadReaderParamsError: required parameters are missing or invalid.
    """
    if mapper_spec.input_reader_class() != cls:
      raise errors.BadReaderParamsError("Input reader class mismatch")
    params = _get_params(mapper_spec)
    if (cls.FILES_PARAM not in params and
        cls.FILE_PARAM not in params):
      raise BadReaderParamsError(
          "Must specify '%s' or '%s' parameter for mapper input" %
          (cls.FILES_PARAM, cls.FILE_PARAM))

  def __str__(self):
    position = 0
    if self._reader:
      position = self._reader.tell()
    return "%s:%s" % (self._filenames, position)
class LogInputReader(InputReader):
    """Input reader for a time range of logs via the Logs Reader API.

    The number of input shards may be specified by the SHARDS_PARAM mapper
    parameter. A starting and ending time (in seconds since the Unix epoch) are
    required to generate time ranges over which to shard the input.
    """

    # Public mapper parameter names.
    START_TIME_PARAM = "start_time"
    END_TIME_PARAM = "end_time"
    MINIMUM_LOG_LEVEL_PARAM = "minimum_log_level"
    INCLUDE_INCOMPLETE_PARAM = "include_incomplete"
    INCLUDE_APP_LOGS_PARAM = "include_app_logs"
    VERSION_IDS_PARAM = "version_ids"

    # Internal parameters used only to checkpoint/restore reader state.
    _OFFSET_PARAM = "offset"
    _PROTOTYPE_REQUEST_PARAM = "prototype_request"

    _PARAMS = frozenset([START_TIME_PARAM, END_TIME_PARAM, _OFFSET_PARAM,
                         MINIMUM_LOG_LEVEL_PARAM, INCLUDE_INCOMPLETE_PARAM,
                         INCLUDE_APP_LOGS_PARAM, VERSION_IDS_PARAM,
                         _PROTOTYPE_REQUEST_PARAM])
    _KWARGS = frozenset([_OFFSET_PARAM, _PROTOTYPE_REQUEST_PARAM])

    def __init__(self,
                 start_time=None,
                 end_time=None,
                 minimum_log_level=None,
                 include_incomplete=False,
                 include_app_logs=False,
                 version_ids=None,
                 **kwargs):
        """Constructor.

        Args:
          start_time: The earliest request completion or last-update time of logs
            that should be mapped over, in seconds since the Unix epoch.
          end_time: The latest request completion or last-update time that logs
            should be mapped over, in seconds since the Unix epoch.
          minimum_log_level: An application log level which serves as a filter on
            the requests mapped over--requests with no application log at or above
            the specified level will be omitted, even if include_app_logs is False.
          include_incomplete: Whether or not to include requests that have started
            but not yet finished, as a boolean. Defaults to False.
          include_app_logs: Whether or not to include application level logs in the
            mapped logs, as a boolean. Defaults to False.
          version_ids: A list of version ids whose logs should be mapped against.
        """
        InputReader.__init__(self)

        # Collect everything into a single kwargs dict for logservice.fetch().
        self.__params = dict(kwargs)

        if start_time is not None:
            self.__params[self.START_TIME_PARAM] = start_time
        if end_time is not None:
            self.__params[self.END_TIME_PARAM] = end_time
        if minimum_log_level is not None:
            self.__params[self.MINIMUM_LOG_LEVEL_PARAM] = minimum_log_level
        if include_incomplete is not None:
            self.__params[self.INCLUDE_INCOMPLETE_PARAM] = include_incomplete
        if include_app_logs is not None:
            self.__params[self.INCLUDE_APP_LOGS_PARAM] = include_app_logs
        if version_ids:
            self.__params[self.VERSION_IDS_PARAM] = version_ids

        if self._PROTOTYPE_REQUEST_PARAM in self.__params:
            # State restored via from_json() carries an encoded LogReadRequest;
            # rebuild the protocol buffer object from the serialized bytes.
            prototype_request = log_service_pb.LogReadRequest(
                self.__params[self._PROTOTYPE_REQUEST_PARAM])
            self.__params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request

    def __iter__(self):
        """Iterates over logs in a given range of time.

        Yields:
          A RequestLog containing all the information for a single request.
        """
        for log in logservice.fetch(**self.__params):
            # Remember the last offset seen so to_json() checkpoints progress.
            self.__params[self._OFFSET_PARAM] = log.offset
            yield log

    @classmethod
    def from_json(cls, json):
        """Creates an instance of the InputReader for the given input shard's state.

        Args:
          json: The InputReader state as a dict-like object.

        Returns:
          An instance of the InputReader configured using the given JSON parameters.
        """
        # Keep only known parameters; keys come back as unicode, so re-str them.
        params = dict((str(k), v) for k, v in json.iteritems()
                      if k in cls._PARAMS)

        if cls._OFFSET_PARAM in params:
            params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
        return cls(**params)

    def to_json(self):
        """Returns an input shard state for the remaining inputs.

        Returns:
          A JSON serializable version of the remaining input to read.
        """
        params = dict(self.__params)
        if self._PROTOTYPE_REQUEST_PARAM in params:
            prototype_request = params[self._PROTOTYPE_REQUEST_PARAM]
            params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request.Encode()
        if self._OFFSET_PARAM in params:
            params[self._OFFSET_PARAM] = base64.b64encode(params[self._OFFSET_PARAM])
        return params

    @classmethod
    def split_input(cls, mapper_spec):
        """Returns a list of input readers for the given input specification.

        Args:
          mapper_spec: The MapperSpec for this InputReader.

        Returns:
          A list of InputReaders.
        """
        params = _get_params(mapper_spec)
        shard_count = mapper_spec.shard_count

        # Split [start_time, end_time) into shard_count contiguous ranges;
        # the final shard absorbs any rounding remainder.
        start_time = params[cls.START_TIME_PARAM]
        end_time = params[cls.END_TIME_PARAM]
        seconds_per_shard = (end_time - start_time) / shard_count

        shards = []
        for _ in xrange(shard_count - 1):
            params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] +
                                          seconds_per_shard)
            shards.append(LogInputReader(**params))
            params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM]

        params[cls.END_TIME_PARAM] = end_time
        return shards + [LogInputReader(**params)]

    @classmethod
    def validate(cls, mapper_spec):
        """Validates the mapper's specification and all necessary parameters.

        Args:
          mapper_spec: The MapperSpec to be used with this InputReader.

        Raises:
          BadReaderParamsError: If the user fails to specify both a starting time
            and an ending time, or if the starting time is later than the ending
            time.
        """
        if mapper_spec.input_reader_class() != cls:
            raise errors.BadReaderParamsError("Input reader class mismatch")

        params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)
        if cls.VERSION_IDS_PARAM not in params:
            raise errors.BadReaderParamsError("Must specify a list of version ids "
                                              "for mapper input")
        if (cls.START_TIME_PARAM not in params or
                params[cls.START_TIME_PARAM] is None):
            raise errors.BadReaderParamsError("Must specify a starting time for "
                                              "mapper input")
        if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:
            params[cls.END_TIME_PARAM] = time.time()

        if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:
            raise errors.BadReaderParamsError("The starting time cannot be later "
                                              "than or the same as the ending time.")

        if cls._PROTOTYPE_REQUEST_PARAM in params:
            try:
                params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(
                    params[cls._PROTOTYPE_REQUEST_PARAM])
            except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
                raise errors.BadReaderParamsError("The prototype request must be "
                                                  "parseable as a LogReadRequest.")

        # Delegate the remaining validation to the Logs API itself.
        try:
            logservice.fetch(**params)
        except logservice.InvalidArgumentError as e:
            raise errors.BadReaderParamsError("One or more parameters are not valid "
                                              "inputs to logservice.fetch(): %s" % e)

    def __str__(self):
        """Returns the string representation of this LogInputReader."""
        params = []
        for key, value in self.__params.iteritems():
            # Quote the opaque binary parameters. Compare with ==, not "is":
            # keys restored by from_json() are rebuilt strings and are not
            # guaranteed to be the same objects as the class constants.
            if key in (self._PROTOTYPE_REQUEST_PARAM, self._OFFSET_PARAM):
                params.append("%s='%s'" % (key, value))
            else:
                params.append("%s=%s" % (key, value))

        return "LogInputReader(%s)" % ", ".join(params)
| {
"content_hash": "0af9e3eb610b01de3db0ca350a1dd3f1",
"timestamp": "",
"source": "github",
"line_count": 2042,
"max_line_length": 80,
"avg_line_length": 32.67286973555338,
"alnum_prop": 0.644578674420696,
"repo_name": "undoware/neutron-drive",
"id": "d525818b385c1a7ff5ce7a127891cd43d015f2cb",
"size": "67335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_appengine/google/appengine/ext/mapreduce/input_readers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "33528"
},
{
"name": "ActionScript",
"bytes": "87423"
},
{
"name": "C",
"bytes": "2369361"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Java",
"bytes": "374"
},
{
"name": "JavaScript",
"bytes": "1079537"
},
{
"name": "PHP",
"bytes": "33052"
},
{
"name": "Perl",
"bytes": "2901"
},
{
"name": "Python",
"bytes": "31717461"
},
{
"name": "Ruby",
"bytes": "1337"
},
{
"name": "Shell",
"bytes": "7982"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
import remi.gui as gui
from remi import start, App
from remi_ext import TreeTable, SingleRowSelectionTable
class MyApp(App):
    """Demo remi application rendering a TreeTable whose first column forms
    one or more trees (here: a small taxonomy of cable types)."""

    def __init__(self, *args):
        super(MyApp, self).__init__(*args)

    def main(self):
        """Build the root widget and populate it with the demo tree table.

        Returns:
            The root gui.VBox widget that remi should display.
        """
        self.wid = gui.VBox(style={'margin': '5px auto', 'padding': '10px'})

        # Input rows: (parent name, row color, child name, col1, col2, col3).
        # A blank parent marks a tree root; rows must be in hierarchy order.
        table = [('', '#ff9', 'cable', '1', '2', '3'),
                 ('cable', '#ff9', '1-core cable', '1', '2', '3'),
                 ('cable', '#ff9', 'multi core cable', '1', '2', '3'),
                 ('multi core cable', '#ff9', '2-core cable', '1', '2', '3'),
                 ('multi core cable', '#ff9', '3-core cable', '1', '2', '3'),
                 ('3-core cable', '#ff9', '3-core armoured cable', '1', '2', '3'),
                 ('cable', '#ff9', 'armoured cable', '1', '2', '3'),
                 ('armoured cable', '#ff9', '3-core armoured cable', '1', '2', '3')]

        heads_color = '#dfd'
        uoms_color = '#ffd'
        heads = ['heads', heads_color, 'object', 'one', 'two', 'three']
        uoms = ['uom', uoms_color, '', 'mm', 'cm', 'dm']
        self.My_TreeTable(table, heads, heads2=uoms)
        return self.wid

    def My_TreeTable(self, table, heads, heads2=None):
        ''' Define and display a table
            in which the values in first column form one or more trees.
        '''
        self.Define_TreeTable(heads, heads2)
        self.Display_TreeTable(table)

    def Define_TreeTable(self, heads, heads2=None):
        ''' Define a TreeTable with a heading row
            and optionally a second heading row.
        '''
        # First heading row: only the displayable fields (skip name + color).
        display_heads = []
        display_heads.append(tuple(heads[2:]))
        self.tree_table = TreeTable()
        self.tree_table.append_from_list(display_heads, fill_title=True)
        if heads2 is not None:
            # Optional second heading row (e.g. units of measure).
            heads2_color = heads2[1]
            row_widget = gui.TableRow()
            for index, field in enumerate(heads2[2:]):
                row_item = gui.TableItem(text=field,
                                         style={'background-color': heads2_color})
                row_widget.append(row_item, field)
            self.tree_table.append(row_widget, heads2[0])
        self.wid.append(self.tree_table)

    def Display_TreeTable(self, table):
        ''' Display a table in which the values in first column form one or more trees.
            The table has row with fields that are strings of identifiers/names.
            First convert each row into a row_widget and item_widgets
            that are displayed in a TableTree.
            Each input row shall start with a parent field (field[0])
            that determines the tree hierarchy but that is not displayed on that row.
            The parent widget becomes an attribute of the first child widget.
            Field[1] is the row color, field[2:] contains the row values.
            Top child(s) shall have a parent field value that is blank ('').
            The input table rows shall be in the correct sequence.
        '''
        parent_names = []
        hierarchy = {}      # child name -> depth in the tree (roots are 0)
        indent_level = 0    # depth currently open in the TreeTable

        for row in table:
            parent_name = row[0]
            row_color = row[1]
            child_name = row[2]
            row_widget = gui.TableRow(style={'background-color': row_color})
            # 'treeopen' determines whether this row's subtree starts expanded.
            row_widget.attributes['treeopen'] = 'true'
            for index, field in enumerate(row[2:]):
                field_color = '#ffff99'
                row_item = gui.TableItem(text=field,
                                         style={'text-align': 'left',
                                                'background-color': field_color})
                row_widget.append(row_item, field)
                if index == 0:
                    # Remember the parent name on the first-column widget.
                    row_item.parent = parent_name

            # Determine the target depth of this row from its parent.
            print('parent-child:', parent_name, child_name)
            if parent_name == '':
                # The root of each tree has a blank ('') parent.
                hierarchy[child_name] = 0
                parent_names.append(child_name)
                target_level = 0
            elif parent_name in parent_names:
                hierarchy[child_name] = hierarchy[parent_name] + 1
                target_level = hierarchy[child_name]
            else:
                # Parent not seen yet: the input sequence is invalid.
                print('Error: Parent name "{}" does not appear in network'
                      .format(parent_name))
                return

            print('indent, target-pre:', indent_level, target_level,
                  parent_name, child_name)
            # Open/close folds until the displayed indentation matches the
            # target depth. NOTE(review): assumes depth grows by at most one
            # level per row (guaranteed when rows are in hierarchy order).
            if target_level > indent_level:
                self.tree_table.begin_fold()
                indent_level += 1
            if target_level < indent_level:
                while target_level < indent_level:
                    indent_level -= 1
                    self.tree_table.end_fold()
            print('indent, target-post:', indent_level, target_level,
                  parent_name, child_name)

            if child_name not in parent_names:
                parent_names.append(child_name)
            self.tree_table.append(row_widget, child_name)
if __name__ == "__main__":
    # Launch the remi web server hosting MyApp on the local interface only.
    start(MyApp, address='127.0.0.1', port=8081, multiple_instance=False,
          enable_file_cache=True, update_interval=0.1, start_browser=True)
| {
"content_hash": "3094d4460c027a29475367944d4fe551",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 89,
"avg_line_length": 46.8125,
"alnum_prop": 0.5358811748998665,
"repo_name": "dddomodossola/gui",
"id": "1b8ab865f0f2b395dcdb102286dbe8e547771091",
"size": "5992",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/examples_from_contributors/Display_TreeTable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5057"
},
{
"name": "Python",
"bytes": "212276"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
#app_name = 'books'
# URL routes for the books app. Order matters: Django uses the first match.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^books/$', views.book_list, name='books'),
    url(r'^deals/$', views.deals, name='deals'),
    url(r'^contact/$', views.contact, name='contact'),
    # Detail pages are addressed by numeric id plus slug, e.g. /42/my-book/.
    url(r'^(?P<id>\d+)/(?P<slug>[-\w]+)/$',
        views.book_detail,
        name='book_detail'),
    url(r'^subscribe/', views.subscribe, name='subscribe'),
    # NOTE(review): the patterns below are unanchored at the end (no '$'),
    # and 'login'/'search' also omit the trailing slash, so each matches any
    # URL beginning with that prefix -- confirm this is intentional.
    url(r'^login', views.login, name='login'),
    url(r'^logout/', views.logout, name='logout'),
    url(r'^signup/', views.signup, name='signup'),
    url(r'^dashboard/', views.dashboard, name='dashboard'),
    url(r'^account/', views.account, name='account'),
    url(r'^search', views.search, name='search')
]
] | {
"content_hash": "118bc61e00169780195265f6d45c3f7c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 36.42857142857143,
"alnum_prop": 0.6,
"repo_name": "rambasnet/bookstore",
"id": "3dab5769d02a8f79d278684f61e4713c2a7ccd0f",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33"
},
{
"name": "HTML",
"bytes": "15360"
},
{
"name": "Python",
"bytes": "32237"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``layout.yaxis.tickfont.size`` property: a number >= 1."""

    def __init__(
        self, plotly_name="size", parent_name="layout.yaxis.tickfont", **kwargs
    ):
        # Fill in defaults only where the caller did not override them, then
        # forward everything to the base NumberValidator.
        kwargs.setdefault("edit_type", "ticks")
        kwargs.setdefault("min", 1)
        kwargs.setdefault("role", "style")
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "96c39cda6ece2eef437b7181ff17620b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.578,
"repo_name": "plotly/python-api",
"id": "8565798a9d328286c8c0aa59a9c08b3ec8e015a3",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/yaxis/tickfont/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
import subprocess
from ducktape.template import TemplateRenderer
from kafkatest.services.security.minikdc import MiniKdc
class Keytool(object):
    """Helpers that shell out to the JDK ``keytool`` binary to create the
    JKS keystore/truststore pair used by the system tests."""

    @staticmethod
    def generate_keystore_truststore(ssl_dir='.'):
        """
        Generate JKS keystore and truststore and return
        Kafka SSL properties with these stores.
        """
        ks_path = os.path.join(ssl_dir, 'test.keystore.jks')
        ks_password = 'test-ks-passwd'
        key_password = 'test-key-passwd'
        ts_path = os.path.join(ssl_dir, 'test.truststore.jks')
        ts_password = 'test-ts-passwd'

        # Start from a clean slate: keytool refuses to overwrite an existing
        # alias in a stale store.
        for stale_store in (ks_path, ts_path):
            if os.path.exists(stale_store):
                os.remove(stale_store)

        # Create a key pair, export its certificate, and import that
        # certificate into a fresh truststore.
        Keytool.runcmd("keytool -genkeypair -alias test -keyalg RSA -keysize 2048 -keystore %s -storetype JKS -keypass %s -storepass %s -dname CN=systemtest" % (ks_path, key_password, ks_password))
        Keytool.runcmd("keytool -export -alias test -keystore %s -storepass %s -storetype JKS -rfc -file test.crt" % (ks_path, ks_password))
        Keytool.runcmd("keytool -import -alias test -file test.crt -keystore %s -storepass %s -storetype JKS -noprompt" % (ts_path, ts_password))
        os.remove('test.crt')

        return {
            'ssl.keystore.location': ks_path,
            'ssl.keystore.password': ks_password,
            'ssl.key.password': key_password,
            'ssl.truststore.location': ts_path,
            'ssl.truststore.password': ts_password
        }

    @staticmethod
    def runcmd(cmd):
        """Run *cmd* through the shell, discarding its output; raise
        subprocess.CalledProcessError on a non-zero exit status."""
        process = subprocess.Popen(cmd, shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        process.communicate()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(process.returncode, cmd)
class SecurityConfig(TemplateRenderer):
    """Security settings (SSL / SASL) for Kafka system-test services.

    Renders the client/broker security properties and installs the required
    keystores, JAAS config, and Kerberos files on remote test nodes.
    """

    # Supported listener security protocols.
    PLAINTEXT = 'PLAINTEXT'
    SSL = 'SSL'
    SASL_PLAINTEXT = 'SASL_PLAINTEXT'
    SASL_SSL = 'SASL_SSL'
    SASL_MECHANISM_GSSAPI = 'GSSAPI'
    SASL_MECHANISM_PLAIN = 'PLAIN'

    # Remote-node locations for the generated security artifacts.
    CONFIG_DIR = "/mnt/security"
    KEYSTORE_PATH = "/mnt/security/test.keystore.jks"
    TRUSTSTORE_PATH = "/mnt/security/test.truststore.jks"
    JAAS_CONF_PATH = "/mnt/security/jaas.conf"
    KRB5CONF_PATH = "/mnt/security/krb5.conf"
    KEYTAB_PATH = "/mnt/security/keytab"

    # NOTE: evaluated once at class-definition (import) time -- every
    # SecurityConfig instance shares this keystore/truststore pair, which is
    # generated in the current working directory.
    ssl_stores = Keytool.generate_keystore_truststore('.')

    def __init__(self, security_protocol=None, interbroker_security_protocol=None, sasl_mechanism=SASL_MECHANISM_GSSAPI, zk_sasl=False, template_props=""):
        """
        Initialize the security properties for the node and copy
        keystore and truststore to the remote node if the transport protocol
        is SSL. If security_protocol is None, the protocol specified in the
        template properties file is used. If no protocol is specified in the
        template properties either, PLAINTEXT is used as default.
        """
        # Resolve the effective protocol: explicit arg > template file > PLAINTEXT.
        if security_protocol is None:
            security_protocol = self.get_property('security.protocol', template_props)
        if security_protocol is None:
            security_protocol = SecurityConfig.PLAINTEXT
        elif security_protocol not in [SecurityConfig.PLAINTEXT, SecurityConfig.SSL, SecurityConfig.SASL_PLAINTEXT, SecurityConfig.SASL_SSL]:
            raise Exception("Invalid security.protocol in template properties: " + security_protocol)

        # The inter-broker listener defaults to the client-facing protocol.
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        # SASL/SSL artifacts are needed if either listener (or ZooKeeper
        # itself, for SASL) uses them.
        self.has_sasl = self.is_sasl(security_protocol) or self.is_sasl(interbroker_security_protocol) or zk_sasl
        self.has_ssl = self.is_ssl(security_protocol) or self.is_ssl(interbroker_security_protocol)
        self.zk_sasl = zk_sasl

        # Kafka property names -> values; paths refer to the REMOTE node.
        self.properties = {
            'security.protocol' : security_protocol,
            'ssl.keystore.location' : SecurityConfig.KEYSTORE_PATH,
            'ssl.keystore.password' : SecurityConfig.ssl_stores['ssl.keystore.password'],
            'ssl.key.password' : SecurityConfig.ssl_stores['ssl.key.password'],
            'ssl.truststore.location' : SecurityConfig.TRUSTSTORE_PATH,
            'ssl.truststore.password' : SecurityConfig.ssl_stores['ssl.truststore.password'],
            'sasl.mechanism' : sasl_mechanism,
            'sasl.kerberos.service.name' : 'kafka'
        }

    def client_config(self, template_props=""):
        """Return a client-side config with the same client-facing protocol
        and SASL mechanism (inter-broker/ZK settings are not propagated)."""
        return SecurityConfig(self.security_protocol, sasl_mechanism=self.sasl_mechanism, template_props=template_props)

    def setup_node(self, node):
        """Copy keystores and SASL/Kerberos config files onto *node* as needed."""
        if self.has_ssl:
            node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
            node.account.scp_to(SecurityConfig.ssl_stores['ssl.keystore.location'], SecurityConfig.KEYSTORE_PATH)
            node.account.scp_to(SecurityConfig.ssl_stores['ssl.truststore.location'], SecurityConfig.TRUSTSTORE_PATH)

        if self.has_sasl:
            node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
            # Pick the JAAS template matching the SASL mechanism; IBM JDKs
            # need different login module names, so detect the vendor first.
            jaas_conf_file = self.sasl_mechanism.lower() + "_jaas.conf"
            java_version = node.account.ssh_capture("java -version")
            if any('IBM' in line for line in java_version):
                is_ibm_jdk = True
            else:
                is_ibm_jdk = False
            jaas_conf = self.render(jaas_conf_file, node=node, is_ibm_jdk=is_ibm_jdk)
            node.account.create_file(SecurityConfig.JAAS_CONF_PATH, jaas_conf)
            if self.has_sasl_kerberos:
                # Keytab and krb5.conf come from the MiniKdc service.
                node.account.scp_to(MiniKdc.LOCAL_KEYTAB_FILE, SecurityConfig.KEYTAB_PATH)
                node.account.scp_to(MiniKdc.LOCAL_KRB5CONF_FILE, SecurityConfig.KRB5CONF_PATH)

    def clean_node(self, node):
        """Remove the security artifacts from *node* (no-op for PLAINTEXT)."""
        if self.security_protocol != SecurityConfig.PLAINTEXT:
            node.account.ssh("rm -rf %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)

    def get_property(self, prop_name, template_props=""):
        """
        Get property value from the string representation of
        a properties file.
        """
        # Simple "key=value" scan; lines whose value itself contains '='
        # split into more than two parts and are silently skipped. Returns
        # the LAST matching line's value, or None if absent.
        value = None
        for line in template_props.split("\n"):
            items = line.split("=")
            if len(items) == 2 and items[0].strip() == prop_name:
                value = str(items[1].strip())
        return value

    def is_ssl(self, security_protocol):
        # True for the protocols whose transport is TLS-encrypted.
        return security_protocol == SecurityConfig.SSL or security_protocol == SecurityConfig.SASL_SSL

    def is_sasl(self, security_protocol):
        # True for the protocols that authenticate via SASL.
        return security_protocol == SecurityConfig.SASL_PLAINTEXT or security_protocol == SecurityConfig.SASL_SSL

    @property
    def security_protocol(self):
        return self.properties['security.protocol']

    @property
    def sasl_mechanism(self):
        return self.properties['sasl.mechanism']

    @property
    def has_sasl_kerberos(self):
        # Kerberos artifacts are only needed for the GSSAPI mechanism.
        return self.has_sasl and self.sasl_mechanism == SecurityConfig.SASL_MECHANISM_GSSAPI

    @property
    def kafka_opts(self):
        # JVM flags pointing at the JAAS and krb5 configs installed by
        # setup_node(); empty when SASL is not in use.
        if self.has_sasl:
            return "\"-Djava.security.auth.login.config=%s -Djava.security.krb5.conf=%s\"" % (SecurityConfig.JAAS_CONF_PATH, SecurityConfig.KRB5CONF_PATH)
        else:
            return ""

    def __str__(self):
        """
        Return properties as string with line separators.
        This is used to append security config properties to
        a properties file.
        """
        prop_str = ""
        if self.security_protocol != SecurityConfig.PLAINTEXT:
            for key, value in self.properties.items():
                prop_str += ("\n" + key + "=" + value)
            prop_str += "\n"
        return prop_str
| {
"content_hash": "5be545fb219dfe19871d4907805b1244",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 197,
"avg_line_length": 44.346820809248555,
"alnum_prop": 0.6458550573514077,
"repo_name": "samaitra/kafka",
"id": "b5efba81e4f803a65410e626d3b0e918decf0b48",
"size": "8453",
"binary": false,
"copies": "6",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/services/security/security_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "20094"
},
{
"name": "HTML",
"bytes": "5443"
},
{
"name": "Java",
"bytes": "3793143"
},
{
"name": "Python",
"bytes": "307205"
},
{
"name": "Scala",
"bytes": "2779461"
},
{
"name": "Shell",
"bytes": "43519"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
'''
Magnetic field lines

Optional sub-package: importing it requires the IMAS python API and pywed.
The guarded imports below replace a bare ImportError with a descriptive
message when either dependency is missing.
'''
import warnings
import traceback


msg, err = None, None

# 1) IMAS python API: distinguish "not available" (missing install or wrong
#    working environment) from any other failure, then re-raise with context.
try:
    import imas
except Exception as err:
    if str(err) == 'imas not available':
        msg = ""
        msg += "\n\nIMAS python API issue\n"
        msg += "imas could not be imported into tofu ('import imas' failed):\n"
        msg += "  - it may not be installed (optional dependency)\n"
        msg += "  - or you not have loaded the good working environment\n\n"
        msg += "    => the optional sub-package tofu.mag is not usable\n"
    else:
        msg = str(traceback.format_exc())
        msg += "\n\n    => the optional sub-package tofu.mag is not usable\n"
    raise Exception(msg)

# 2) pywed is required as well.
try:
    import pywed
except Exception:
    msg = "pywed not available => no tofu.mag"
    raise Exception(msg)

# 3) Pull in the actual implementation; try the absolute import first, then
#    the package-relative one.
try:
    from tofu.mag.magFieldLines import *
except Exception:
    try:
        from .magFieldLines import *
    except Exception:
        msg = "Could not import"
        raise Exception(msg)

# Tidy the module namespace: only the public API should remain visible.
del warnings, traceback, pywed, imas
del msg, err

__all__ = ['MagFieldLines']
| {
"content_hash": "8e2e4b3bf12df4a818ec9bcf1ae8df14",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.6182994454713494,
"repo_name": "Didou09/tofu",
"id": "b2a41d44a4d42288e9fa5841932c350e2e36907b",
"size": "1160",
"binary": false,
"copies": "2",
"ref": "refs/heads/takingoutcpp",
"path": "tofu/mag/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1056815"
},
{
"name": "TeX",
"bytes": "15104"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two fields to Person: ``highest_office`` (optional free text, up
    to 1024 chars) and ``highest_office_edited`` (defaults to False;
    presumably marks values that were edited by hand -- confirm with the
    application code that sets it)."""

    dependencies = [
        ("promrep", "0054_rebuild_provinces_tree"),
    ]

    operations = [
        migrations.AddField(
            model_name="person",
            name="highest_office",
            field=models.CharField(max_length=1024, null=True, blank=True),
        ),
        migrations.AddField(
            model_name="person",
            name="highest_office_edited",
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "e6e525aa99dda0e1b71b96575ee5679c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.5748613678373382,
"repo_name": "kingsdigitallab/dprr-django",
"id": "4a3f16de2648df942c3c9cd0911835780b6f6009",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "promrep/migrations/0055_add_highest_office_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "788"
},
{
"name": "HTML",
"bytes": "99560"
},
{
"name": "JavaScript",
"bytes": "10628"
},
{
"name": "Jinja",
"bytes": "220"
},
{
"name": "Python",
"bytes": "668975"
},
{
"name": "SCSS",
"bytes": "73040"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
} |
import sys
# Python 2 hack: reload() restores sys.setdefaultencoding (hidden by
# site.py at startup) so the process-wide default encoding can be forced
# to UTF-8 for the Chinese text used in the validators below.
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
import re
class guizhou_zhongbiao():
    """Field validators for Guizhou bid-winning records.

    Each ``check_<field>`` method returns None when the value passes, or a
    Chinese error message describing the problem.
    """

    # Fields that must be validated for this source.
    need_check_ziduan = [u'bbd_dotime',
                         u'main',
                         u'title',
                         u'pubdate',
                         u'company_name_invite',
                         u'bidwinning_pubdate'
                         ]

    def _check_date(self, ustr, required):
        """Shared date validation: a non-empty value must be a parseable
        date; an empty value is an error only when the field is required."""
        if ustr:
            if not public.date_format(ustr):
                return u'不合法日期'
            return None
        return u'为空' if required else None

    def check_bbd_dotime(self, source, ustr):
        """Optional, but must be a valid date when present."""
        return self._check_date(ustr, required=False)

    def check_main(self, source, ustr):
        """Required: must be non-empty."""
        if ustr:
            return None
        return u'为空'

    def check_title(self, source, ustr):
        """Required: must contain at least two Chinese characters."""
        if ustr:
            if not public.has_count_hz(ustr, 2):
                return u"没有两个汉字"
            return None
        return u'为空'

    def check_pubdate(self, source, ustr):
        """Required valid date."""
        return self._check_date(ustr, required=True)

    def check_company_name_invite(self, source, ustr):
        """Required: needs at least 2 Chinese characters or 2 latin letters."""
        if ustr:
            if (not public.has_count_hz(ustr, 2)
                    and not public.has_count_en(ustr, 2)):
                return u"没有2个汉字又没有2个英文"
            return None
        return u'为空'

    def check_bidwinning_pubdate(self, source, ustr):
        """Optional, but must be a valid date when present."""
        return self._check_date(ustr, required=False)
| {
"content_hash": "da0672df172f580456838b6c7d6239f3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 57,
"avg_line_length": 25.96969696969697,
"alnum_prop": 0.4719953325554259,
"repo_name": "mefly2012/platform",
"id": "c7e1977fc154bcb4f15bb3afda1bb2fbc76ffc3c",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clean_validate/guizhou_zhongbiao.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "742630"
}
],
"symlink_target": ""
} |
from datetime import datetime
from flask import jsonify, Blueprint
from dto.semester import YearSemester
from scraping.handlers.course_results_page import get_course_results
course_blueprint = Blueprint('course', __name__)
@course_blueprint.route('/course/<parameter_course_code>')
def course(parameter_course_code):
    """Return JSON grade statistics for a single course.

    Scrapes the results of the course identified by ``parameter_course_code``
    (case-insensitive) for the current semester and serialises them as JSON.
    """
    course_code = parameter_course_code.upper()
    # Snapshot the clock once so year and season cannot disagree if the
    # request straddles midnight on New Year's Eve.
    now = datetime.now()
    # Season labels follow karstat's Norwegian terms: VÅR (spring) before
    # August, HØST (autumn) from August onwards.
    current_season = 'VÅR' if now.month < 8 else 'HØST'
    default_year_semester = YearSemester(now.year, current_season)
    course_results = get_course_results(course_code, default_year_semester,
                                        default_year_semester)
    return jsonify(**course_results.to_dict())
"content_hash": "cc5c1764592e5db1620305ba7e7bcca2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 95,
"avg_line_length": 35.526315789473685,
"alnum_prop": 0.7748148148148148,
"repo_name": "Aqwis/karstat-wrapper",
"id": "80c13e66eafab4c7339f258ee8255825e5f81a7c",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "views/course.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8530"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.