| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
"""Python 2.7 collections module compatibility for 2.6."""
# pylint: disable=wildcard-import
from googlecloudsdk.third_party.py27.mirror.collections import *
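# Example (illustrative, not part of the original shim): code that must run on
# Python 2.6 imports this module in place of the stdlib one, e.g.
#   from googlecloudsdk.third_party.py27 import py27_collections as collections
#   d = collections.OrderedDict()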
|
{
"content_hash": "546e06e18d44fbf9b0eff03123882296",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 64,
"avg_line_length": 39.75,
"alnum_prop": 0.7861635220125787,
"repo_name": "flgiordano/netcash",
"id": "46e053f8a1b7ee5b2c1180e599017396ae70b3db",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/third_party/py27/py27_collections.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
}
|
import hashlib
import hmac
import random
import string
def rand_salt():
    # generate a random 5-letter salt (Python 2: string.letters)
    return "".join(random.choice(string.letters) for _ in range(5))
def make_secure_val(username, salt=None):
    # build a tamper-evident value of the form "username|hmac|salt"
    if not salt:
        salt = rand_salt()
    h = hmac.new(salt, username).hexdigest()
    return '%s|%s|%s' % (username, h, salt)
def check_secure_val(h):
    # return the username if the value verifies, otherwise None
    try:
        username = h.split('|')[0]
        if make_secure_val(username, h.split('|')[2]) == h:
            return username
    except (IndexError, TypeError):
        pass
def hash_pass(password, salt=None):
    # salted SHA-256 hash of the password, returned as "hash,salt"
    if not salt:
        salt = rand_salt()
    h = hashlib.sha256(salt + password).hexdigest()
    return '%s,%s' % (h, salt)
def check_hash_pass(password, hashPass):
    # return True if the password matches the stored "hash,salt" value
    try:
        salt = hashPass.split(',')[1]
        if salt:
            return hash_pass(password, salt) == hashPass
    except (IndexError, TypeError):
        pass
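# --- Illustrative usage sketch (not part of the original module): shows how the
# --- secure value and the salted password hash round-trip. Assumes Python 2,
# --- matching the use of string.letters and str-based hmac above.
if __name__ == '__main__':
    secure = make_secure_val('alice')
    print('secure value: %s' % secure)
    print('verified user: %s' % check_secure_val(secure))                        # -> alice
    print('tampered: %s' % check_secure_val('bob|' + secure.split('|', 1)[1]))   # -> None
    stored = hash_pass('s3cret')
    print('password ok: %s' % check_hash_pass('s3cret', stored))                 # -> True
    print('password bad: %s' % check_hash_pass('wrong', stored))                 # -> False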
|
{
"content_hash": "0fb6e73b0ba63652aeb4801d4f6ebd63",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 107,
"avg_line_length": 25.91891891891892,
"alnum_prop": 0.5495307612095933,
"repo_name": "divkakwani/college-administration",
"id": "ba7908b9b772f20166dcf75a00bc0c3cf54febc7",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fid/myhashlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1030"
},
{
"name": "HTML",
"bytes": "11518"
},
{
"name": "Python",
"bytes": "12729"
}
],
"symlink_target": ""
}
|
import os
import time
import sys
import traceback
"""
ScriptLog.py - A scripting library that allows for out put to the screen, a file or both. You can use different parts of this package
to tag what you are doing at the time of the logging. It takes logging one step farther by also showing the trace back information.
Logger - Does the main work but is not called directly.
append, adds to the end of the log
__checkStream - Check to see if the stream and log file is already initialized
end - Closes the stream and file
__init__ - does nothing
openLog - opens or creates the path and log file.
handleException -
"""
lineSeparator = os.linesep
fileSeparator = os.pathsep
stars = "*" * 60
class Logger:
__stream = None
__isOpen = 0
__isSystemStream = 0
def append( self, s ):
if self.__checkStream():
self.__stream.write( s )
def __checkStream( self ):
if self.__stream == None:
print "Log file has NOT been explicitly initialized, creating default log file"
self.openLog( ".", "script.log" )
return ( not self.__stream == None )
def end( self ):
if self.__checkStream():
try:
self.__stream.flush()
if not self.__isSystemStream:
self.__stream.close()
self.__stream = None
except:
pass
def __init__ ( self ):
pass
def openLog( self, logdir, logfilename, logtruncate = 1 ):
if self.__stream == None:
if logtruncate:
mode = "w"
else:
mode = "a+"
if logfilename == None and logdir == None:
self.__stream = sys.stdout
self.__isOpen = 1
self.__isSystemStream = 1
else:
if not os.path.isdir( logdir ):
print "Log directory %s does not exist" % logdir
try:
os.mkdir( logdir )
except:
print "Unable to create log directory"
return
if not os.path.isdir( logdir ):
print "%s is not a directory" % logdir
return
if not os.access( logdir, os.W_OK ):
print "Directory %s is not writable" % logdir
return
fullfilename = os.path.join( logdir , logfilename )
print( "Detailed log will be written to file %s" % fullfilename )
try:
self.__stream = open( fullfilename, mode, 0 )
self.__isOpen = 1
except:
print( "Cannot open log file %s, logging will be disabled" % fullfilename )
else:
print ( "Log file already open" )
""" End of the Logger Class"""
def handleException( method ):
#
# Handles an exception:
# Prints exception message, closes log and exits
#
msg = "An exception has occurred in %s.\n \
Exception type: %s \n \
Exception value: %s \n \
The filename, line number, function, statement and value of the exception are: \n \
%s \n \
Please make any necessary corrections and try again.\n" % ( method, sys.exc_info()[0], sys.exc_info()[1], traceback.extract_tb( sys.exc_info()[2] ) )
log2( msg )
def __get_current_traceback():
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
list = []
limit = 10
n = 0
while f is not None and ( limit is None or n < limit ):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
# strip off .py extension
st_fname = filename.replace( ".py", "" )
ind = st_fname.rfind( "\\" )
if ind != -1:
fname = st_fname[ind + 1:]
else:
fname = st_fname.split( "/" )[-1]
name = co.co_name
list.append( ( fname, lineno, name ) )
f = f.f_back
n = n + 1
return list
def entry ( message = "" ):
log ( message, ">" )
def info ( message = "" ):
log ( message, "I" )
def error( message ):
log ( message, "E" )
def warning( message ):
log ( message, "W" )
def exit ( message = "" ):
log ( message, "<" )
def debug ( message ):
log ( message, "D" )
def banner( msg ):
log ( stars )
log ( "*" )
log ( msg )
log ( "*" )
log ( stars )
# Flushes and closes log file.
def closeLog():
banner ( "Log file closed" )
logger.end()
def close():
closeLog()
#**********************************************************************
# Logging routines.
# Use log instead of puts to log a message.
# For important message use log2, it logs a message into a file + show on screen
#**********************************************************************
# Message will be logged to logfile
# Use log2 to copy to stdout
def log ( message = "", level = "I", addeol = 1 ):
line = 0
modul = ""
func = ""
stack = __get_current_traceback()
for fr in stack:
modul = fr[0]
if modul != __name__:
line = fr[1]
func = fr[2]
break
try:
if modul == "<string>":
modul = "<main_script>"
elif not modul.endswith( "py" ):
modul += ".py"
formatted_msg = "%s |%s| %s (%s:%d) %s" % ( time.ctime( time.time() ), level, func, modul, line, message )
logger.append( formatted_msg )
if ( addeol ):
logger.append( lineSeparator )
except:
# we want to ignore logging errors
print ( message )
raise
# Logs message to logfile AND stdout
def log2 ( message ):
print( message )
log ( message )
def lopen( fullFileName, dir = "/var/log/" ):
if ( fullFileName == "stdout" ):
logger.openLog ( None, None, 0 )
else:
print "opening log file: %s/%s" % ( dir, fullFileName )
logger.openLog ( dir, fullFileName, 0 )
banner ( "Log file open " )
logger = Logger()
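# --- Illustrative usage sketch (not part of the original module); the log file
# --- name and directory below are placeholders.
if __name__ == '__main__':
    lopen("example.log", dir=".")   # or lopen("stdout") to log to the screen only
    banner("Starting run")
    info("processing started")
    try:
        1 / 0                       # provoke an error to demonstrate handleException
    except ZeroDivisionError:
        handleException("__main__ demo")
    warning("continuing after the handled exception")
    close()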
|
{
"content_hash": "5fc41a12cec4fd1972b64563fc8d59ee",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 157,
"avg_line_length": 31.2979797979798,
"alnum_prop": 0.5149265773761498,
"repo_name": "wags007/BIND_DHCP_to_dnsmasq",
"id": "cc4ebce7a02a9fb76309e0ef928e17b42b47ec03",
"size": "6197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScriptLog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16665"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include # noqa
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.networks.agents \
import views as agent_views
from openstack_dashboard.dashboards.admin.networks.ports \
import urls as port_urls
from openstack_dashboard.dashboards.admin.networks.ports \
import views as port_views
from openstack_dashboard.dashboards.admin.networks.subnets \
import urls as subnet_urls
from openstack_dashboard.dashboards.admin.networks.subnets \
import views as subnet_views
from openstack_dashboard.dashboards.admin.networks import views
NETWORKS = r'^(?P<network_id>[^/]+)/%s$'
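# For illustration: NETWORKS % 'update' expands to r'^(?P<network_id>[^/]+)/update$',
# so each pattern below matches a network id followed by the given action suffix.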
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'),
# for detail view
url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'),
url(NETWORKS % 'agents/add',
agent_views.AddView.as_view(), name='adddhcpagent'),
url(NETWORKS % 'subnets/create',
subnet_views.CreateView.as_view(), name='addsubnet'),
url(NETWORKS % 'ports/create',
port_views.CreateView.as_view(), name='addport'),
url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$',
subnet_views.UpdateView.as_view(), name='editsubnet'),
url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$',
port_views.UpdateView.as_view(), name='editport'),
url(r'^subnets/', include(subnet_urls, namespace='subnets')),
url(r'^ports/', include(port_urls, namespace='ports')))
|
{
"content_hash": "695a98547876287aef61c1cf74c62eb0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 42.94871794871795,
"alnum_prop": 0.6847761194029851,
"repo_name": "intlabs/cannyos-backend-dashboard",
"id": "09318cc4b7c702f59366f0c01ffa5eb649d45112",
"size": "2283",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "horizon-master/openstack_dashboard/dashboards/admin/networks/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63813"
},
{
"name": "JavaScript",
"bytes": "235367"
},
{
"name": "Perl",
"bytes": "597"
},
{
"name": "Prolog",
"bytes": "11189"
},
{
"name": "Python",
"bytes": "4001210"
},
{
"name": "Shell",
"bytes": "27732"
}
],
"symlink_target": ""
}
|
import re
class XPathTranslator(object):
def __init__(self, selector):
self.selector = selector
def get_selector(self):
sel = self.selector
sel = self.do_translations(sel)
sel = self.do_fixes(sel)
return sel
def do_translations(self, sel):
sel = self._translate_contains_word(sel)
sel = self._translate_endswith(sel)
sel = self._translate_contains_prefix(sel)
sel = self._translate_attrs(sel)
sel = self._translate_ids(sel)
sel = self._translate_classes(sel)
sel = self._translate_parents(sel)
return sel
def do_fixes(self, sel):
sel = self._fix_asterisks(sel)
sel = self._fix_bars(sel)
sel = self._fix_attrs(sel)
sel = self._fix_direct_childs(sel)
sel = self._fix_attr_startswith(sel)
sel = self._fix_attr_contains(sel)
sel = self._fix_attr_or(sel)
return sel
def _translate_contains_word(self, selector):
regex = re.compile(r'\[([^\s~]+)[~]="?([^\s"]+)"?\]')
        sel = regex.sub(r"[@\g<1>='\g<2>' or contains(@\g<1>, '\g<2> ') or contains(@\g<1>, ' \g<2>')]", selector)
return sel
def _translate_endswith(self, selector):
regex = re.compile(r'\[([^\s$]+)[$]="?([^\s"]+)"?\]')
        sel = regex.sub(r"[ends-with(@\g<1>, '\g<2>')]", selector)
return sel
def _translate_contains_prefix(self, selector):
regex = re.compile(r'\[([^\s|]+)[|]="?([^\s"]+)"?\]')
        sel = regex.sub(r"[starts-with(@\g<1>, '\g<2>-')]", selector)
return sel
def _translate_attrs(self, selector):
regex = re.compile(r'\[(\S+)="?([^\s"]+)"?\]')
        sel = regex.sub(r"[@\g<1>='\g<2>']", selector)
return sel
def _translate_ids(self, selector):
regex = re.compile(r'[#]([^ \[]+)')
        return regex.sub(r"[@id='\g<1>']", selector)
def _translate_classes(self, selector):
regex = re.compile(r'[.]([^ .\[]+)')
        sel = regex.sub(r"[contains(@class, '\g<1>')]", selector)
return sel
def _translate_parents(self, selector):
return "//%s" % ("//".join(selector.split()))
def _fix_asterisks(self, selector):
regex = re.compile(r'[/]{2}\[')
return regex.sub("//*[", selector)
def _fix_bars(self, selector):
return selector.replace("//'", "'")
def _fix_attrs(self, selector):
sel = selector.replace("][", " and ")
return sel
def _fix_direct_childs(self, selector):
sel = selector.replace("//>//", "/")
return sel
def _fix_attr_startswith(self, selector):
regex = re.compile(r"([@]\w+)\^\='(.*)'")
        sel = regex.sub(r"starts-with(\g<1>, '\g<2>')", selector)
return sel
def _fix_attr_contains(self, selector):
regex = re.compile(r"([@]\w+)[*]='(.*)'")
        sel = regex.sub(r"contains(\g<1>, '\g<2>')", selector)
return sel
def _fix_attr_or(self, selector):
return selector.replace('//or//', ' or ')
@property
def path(self):
return self.get_selector()
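# --- Illustrative usage sketch (not part of the original module): two CSS selectors
# --- and the XPath expressions this translator produces for them.
if __name__ == '__main__':
    print(XPathTranslator("div#main p.note").path)
    # -> //div[@id='main']//p[contains(@class,'note')]
    print(XPathTranslator('a[href^="http"]').path)
    # -> //a[starts-with(@href, 'http')]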
|
{
"content_hash": "26dbcfd797a716bd85680ace8c67365c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 113,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.5248067010309279,
"repo_name": "gabrielfalcao/dominic",
"id": "aa5e73c299df9ce2c0c43e69131fa001ddb06db1",
"size": "4338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dominic/css.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "216538"
}
],
"symlink_target": ""
}
|
"""Functions to plot M/EEG data on topo (one axes per channel)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from functools import partial
from itertools import cycle
import numpy as np
from ..io.constants import Bunch
from ..io.pick import channel_type, pick_types
from ..fixes import normalize_colors
from ..utils import _clean_names, warn
from ..channels.layout import _merge_grad_data, _pair_grad_sensors, find_layout
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, COLORS, _draw_proj_checkbox,
add_background_image, plt_show, _setup_vmin_vmax)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None):
""" Create iterator over channel positions
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
    `iter_topography` therefore makes it convenient to build custom
    topography plots.
Parameters
----------
info : instance of Info
The measurement info.
layout : instance of mne.layout.Layout | None
        The layout to use. If None, the layout will be guessed.
    on_pick : callable | None
        The callback function to be invoked when clicking on one
        of the axes. It should follow the signature
        `function(axis, channel_index)`.
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
    layout_scale : float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into:
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
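    Examples
    --------
    A minimal sketch (for illustration; assumes `info` comes from a loaded
    measurement and `data` is an n_channels x n_times array)::
        import matplotlib.pyplot as plt
        fig = plt.figure(facecolor='k')
        for ax, ch_idx in iter_topography(info, fig=fig, fig_facecolor='k',
                                          axis_facecolor='k',
                                          axis_spinecolor='k'):
            ax.plot(data[ch_idx], color='w')
        plt.show()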
"""
return _iter_topography(info, layout, on_pick, fig, fig_facecolor,
axis_facecolor, axis_spinecolor, layout_scale)
def _iter_topography(info, layout, on_pick, fig, fig_facecolor='k',
axis_facecolor='k', axis_spinecolor='k',
layout_scale=None, unified=False, img=False):
"""Private helper to iterate over topography
Has the same parameters as iter_topography, plus:
unified : bool
If False (default), multiple matplotlib axes will be used.
If True, a single axis will be constructed. The former is
useful for custom plotting, the latter for speed.
"""
from matplotlib import pyplot as plt, collections
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
if unified:
under_ax = plt.axes([0, 0, 1, 1])
under_ax.set(xlim=[0, 1], ylim=[0, 1])
under_ax.axis('off')
axs = list()
for idx, name in iter_ch:
ch_idx = ch_names.index(name)
if not unified: # old, slow way
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ax._mne_ch_name = name
ax._mne_ch_idx = ch_idx
ax._mne_ax_face_color = axis_facecolor
yield ax, ch_idx
else:
ax = Bunch(ax=under_ax, pos=pos[idx], data_lines=list(),
_mne_ch_name=name, _mne_ch_idx=ch_idx,
_mne_ax_face_color=axis_facecolor)
axs.append(ax)
if unified:
under_ax._mne_axs = axs
# Create a PolyCollection for the axis backgrounds
verts = np.transpose([pos[:, :2],
pos[:, :2] + pos[:, 2:] * [1, 0],
pos[:, :2] + pos[:, 2:],
pos[:, :2] + pos[:, 2:] * [0, 1],
], [1, 0, 2])
if not img:
under_ax.add_collection(collections.PolyCollection(
verts, facecolor=axis_facecolor, edgecolor=axis_spinecolor,
linewidth=1.)) # Not needed for image plots.
for ax in axs:
yield ax, ax._mne_ch_idx
def _plot_topo(info, times, show_func, click_func=None, layout=None,
vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', axis_facecolor='k', fig_facecolor='k',
cmap='RdBu_r', layout_scale=None, title=None, x_label=None,
y_label=None, font_color='w', unified=False, img=False):
"""Helper function to plot on sensor layout"""
import matplotlib.pyplot as plt
# prepare callbacks
tmin, tmax = times[[0, -1]]
click_func = show_func if click_func is None else click_func
on_pick = partial(click_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg=fig_facecolor)
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
ax.axis('off')
my_topo_plot = _iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor,
unified=unified, img=img)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color=font_color, fontsize=19)
return fig
def _plot_topo_onpick(event, show_func):
"""Onpick callback that shows a single channel in a new figure"""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None or (not hasattr(orig_ax, '_mne_ch_idx') and
not hasattr(orig_ax, '_mne_axs')):
return
import matplotlib.pyplot as plt
try:
if hasattr(orig_ax, '_mne_axs'): # in unified, single-axes mode
x, y = event.xdata, event.ydata
for ax in orig_ax._mne_axs:
if x >= ax.pos[0] and y >= ax.pos[1] and \
x <= ax.pos[0] + ax.pos[2] and \
y <= ax.pos[1] + ax.pos[3]:
orig_ax = ax
break
else:
return
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(plt, ch_idx)
except Exception as err:
        # matplotlib silently ignores exceptions in event handlers, so we
        # print the error here to know what went wrong
print(err)
raise
def _compute_scalings(bn, xlim, ylim):
"""Compute scale factors for a unified plot"""
pos = bn.pos
bn.x_s = pos[2] / (xlim[1] - xlim[0])
bn.x_t = pos[0] - bn.x_s * xlim[0]
bn.y_s = pos[3] / (ylim[1] - ylim[0])
bn.y_t = pos[1] - bn.y_s * ylim[0]
def _check_vlim(vlim):
"""AUX function"""
return not np.isscalar(vlim) and vlim is not None
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, vline=None, x_label=None, y_label=None,
colorbar=False, picker=True, cmap='RdBu_r', title=None,
hline=None):
""" Aux function to show time-freq map on topo """
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
extent = (tmin, tmax, freq[0], freq[-1])
img = ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
if isinstance(ax, plt.Axes):
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
else:
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
if title:
plt.title(title)
if not isinstance(ax, plt.Axes):
ax = plt.gca()
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
def _imshow_tfr_unified(bn, ch_idx, tmin, tmax, vmin, vmax, onselect,
ylim=None, tfr=None, freq=None, vline=None,
x_label=None, y_label=None, colorbar=False,
picker=True, cmap='RdBu_r', title=None, hline=None):
"""Aux function to show multiple tfrs on topo using a single axes"""
_compute_scalings(bn, (tmin, tmax), (freq[0], freq[-1]))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax,
bn.y_t + bn.y_s * freq[0], bn.y_t + bn.y_s * freq[-1])
data_lines.append(ax.imshow(tfr[ch_idx], clip_on=True, clip_box=bn.pos,
extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, cmap=cmap))
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False, hline=None):
"""Aux function to show time series on topo split across multiple axes"""
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color_)
if vline:
for x in vline:
plt.axvline(x, color='w', linewidth=0.5)
if hline:
for y in hline:
plt.axhline(y, color='w', linewidth=0.5)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
if isinstance(y_label, list):
plt.ylabel(y_label[ch_idx])
else:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _plot_timeseries_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim, data,
color, times, vline=None, x_label=None,
y_label=None, colorbar=False, hline=None):
"""Aux function to show multiple time series on topo using a single axes"""
import matplotlib.pyplot as plt
if not (ylim and not any(v is None for v in ylim)):
ylim = np.array([np.min(data), np.max(data)])
# Translation and scale parameters to take data->under_ax normalized coords
_compute_scalings(bn, (tmin, tmax), ylim)
pos = bn.pos
data_lines = bn.data_lines
ax = bn.ax
# XXX These calls could probably be made faster by using collections
for data_, color_ in zip(data, color):
data_lines.append(ax.plot(
bn.x_t + bn.x_s * times, bn.y_t + bn.y_s * data_[ch_idx],
color_, clip_on=True, clip_box=pos)[0])
if vline:
vline = np.array(vline) * bn.x_s + bn.x_t
ax.vlines(vline, pos[1], pos[1] + pos[3], color='w', linewidth=0.5)
if hline:
hline = np.array(hline) * bn.y_s + bn.y_t
ax.hlines(hline, pos[0], pos[0] + pos[2], color='w', linewidth=0.5)
if x_label is not None:
ax.text(pos[0] + pos[2] / 2., pos[1], x_label,
horizontalalignment='center', verticalalignment='top')
if y_label is not None:
y_label = y_label[ch_idx] if isinstance(y_label, list) else y_label
ax.text(pos[0], pos[1] + pos[3] / 2., y_label,
                horizontalalignment='right', verticalalignment='middle',
rotation=90)
if colorbar:
plt.colorbar()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
epochs=None, sigma=None, order=None, scalings=None,
vline=None, x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r'):
"""Aux function to plot erfimage on sensor topography"""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy()
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
origin='lower', vmin=vmin, vmax=vmax, picker=True, cmap=cmap,
interpolation='nearest')
ax = plt.gca()
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
plt.colorbar()
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None, order=None,
scalings=None, vline=None, x_label=None,
y_label=None, colorbar=False, cmap='RdBu_r'):
"""Aux function to plot erfimage topography using a single axis"""
from scipy import ndimage
_compute_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
bn.y_t + bn.y_s * len(epochs.events))
this_data = data[:, ch_idx, :].copy()
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap,
interpolation='nearest'))
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=(0.,), hline=(0.,), fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_grads=False, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
    layout_scale : float
        Scaling factor for adjusting the relative size of the layout
        on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad. If None,
the ylim parameter for each channel is determined by the maximum
absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
hline : list of floats | None
The values at which to show a horizontal line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warn('More evoked objects than colors available. You should pass '
'a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all((e.times == times).all() for e in evoked):
raise ValueError('All evoked.times must be the same')
evoked = [e.copy() for e in evoked]
info = evoked[0].info
ch_names = evoked[0].ch_names
scalings = _handle_default('scalings', scalings)
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if merge_grads:
picks = _pair_grad_sensors(info, topomap_coords=False)
chs = list()
for pick in picks[::2]:
ch = info['chs'][pick]
ch['ch_name'] = ch['ch_name'][:-1] + 'X'
chs.append(ch)
info['chs'] = chs
info['bads'] = list() # bads dropped on pair_grad_sensors
info._update_redundant()
info._check_consistency()
new_picks = list()
for e in evoked:
data = _merge_grad_data(e.data[picks]) * scalings['grad']
e.data = data
new_picks.append(range(len(data)))
picks = new_picks
types_used = ['grad']
y_label = 'RMS amplitude (%s)' % _handle_default('units')['grad']
if layout is None:
layout = find_layout(info)
if not merge_grads:
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# remove possible reference meg channels
types_used = set.difference(types_used, set('ref_meg'))
# one check for all vendors
meg_types = set(('mag', 'grad'))
is_meg = len(set.intersection(types_used, meg_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, exclude=[],
**types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
for e in evoked:
for pick, ch_type in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[ch_type]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
# Y labels for picked plots must be reconstructed
y_label = ['Amplitude (%s)' % _handle_default('units')[channel_type(
info, ch_idx)] for ch_idx in range(len(chs_in_layout))]
if ylim is None:
def set_ylim(x):
return np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
        raise ValueError('ylim must be None or a dict')
data = [e.data for e in evoked]
show_func = partial(_plot_timeseries_unified, data=data,
color=color, times=times, vline=vline, hline=hline)
click_func = partial(_plot_timeseries, data=data,
color=color, times=times, vline=vline, hline=hline)
fig = _plot_topo(info=info, times=times, show_func=show_func,
click_func=click_func, layout=layout,
colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor, title=title,
x_label='Time (s)', y_label=y_label, unified=True)
if fig_background is not None:
add_background_image(fig, fig_background)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo_proj,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
def _plot_update_evoked_topo_proj(params, bools):
"""Helper function to update topo sensor plots"""
evokeds = [e.copy() for e in params['evokeds']]
fig = params['fig']
projs = [proj for proj, b in zip(params['projs'], bools) if b]
params['proj_bools'] = bools
for e in evokeds:
e.add_proj(projs, remove_existing=True)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
for ax in fig.axes[0]._mne_axs:
for line, evoked in zip(ax.data_lines, evokeds):
line.set_ydata(ax.y_t + ax.y_s * evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', font_color='w',
show=True):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
epochs : instance of Epochs
The epochs.
    layout : instance of Layout
        System-specific sensor positions.
    sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
    colorbar : bool
        Whether to display a colorbar.
    order : None | array of int | callable
        If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int, it should have the same
        length as the number of good epochs. If it's a callable, it is
        passed the times vector and the data as a 2d array
        (data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
    layout_scale : float
        Scaling factor for adjusting the relative size of the layout
        on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
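    Examples
    --------
    A minimal sketch (for illustration; assumes `epochs` has already been
    constructed, e.g. from a Raw object and events)::
        import matplotlib.pyplot as plt
        fig = plot_topo_image_epochs(epochs, sigma=0.5, colorbar=True,
                                     title='Single-epoch images')
        plt.show()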
"""
scalings = _handle_default('scalings', scalings)
data = epochs.get_data()
scale_coeffs = list()
for idx in range(epochs.info['nchan']):
ch_type = channel_type(epochs.info, idx)
scale_coeffs.append(scalings.get(ch_type, 1))
for epoch_data in data:
epoch_data *= np.asarray(scale_coeffs)[:, np.newaxis]
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if layout is None:
layout = find_layout(epochs.info)
show_func = partial(_erfimage_imshow_unified, scalings=scalings,
order=order, data=data, epochs=epochs, sigma=sigma,
cmap=cmap)
erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
data=data, epochs=epochs, sigma=sigma, cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
click_func=erf_imshow, show_func=show_func, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor, font_color=font_color,
border=border, x_label='Time (s)', y_label='Epoch',
unified=True, img=True)
plt_show(show)
return fig
|
{
"content_hash": "0b5eaa70d7b8e33c5a20949c9d6ff393",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 79,
"avg_line_length": 40.01398601398601,
"alnum_prop": 0.581999300943726,
"repo_name": "wronk/mne-python",
"id": "3bcad971d0887a2ca5fc1d8412945071734edb34",
"size": "28610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/viz/topo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5079143"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import collections.abc as collections_abc
import itertools
from .. import AssertsCompiledSQL
from .. import AssertsExecutionResults
from .. import config
from .. import fixtures
from ..assertions import assert_raises
from ..assertions import eq_
from ..assertions import in_
from ..assertsql import CursorSQL
from ..schema import Column
from ..schema import Table
from ... import bindparam
from ... import case
from ... import column
from ... import Computed
from ... import exists
from ... import false
from ... import ForeignKey
from ... import func
from ... import Identity
from ... import Integer
from ... import literal
from ... import literal_column
from ... import null
from ... import select
from ... import String
from ... import table
from ... import testing
from ... import text
from ... import true
from ... import tuple_
from ... import TupleType
from ... import union
from ... import values
from ...exc import DatabaseError
from ...exc import ProgrammingError
class CollateTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(100)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "data": "collate data1"},
{"id": 2, "data": "collate data2"},
],
)
def _assert_result(self, select, result):
with config.db.connect() as conn:
eq_(conn.execute(select).fetchall(), result)
@testing.requires.order_by_collation
def test_collate_order_by(self):
collation = testing.requires.get_order_by_collation(testing.config)
self._assert_result(
select(self.tables.some_table).order_by(
self.tables.some_table.c.data.collate(collation).asc()
),
[(1, "collate data1"), (2, "collate data2")],
)
class OrderByLabelTest(fixtures.TablesTest):
"""Test the dialect sends appropriate ORDER BY expressions when
labels are used.
This essentially exercises the "supports_simple_order_by_label"
setting.
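    For illustration: with a label ``lx`` on the expression ``x + y``, a dialect
    supporting this may render ``ORDER BY lx``, whereas one that does not
    re-renders the labeled expression, e.g. ``ORDER BY x + y``.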
"""
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("q", String(50)),
Column("p", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"},
{"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"},
{"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"},
],
)
def _assert_result(self, select, result):
with config.db.connect() as conn:
eq_(conn.execute(select).fetchall(), result)
def test_plain(self):
table = self.tables.some_table
lx = table.c.x.label("lx")
self._assert_result(select(lx).order_by(lx), [(1,), (2,), (3,)])
def test_composed_int(self):
table = self.tables.some_table
lx = (table.c.x + table.c.y).label("lx")
self._assert_result(select(lx).order_by(lx), [(3,), (5,), (7,)])
def test_composed_multiple(self):
table = self.tables.some_table
lx = (table.c.x + table.c.y).label("lx")
ly = (func.lower(table.c.q) + table.c.p).label("ly")
self._assert_result(
select(lx, ly).order_by(lx, ly.desc()),
[(3, "q1p3"), (5, "q2p2"), (7, "q3p1")],
)
def test_plain_desc(self):
table = self.tables.some_table
lx = table.c.x.label("lx")
self._assert_result(select(lx).order_by(lx.desc()), [(3,), (2,), (1,)])
def test_composed_int_desc(self):
table = self.tables.some_table
lx = (table.c.x + table.c.y).label("lx")
self._assert_result(select(lx).order_by(lx.desc()), [(7,), (5,), (3,)])
@testing.requires.group_by_complex_expression
def test_group_by_composed(self):
table = self.tables.some_table
expr = (table.c.x + table.c.y).label("lx")
stmt = (
select(func.count(table.c.id), expr).group_by(expr).order_by(expr)
)
self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])
class ValuesExpressionTest(fixtures.TestBase):
__requires__ = ("table_value_constructor",)
__backend__ = True
def test_tuples(self, connection):
value_expr = values(
column("id", Integer), column("name", String), name="my_values"
).data([(1, "name1"), (2, "name2"), (3, "name3")])
eq_(
connection.execute(select(value_expr)).all(),
[(1, "name1"), (2, "name2"), (3, "name3")],
)
class FetchLimitOffsetTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
{"id": 5, "x": 4, "y": 6},
],
)
def _assert_result(
self, connection, select, result, params=(), set_=False
):
if set_:
query_res = connection.execute(select, params).fetchall()
eq_(len(query_res), len(result))
eq_(set(query_res), set(result))
else:
eq_(connection.execute(select, params).fetchall(), result)
def _assert_result_str(self, select, result, params=()):
with config.db.connect() as conn:
eq_(conn.exec_driver_sql(select, params).fetchall(), result)
def test_simple_limit(self, connection):
table = self.tables.some_table
stmt = select(table).order_by(table.c.id)
self._assert_result(
connection,
stmt.limit(2),
[(1, 1, 2), (2, 2, 3)],
)
self._assert_result(
connection,
stmt.limit(3),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
)
def test_limit_render_multiple_times(self, connection):
table = self.tables.some_table
stmt = select(table.c.id).limit(1).scalar_subquery()
u = union(select(stmt), select(stmt)).subquery().select()
self._assert_result(
connection,
u,
[
(1,),
],
)
@testing.requires.fetch_first
def test_simple_fetch(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(2),
[(1, 1, 2), (2, 2, 3)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(3),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
)
@testing.requires.offset
def test_simple_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(2),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(3),
[(4, 4, 5), (5, 4, 6)],
)
@testing.combinations(
([(2, 0), (2, 1), (3, 2)]),
([(2, 1), (2, 0), (3, 2)]),
([(3, 1), (2, 1), (3, 1)]),
argnames="cases",
)
@testing.requires.offset
def test_simple_limit_offset(self, connection, cases):
table = self.tables.some_table
connection = connection.execution_options(compiled_cache={})
assert_data = [(1, 1, 2), (2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)]
for limit, offset in cases:
expected = assert_data[offset : offset + limit]
self._assert_result(
connection,
select(table).order_by(table.c.id).limit(limit).offset(offset),
expected,
)
@testing.requires.fetch_first
def test_simple_fetch_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(2).offset(1),
[(2, 2, 3), (3, 3, 4)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(3).offset(2),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
@testing.requires.fetch_no_order_by
def test_fetch_offset_no_order(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).fetch(10),
[(1, 1, 2), (2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
set_=True,
)
@testing.requires.offset
def test_simple_offset_zero(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(0),
[(1, 1, 2), (2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(1),
[(2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
@testing.requires.offset
def test_limit_offset_nobinds(self):
"""test that 'literal binds' mode works - no bound params."""
table = self.tables.some_table
stmt = select(table).order_by(table.c.id).limit(2).offset(1)
sql = stmt.compile(
dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
)
sql = str(sql)
self._assert_result_str(sql, [(2, 2, 3), (3, 3, 4)])
@testing.requires.fetch_first
def test_fetch_offset_nobinds(self):
"""test that 'literal binds' mode works - no bound params."""
table = self.tables.some_table
stmt = select(table).order_by(table.c.id).fetch(2).offset(1)
sql = stmt.compile(
dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
)
sql = str(sql)
self._assert_result_str(sql, [(2, 2, 3), (3, 3, 4)])
@testing.requires.bound_limit_offset
def test_bound_limit(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).limit(bindparam("l")),
[(1, 1, 2), (2, 2, 3)],
params={"l": 2},
)
self._assert_result(
connection,
select(table).order_by(table.c.id).limit(bindparam("l")),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
params={"l": 3},
)
@testing.requires.bound_limit_offset
def test_bound_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"o": 2},
)
self._assert_result(
connection,
select(table).order_by(table.c.id).offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"o": 1},
)
@testing.requires.bound_limit_offset
def test_bound_limit_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(bindparam("l"))
.offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4)],
params={"l": 2, "o": 1},
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(bindparam("l"))
.offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"l": 3, "o": 2},
)
@testing.requires.fetch_first
def test_bound_fetch_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(bindparam("f"))
.offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4)],
params={"f": 2, "o": 1},
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(bindparam("f"))
.offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
params={"f": 3, "o": 2},
)
@testing.requires.sql_expression_limit_offset
def test_expr_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.offset(literal_column("1") + literal_column("2")),
[(4, 4, 5), (5, 4, 6)],
)
@testing.requires.sql_expression_limit_offset
def test_expr_limit(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("2")),
[(1, 1, 2), (2, 2, 3), (3, 3, 4)],
)
@testing.requires.sql_expression_limit_offset
def test_expr_limit_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("1"))
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5)],
)
@testing.requires.fetch_first
@testing.requires.fetch_expression
def test_expr_fetch_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(literal_column("1") + literal_column("1"))
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5)],
)
@testing.requires.sql_expression_limit_offset
def test_simple_limit_expr_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(2)
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5)],
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(3)
.offset(literal_column("1") + literal_column("1")),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
)
@testing.requires.sql_expression_limit_offset
def test_expr_limit_simple_offset(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("1"))
.offset(2),
[(3, 3, 4), (4, 4, 5)],
)
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.limit(literal_column("1") + literal_column("1"))
.offset(1),
[(2, 2, 3), (3, 3, 4)],
)
@testing.requires.fetch_ties
def test_simple_fetch_ties(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.x.desc()).fetch(1, with_ties=True),
[(4, 4, 5), (5, 4, 6)],
set_=True,
)
self._assert_result(
connection,
select(table).order_by(table.c.x.desc()).fetch(3, with_ties=True),
[(3, 3, 4), (4, 4, 5), (5, 4, 6)],
set_=True,
)
@testing.requires.fetch_ties
@testing.requires.fetch_offset_with_options
def test_fetch_offset_ties(self, connection):
table = self.tables.some_table
fa = connection.execute(
select(table)
.order_by(table.c.x)
.fetch(2, with_ties=True)
.offset(2)
).fetchall()
eq_(fa[0], (3, 3, 4))
eq_(set(fa), {(3, 3, 4), (4, 4, 5), (5, 4, 6)})
@testing.requires.fetch_ties
@testing.requires.fetch_offset_with_options
def test_fetch_offset_ties_exact_number(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.x)
.fetch(2, with_ties=True)
.offset(1),
[(2, 2, 3), (3, 3, 4)],
)
self._assert_result(
connection,
select(table)
.order_by(table.c.x)
.fetch(3, with_ties=True)
.offset(3),
[(4, 4, 5), (5, 4, 6)],
)
@testing.requires.fetch_percent
def test_simple_fetch_percent(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table).order_by(table.c.id).fetch(20, percent=True),
[(1, 1, 2)],
)
@testing.requires.fetch_percent
@testing.requires.fetch_offset_with_options
def test_fetch_offset_percent(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.id)
.fetch(40, percent=True)
.offset(1),
[(2, 2, 3), (3, 3, 4)],
)
@testing.requires.fetch_ties
@testing.requires.fetch_percent
def test_simple_fetch_percent_ties(self, connection):
table = self.tables.some_table
self._assert_result(
connection,
select(table)
.order_by(table.c.x.desc())
.fetch(20, percent=True, with_ties=True),
[(4, 4, 5), (5, 4, 6)],
set_=True,
)
@testing.requires.fetch_ties
@testing.requires.fetch_percent
@testing.requires.fetch_offset_with_options
def test_fetch_offset_percent_ties(self, connection):
table = self.tables.some_table
fa = connection.execute(
select(table)
.order_by(table.c.x)
.fetch(40, percent=True, with_ties=True)
.offset(2)
).fetchall()
eq_(fa[0], (3, 3, 4))
eq_(set(fa), {(3, 3, 4), (4, 4, 5), (5, 4, 6)})
class SameNamedSchemaTableTest(fixtures.TablesTest):
"""tests for #7471"""
__backend__ = True
__requires__ = ("schemas",)
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
schema=config.test_schema,
)
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"some_table_id",
Integer,
# ForeignKey("%s.some_table.id" % config.test_schema),
nullable=False,
),
)
@classmethod
def insert_data(cls, connection):
some_table, some_table_schema = cls.tables(
"some_table", "%s.some_table" % config.test_schema
)
connection.execute(some_table_schema.insert(), {"id": 1})
connection.execute(some_table.insert(), {"id": 1, "some_table_id": 1})
def test_simple_join_both_tables(self, connection):
some_table, some_table_schema = self.tables(
"some_table", "%s.some_table" % config.test_schema
)
eq_(
connection.execute(
select(some_table, some_table_schema).join_from(
some_table,
some_table_schema,
some_table.c.some_table_id == some_table_schema.c.id,
)
).first(),
(1, 1, 1),
)
def test_simple_join_whereclause_only(self, connection):
some_table, some_table_schema = self.tables(
"some_table", "%s.some_table" % config.test_schema
)
eq_(
connection.execute(
select(some_table)
.join_from(
some_table,
some_table_schema,
some_table.c.some_table_id == some_table_schema.c.id,
)
.where(some_table.c.id == 1)
).first(),
(1, 1),
)
def test_subquery(self, connection):
some_table, some_table_schema = self.tables(
"some_table", "%s.some_table" % config.test_schema
)
subq = (
select(some_table)
.join_from(
some_table,
some_table_schema,
some_table.c.some_table_id == some_table_schema.c.id,
)
.where(some_table.c.id == 1)
.subquery()
)
eq_(
connection.execute(
select(some_table, subq.c.id)
.join_from(
some_table,
subq,
some_table.c.some_table_id == subq.c.id,
)
.where(some_table.c.id == 1)
).first(),
(1, 1, 1),
)
class JoinTest(fixtures.TablesTest):
__backend__ = True
def _assert_result(self, select, result, params=()):
with config.db.connect() as conn:
eq_(conn.execute(select, params).fetchall(), result)
@classmethod
def define_tables(cls, metadata):
Table("a", metadata, Column("id", Integer, primary_key=True))
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id"), nullable=False),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.a.insert(),
[{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}],
)
connection.execute(
cls.tables.b.insert(),
[
{"id": 1, "a_id": 1},
{"id": 2, "a_id": 1},
{"id": 4, "a_id": 2},
{"id": 5, "a_id": 3},
],
)
def test_inner_join_fk(self):
a, b = self.tables("a", "b")
stmt = select(a, b).select_from(a.join(b)).order_by(a.c.id, b.c.id)
self._assert_result(stmt, [(1, 1, 1), (1, 2, 1), (2, 4, 2), (3, 5, 3)])
def test_inner_join_true(self):
a, b = self.tables("a", "b")
stmt = (
select(a, b)
.select_from(a.join(b, true()))
.order_by(a.c.id, b.c.id)
)
self._assert_result(
stmt,
[
(a, b, c)
for (a,), (b, c) in itertools.product(
[(1,), (2,), (3,), (4,), (5,)],
[(1, 1), (2, 1), (4, 2), (5, 3)],
)
],
)
def test_inner_join_false(self):
a, b = self.tables("a", "b")
stmt = (
select(a, b)
.select_from(a.join(b, false()))
.order_by(a.c.id, b.c.id)
)
self._assert_result(stmt, [])
def test_outer_join_false(self):
a, b = self.tables("a", "b")
stmt = (
select(a, b)
.select_from(a.outerjoin(b, false()))
.order_by(a.c.id, b.c.id)
)
self._assert_result(
stmt,
[
(1, None, None),
(2, None, None),
(3, None, None),
(4, None, None),
(5, None, None),
],
)
def test_outer_join_fk(self):
a, b = self.tables("a", "b")
stmt = select(a, b).select_from(a.join(b)).order_by(a.c.id, b.c.id)
self._assert_result(stmt, [(1, 1, 1), (1, 2, 1), (2, 4, 2), (3, 5, 3)])
class CompoundSelectTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
],
)
def _assert_result(self, select, result, params=()):
with config.db.connect() as conn:
eq_(conn.execute(select, params).fetchall(), result)
def test_plain_union(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2)
s2 = select(table).where(table.c.id == 3)
u1 = union(s1, s2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
def test_select_from_plain_union(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2)
s2 = select(table).where(table.c.id == 3)
u1 = union(s1, s2).alias().select()
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
@testing.requires.order_by_col_from_union
@testing.requires.parens_in_union_contained_select_w_limit_offset
def test_limit_offset_selectable_in_unions(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).limit(1).order_by(table.c.id)
s2 = select(table).where(table.c.id == 3).limit(1).order_by(table.c.id)
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
@testing.requires.parens_in_union_contained_select_wo_limit_offset
def test_order_by_selectable_in_unions(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).order_by(table.c.id)
s2 = select(table).where(table.c.id == 3).order_by(table.c.id)
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
def test_distinct_selectable_in_unions(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).distinct()
s2 = select(table).where(table.c.id == 3).distinct()
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
@testing.requires.parens_in_union_contained_select_w_limit_offset
def test_limit_offset_in_unions_from_alias(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).limit(1).order_by(table.c.id)
s2 = select(table).where(table.c.id == 3).limit(1).order_by(table.c.id)
# this necessarily has double parens
u1 = union(s1, s2).alias()
self._assert_result(
u1.select().limit(2).order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)]
)
def test_limit_offset_aliased_selectable_in_unions(self):
table = self.tables.some_table
s1 = (
select(table)
.where(table.c.id == 2)
.limit(1)
.order_by(table.c.id)
.alias()
.select()
)
s2 = (
select(table)
.where(table.c.id == 3)
.limit(1)
.order_by(table.c.id)
.alias()
.select()
)
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
class PostCompileParamsTest(
AssertsExecutionResults, AssertsCompiledSQL, fixtures.TablesTest
):
__backend__ = True
__requires__ = ("standard_cursor_sql",)
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("z", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2, "z": "z1"},
{"id": 2, "x": 2, "y": 3, "z": "z2"},
{"id": 3, "x": 3, "y": 4, "z": "z3"},
{"id": 4, "x": 4, "y": 5, "z": "z4"},
],
)
def test_compile(self):
table = self.tables.some_table
stmt = select(table.c.id).where(
table.c.x == bindparam("q", literal_execute=True)
)
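        # literal_execute=True renders the bound value directly into the SQL
        # string at execution time ("post compile") instead of passing it to the
        # driver as a parameter; __[POSTCOMPILE_q] below is the compile-time
        # placeholder for that deferred rendering.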
self.assert_compile(
stmt,
"SELECT some_table.id FROM some_table "
"WHERE some_table.x = __[POSTCOMPILE_q]",
{},
)
def test_compile_literal_binds(self):
table = self.tables.some_table
stmt = select(table.c.id).where(
table.c.x == bindparam("q", 10, literal_execute=True)
)
self.assert_compile(
stmt,
"SELECT some_table.id FROM some_table WHERE some_table.x = 10",
{},
literal_binds=True,
)
def test_execute(self):
table = self.tables.some_table
stmt = select(table.c.id).where(
table.c.x == bindparam("q", literal_execute=True)
)
with self.sql_execution_asserter() as asserter:
with config.db.connect() as conn:
conn.execute(stmt, dict(q=10))
asserter.assert_(
CursorSQL(
"SELECT some_table.id \nFROM some_table "
"\nWHERE some_table.x = 10",
() if config.db.dialect.positional else {},
)
)
def test_execute_expanding_plus_literal_execute(self):
table = self.tables.some_table
stmt = select(table.c.id).where(
table.c.x.in_(bindparam("q", expanding=True, literal_execute=True))
)
with self.sql_execution_asserter() as asserter:
with config.db.connect() as conn:
conn.execute(stmt, dict(q=[5, 6, 7]))
asserter.assert_(
CursorSQL(
"SELECT some_table.id \nFROM some_table "
"\nWHERE some_table.x IN (5, 6, 7)",
() if config.db.dialect.positional else {},
)
)
@testing.requires.tuple_in
def test_execute_tuple_expanding_plus_literal_execute(self):
table = self.tables.some_table
stmt = select(table.c.id).where(
tuple_(table.c.x, table.c.y).in_(
bindparam("q", expanding=True, literal_execute=True)
)
)
with self.sql_execution_asserter() as asserter:
with config.db.connect() as conn:
conn.execute(stmt, dict(q=[(5, 10), (12, 18)]))
asserter.assert_(
CursorSQL(
"SELECT some_table.id \nFROM some_table "
"\nWHERE (some_table.x, some_table.y) "
"IN (%s(5, 10), (12, 18))"
% ("VALUES " if config.db.dialect.tuple_in_values else ""),
() if config.db.dialect.positional else {},
)
)
@testing.requires.tuple_in
def test_execute_tuple_expanding_plus_literal_heterogeneous_execute(self):
table = self.tables.some_table
stmt = select(table.c.id).where(
tuple_(table.c.x, table.c.z).in_(
bindparam("q", expanding=True, literal_execute=True)
)
)
with self.sql_execution_asserter() as asserter:
with config.db.connect() as conn:
conn.execute(stmt, dict(q=[(5, "z1"), (12, "z3")]))
asserter.assert_(
CursorSQL(
"SELECT some_table.id \nFROM some_table "
"\nWHERE (some_table.x, some_table.z) "
"IN (%s(5, 'z1'), (12, 'z3'))"
% ("VALUES " if config.db.dialect.tuple_in_values else ""),
() if config.db.dialect.positional else {},
)
)
class ExpandingBoundInTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("z", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2, "z": "z1"},
{"id": 2, "x": 2, "y": 3, "z": "z2"},
{"id": 3, "x": 3, "y": 4, "z": "z3"},
{"id": 4, "x": 4, "y": 5, "z": "z4"},
],
)
def _assert_result(self, select, result, params=()):
with config.db.connect() as conn:
eq_(conn.execute(select, params).fetchall(), result)
def test_multiple_empty_sets_bindparam(self):
# test that any anonymous aliasing used by the dialect
# is fine with duplicates
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_(bindparam("q")))
.where(table.c.y.in_(bindparam("p")))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": [], "p": []})
def test_multiple_empty_sets_direct(self):
# test that any anonymous aliasing used by the dialect
# is fine with duplicates
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_([]))
.where(table.c.y.in_([]))
.order_by(table.c.id)
)
self._assert_result(stmt, [])
@testing.requires.tuple_in_w_empty
def test_empty_heterogeneous_tuples_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.z).in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
@testing.requires.tuple_in_w_empty
def test_empty_heterogeneous_tuples_direct(self):
table = self.tables.some_table
def go(val, expected):
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.z).in_(val))
.order_by(table.c.id)
)
self._assert_result(stmt, expected)
go([], [])
go([(2, "z2"), (3, "z3"), (4, "z4")], [(2,), (3,), (4,)])
go([], [])
@testing.requires.tuple_in_w_empty
def test_empty_homogeneous_tuples_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.y).in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
@testing.requires.tuple_in_w_empty
def test_empty_homogeneous_tuples_direct(self):
table = self.tables.some_table
def go(val, expected):
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.y).in_(val))
.order_by(table.c.id)
)
self._assert_result(stmt, expected)
go([], [])
go([(1, 2), (2, 3), (3, 4)], [(1,), (2,), (3,)])
go([], [])
def test_bound_in_scalar_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [(2,), (3,), (4,)], params={"q": [2, 3, 4]})
def test_bound_in_scalar_direct(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_([2, 3, 4]))
.order_by(table.c.id)
)
self._assert_result(stmt, [(2,), (3,), (4,)])
def test_nonempty_in_plus_empty_notin(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_([2, 3]))
.where(table.c.id.not_in([]))
.order_by(table.c.id)
)
self._assert_result(stmt, [(2,), (3,)])
def test_empty_in_plus_notempty_notin(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_([]))
.where(table.c.id.not_in([2, 3]))
.order_by(table.c.id)
)
self._assert_result(stmt, [])
def test_typed_str_in(self):
"""test related to #7292.
        Because a type is given to the bound param, there is no ambiguity
        about the type of each element.
"""
stmt = text(
"select id FROM some_table WHERE z IN :q ORDER BY id"
).bindparams(bindparam("q", type_=String, expanding=True))
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={"q": ["z2", "z3", "z4"]},
)
def test_untyped_str_in(self):
"""test related to #7292.
        For an untyped expression we look at the types of the elements:
        a Sequence is detected as a tuple-IN, but strings and bytes are not,
        as always.
"""
stmt = text(
"select id FROM some_table WHERE z IN :q ORDER BY id"
).bindparams(bindparam("q", expanding=True))
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={"q": ["z2", "z3", "z4"]},
)
@testing.requires.tuple_in
def test_bound_in_two_tuple_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.y).in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(
stmt, [(2,), (3,), (4,)], params={"q": [(2, 3), (3, 4), (4, 5)]}
)
@testing.requires.tuple_in
def test_bound_in_two_tuple_direct(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.y).in_([(2, 3), (3, 4), (4, 5)]))
.order_by(table.c.id)
)
self._assert_result(stmt, [(2,), (3,), (4,)])
@testing.requires.tuple_in
def test_bound_in_heterogeneous_two_tuple_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(tuple_(table.c.x, table.c.z).in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
)
@testing.requires.tuple_in
def test_bound_in_heterogeneous_two_tuple_direct(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(
tuple_(table.c.x, table.c.z).in_(
[(2, "z2"), (3, "z3"), (4, "z4")]
)
)
.order_by(table.c.id)
)
self._assert_result(
stmt,
[(2,), (3,), (4,)],
)
@testing.requires.tuple_in
def test_bound_in_heterogeneous_two_tuple_text_bindparam(self):
        # note this becomes ARRAY if we don't use expanding
        # explicitly right now
stmt = text(
"select id FROM some_table WHERE (x, z) IN :q ORDER BY id"
).bindparams(bindparam("q", expanding=True))
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
)
@testing.requires.tuple_in
def test_bound_in_heterogeneous_two_tuple_typed_bindparam_non_tuple(self):
class LikeATuple(collections_abc.Sequence):
def __init__(self, *data):
self._data = data
def __iter__(self):
return iter(self._data)
def __getitem__(self, idx):
return self._data[idx]
def __len__(self):
return len(self._data)
stmt = text(
"select id FROM some_table WHERE (x, z) IN :q ORDER BY id"
).bindparams(
bindparam(
"q", type_=TupleType(Integer(), String()), expanding=True
)
)
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={
"q": [
LikeATuple(2, "z2"),
LikeATuple(3, "z3"),
LikeATuple(4, "z4"),
]
},
)
@testing.requires.tuple_in
def test_bound_in_heterogeneous_two_tuple_text_bindparam_non_tuple(self):
        # note this becomes ARRAY if we don't use expanding
        # explicitly right now
class LikeATuple(collections_abc.Sequence):
def __init__(self, *data):
self._data = data
def __iter__(self):
return iter(self._data)
def __getitem__(self, idx):
return self._data[idx]
def __len__(self):
return len(self._data)
stmt = text(
"select id FROM some_table WHERE (x, z) IN :q ORDER BY id"
).bindparams(bindparam("q", expanding=True))
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={
"q": [
LikeATuple(2, "z2"),
LikeATuple(3, "z3"),
LikeATuple(4, "z4"),
]
},
)
def test_empty_set_against_integer_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
def test_empty_set_against_integer_direct(self):
table = self.tables.some_table
stmt = select(table.c.id).where(table.c.x.in_([])).order_by(table.c.id)
self._assert_result(stmt, [])
def test_empty_set_against_integer_negation_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.x.not_in(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})
def test_empty_set_against_integer_negation_direct(self):
table = self.tables.some_table
stmt = (
select(table.c.id).where(table.c.x.not_in([])).order_by(table.c.id)
)
self._assert_result(stmt, [(1,), (2,), (3,), (4,)])
def test_empty_set_against_string_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.z.in_(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
def test_empty_set_against_string_direct(self):
table = self.tables.some_table
stmt = select(table.c.id).where(table.c.z.in_([])).order_by(table.c.id)
self._assert_result(stmt, [])
def test_empty_set_against_string_negation_bindparam(self):
table = self.tables.some_table
stmt = (
select(table.c.id)
.where(table.c.z.not_in(bindparam("q")))
.order_by(table.c.id)
)
self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})
def test_empty_set_against_string_negation_direct(self):
table = self.tables.some_table
stmt = (
select(table.c.id).where(table.c.z.not_in([])).order_by(table.c.id)
)
self._assert_result(stmt, [(1,), (2,), (3,), (4,)])
def test_null_in_empty_set_is_false_bindparam(self, connection):
stmt = select(
case(
(
null().in_(bindparam("foo", value=())),
true(),
),
else_=false(),
)
)
in_(connection.execute(stmt).fetchone()[0], (False, 0))
def test_null_in_empty_set_is_false_direct(self, connection):
stmt = select(
case(
(
null().in_([]),
true(),
),
else_=false(),
)
)
in_(connection.execute(stmt).fetchone()[0], (False, 0))
class LikeFunctionsTest(fixtures.TablesTest):
__backend__ = True
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "data": "abcdefg"},
{"id": 2, "data": "ab/cdefg"},
{"id": 3, "data": "ab%cdefg"},
{"id": 4, "data": "ab_cdefg"},
{"id": 5, "data": "abcde/fg"},
{"id": 6, "data": "abcde%fg"},
{"id": 7, "data": "ab#cdefg"},
{"id": 8, "data": "ab9cdefg"},
{"id": 9, "data": "abcde#fg"},
{"id": 10, "data": "abcd9fg"},
{"id": 11, "data": None},
],
)
def _test(self, expr, expected):
some_table = self.tables.some_table
with config.db.connect() as conn:
rows = {
value
for value, in conn.execute(select(some_table.c.id).where(expr))
}
eq_(rows, expected)
def test_startswith_unescaped(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab%c"), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
def test_startswith_autoescape(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab%c", autoescape=True), {3})
def test_startswith_sqlexpr(self):
col = self.tables.some_table.c.data
self._test(
col.startswith(literal_column("'ab%c'")),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
)
def test_startswith_escape(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab##c", escape="#"), {7})
def test_startswith_autoescape_escape(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab%c", autoescape=True, escape="#"), {3})
self._test(col.startswith("ab#c", autoescape=True, escape="#"), {7})
def test_endswith_unescaped(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e%fg"), {1, 2, 3, 4, 5, 6, 7, 8, 9})
def test_endswith_sqlexpr(self):
col = self.tables.some_table.c.data
self._test(
col.endswith(literal_column("'e%fg'")), {1, 2, 3, 4, 5, 6, 7, 8, 9}
)
def test_endswith_autoescape(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e%fg", autoescape=True), {6})
def test_endswith_escape(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e##fg", escape="#"), {9})
def test_endswith_autoescape_escape(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e%fg", autoescape=True, escape="#"), {6})
self._test(col.endswith("e#fg", autoescape=True, escape="#"), {9})
def test_contains_unescaped(self):
col = self.tables.some_table.c.data
self._test(col.contains("b%cde"), {1, 2, 3, 4, 5, 6, 7, 8, 9})
def test_contains_autoescape(self):
col = self.tables.some_table.c.data
self._test(col.contains("b%cde", autoescape=True), {3})
def test_contains_escape(self):
col = self.tables.some_table.c.data
self._test(col.contains("b##cde", escape="#"), {7})
def test_contains_autoescape_escape(self):
col = self.tables.some_table.c.data
self._test(col.contains("b%cd", autoescape=True, escape="#"), {3})
self._test(col.contains("b#cd", autoescape=True, escape="#"), {7})
@testing.requires.regexp_match
def test_not_regexp_match(self):
col = self.tables.some_table.c.data
self._test(~col.regexp_match("a.cde"), {2, 3, 4, 7, 8, 10})
@testing.requires.regexp_replace
def test_regexp_replace(self):
col = self.tables.some_table.c.data
self._test(
col.regexp_replace("a.cde", "FOO").contains("FOO"), {1, 5, 6, 9}
)
@testing.requires.regexp_match
@testing.combinations(
("a.cde", {1, 5, 6, 9}),
("abc", {1, 5, 6, 9, 10}),
("^abc", {1, 5, 6, 9, 10}),
("9cde", {8}),
("^a", set(range(1, 11))),
("(b|c)", set(range(1, 11))),
("^(b|c)", set()),
)
def test_regexp_match(self, text, expected):
col = self.tables.some_table.c.data
self._test(col.regexp_match(text), expected)
class ComputedColumnTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("computed_columns",)
@classmethod
def define_tables(cls, metadata):
Table(
"square",
metadata,
Column("id", Integer, primary_key=True),
Column("side", Integer),
Column("area", Integer, Computed("side * side")),
Column("perimeter", Integer, Computed("4 * side")),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.square.insert(),
[{"id": 1, "side": 10}, {"id": 10, "side": 42}],
)
def test_select_all(self):
with config.db.connect() as conn:
res = conn.execute(
select(text("*"))
.select_from(self.tables.square)
.order_by(self.tables.square.c.id)
).fetchall()
eq_(res, [(1, 10, 100, 40), (10, 42, 1764, 168)])
def test_select_columns(self):
with config.db.connect() as conn:
res = conn.execute(
select(
self.tables.square.c.area, self.tables.square.c.perimeter
)
.select_from(self.tables.square)
.order_by(self.tables.square.c.id)
).fetchall()
eq_(res, [(100, 40), (1764, 168)])
class IdentityColumnTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("identity_columns",)
run_inserts = "once"
run_deletes = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"tbl_a",
metadata,
Column(
"id",
Integer,
Identity(
always=True, start=42, nominvalue=True, nomaxvalue=True
),
primary_key=True,
),
Column("desc", String(100)),
)
Table(
"tbl_b",
metadata,
Column(
"id",
Integer,
Identity(increment=-5, start=0, minvalue=-1000, maxvalue=0),
primary_key=True,
),
Column("desc", String(100)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.tbl_a.insert(),
[{"desc": "a"}, {"desc": "b"}],
)
connection.execute(
cls.tables.tbl_b.insert(),
[{"desc": "a"}, {"desc": "b"}],
)
connection.execute(
cls.tables.tbl_b.insert(),
[{"id": 42, "desc": "c"}],
)
def test_select_all(self, connection):
res = connection.execute(
select(text("*"))
.select_from(self.tables.tbl_a)
.order_by(self.tables.tbl_a.c.id)
).fetchall()
eq_(res, [(42, "a"), (43, "b")])
res = connection.execute(
select(text("*"))
.select_from(self.tables.tbl_b)
.order_by(self.tables.tbl_b.c.id)
).fetchall()
eq_(res, [(-5, "b"), (0, "a"), (42, "c")])
def test_select_columns(self, connection):
res = connection.execute(
select(self.tables.tbl_a.c.id).order_by(self.tables.tbl_a.c.id)
).fetchall()
eq_(res, [(42,), (43,)])
@testing.requires.identity_columns_standard
def test_insert_always_error(self, connection):
def fn():
connection.execute(
self.tables.tbl_a.insert(),
[{"id": 200, "desc": "a"}],
)
assert_raises((DatabaseError, ProgrammingError), fn)
class IdentityAutoincrementTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("autoincrement_without_sequence",)
@classmethod
def define_tables(cls, metadata):
Table(
"tbl",
metadata,
Column(
"id",
Integer,
Identity(),
primary_key=True,
autoincrement=True,
),
Column("desc", String(100)),
)
def test_autoincrement_with_identity(self, connection):
res = connection.execute(self.tables.tbl.insert(), {"desc": "row"})
res = connection.execute(self.tables.tbl.select()).first()
eq_(res, (1, "row"))
class ExistsTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"stuff",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.stuff.insert(),
[
{"id": 1, "data": "some data"},
{"id": 2, "data": "some data"},
{"id": 3, "data": "some data"},
{"id": 4, "data": "some other data"},
],
)
def test_select_exists(self, connection):
stuff = self.tables.stuff
eq_(
connection.execute(
select(literal(1)).where(
exists().where(stuff.c.data == "some data")
)
).fetchall(),
[(1,)],
)
def test_select_exists_false(self, connection):
stuff = self.tables.stuff
eq_(
connection.execute(
select(literal(1)).where(
exists().where(stuff.c.data == "no data")
)
).fetchall(),
[],
)
class DistinctOnTest(AssertsCompiledSQL, fixtures.TablesTest):
__backend__ = True
@testing.fails_if(testing.requires.supports_distinct_on)
def test_distinct_on(self):
stm = select("*").distinct(column("q")).select_from(table("foo"))
with testing.expect_deprecated(
"DISTINCT ON is currently supported only by the PostgreSQL "
):
self.assert_compile(stm, "SELECT DISTINCT * FROM foo")
class IsOrIsNotDistinctFromTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("supports_is_distinct_from",)
@classmethod
def define_tables(cls, metadata):
Table(
"is_distinct_test",
metadata,
Column("id", Integer, primary_key=True),
Column("col_a", Integer, nullable=True),
Column("col_b", Integer, nullable=True),
)
@testing.combinations(
("both_int_different", 0, 1, 1),
("both_int_same", 1, 1, 0),
("one_null_first", None, 1, 1),
("one_null_second", 0, None, 1),
("both_null", None, None, 0),
id_="iaaa",
argnames="col_a_value, col_b_value, expected_row_count_for_is",
)
def test_is_or_is_not_distinct_from(
self, col_a_value, col_b_value, expected_row_count_for_is, connection
):
tbl = self.tables.is_distinct_test
connection.execute(
tbl.insert(),
[{"id": 1, "col_a": col_a_value, "col_b": col_b_value}],
)
result = connection.execute(
tbl.select().where(tbl.c.col_a.is_distinct_from(tbl.c.col_b))
).fetchall()
eq_(
len(result),
expected_row_count_for_is,
)
expected_row_count_for_is_not = (
1 if expected_row_count_for_is == 0 else 0
)
result = connection.execute(
tbl.select().where(tbl.c.col_a.is_not_distinct_from(tbl.c.col_b))
).fetchall()
eq_(
len(result),
expected_row_count_for_is_not,
)
|
{
"content_hash": "bf4a648491ff4ccaf56907c82e5221d5",
"timestamp": "",
"source": "github",
"line_count": 1881,
"max_line_length": 79,
"avg_line_length": 30.995746943115364,
"alnum_prop": 0.49391969538445707,
"repo_name": "zzzeek/sqlalchemy",
"id": "6394e4b9a10da712ecf0c33883c919c952d8b72c",
"size": "58326",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "lib/sqlalchemy/testing/suite/test_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
""" Download and install WordPress
"""
import os
import subprocess
import shutil
from distutils.dir_util import copy_tree
import tarfile
import urllib.request
import config_loader
import template
from teminal_colors import echo
# Public (html) directory
WWW_DIR = '/srv/www'
def install_wordpress():
"""Download and install WordPress"""
data = config_loader.parse_config('wordpress.json')
if not os.path.isdir(WWW_DIR):
os.mkdir(WWW_DIR)
_download_latest(data)
_configure(data)
_additional_dirs(data)
fix_permissions(data)
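# Example usage (a sketch, assuming a wordpress.json config with at least a
# "domain_name" key is readable by config_loader and the script runs with
# sufficient privileges to write to /srv/www):
#   from wordpress import install_wordpress
#   install_wordpress()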
def _download_latest(data):
"""Download the latest WordPress release from the official site"""
_wp_url = 'https://wordpress.org/latest.tar.gz'
echo('downloading and extracting latest WordPress to %s'
% data['domain_name'], 'i')
if not os.path.isdir(os.path.join(WWW_DIR, data['domain_name'])):
os.makedirs(os.path.join(WWW_DIR, data['domain_name']), exist_ok=True)
with urllib.request.urlopen(_wp_url) as wp:
with open(os.path.join('/tmp', 'latest.tar.gz'), 'wb') as f:
f.write(wp.read())
tar = tarfile.open('/tmp/latest.tar.gz')
tar.extractall(WWW_DIR)
tar.close()
copy_tree(os.path.join(WWW_DIR, 'wordpress'),
os.path.join(WWW_DIR, data['domain_name']))
    # the extracted 'wordpress' directory must be removed recursively
    shutil.rmtree(os.path.join(WWW_DIR, 'wordpress'))
os.remove('/tmp/latest.tar.gz')
def _configure(data):
"""Generate a wp-config.php configuration file"""
# fetch secret salts
echo('generating wp-config.php file', 'i')
_url = 'https://api.wordpress.org/secret-key/1.1/salt/'
with urllib.request.urlopen(_url) as request:
_data = {}
_data['secrets'] = request.read().decode('utf8')
for key, value in data.items():
_data[key] = value
_tpl = template.generate_template('templates/wp-config.php', _data)
with open(os.path.join(
WWW_DIR, data['domain_name'], 'wp-config.php'), 'w') as config:
config.write(_tpl)
def _additional_dirs(data):
"""Create additional child dirs"""
echo('creating additional directories', 'i')
os.makedirs(os.path.join(WWW_DIR,
data['domain_name'], 'wp-content', 'upgrade'),
exist_ok=True)
def fix_permissions(data):
"""Setup directory ownership and file permissions"""
echo('fixing directory ownership and file permissions', 'i')
subprocess.call(['chown',
'-R', 'www-data:www-data',
WWW_DIR + '/%s' % data['domain_name']])
|
{
"content_hash": "ef5443e9c96939d04ebb855c8d78dd1d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 33.30379746835443,
"alnum_prop": 0.6092740402888636,
"repo_name": "stef-k/starter",
"id": "ec0a0c039b4293bf275f6d16a69b88e5ca9aece7",
"size": "2631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordpress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1092"
},
{
"name": "Nginx",
"bytes": "2175"
},
{
"name": "PHP",
"bytes": "2383"
},
{
"name": "Python",
"bytes": "35356"
},
{
"name": "Vim script",
"bytes": "10150"
}
],
"symlink_target": ""
}
|
"""Converts cirq circuits into latex using qcircuit."""
from cirq.contrib.qcircuit.qcircuit_diagram import (
circuit_to_latex_using_qcircuit,
)
from cirq.contrib.qcircuit.qcircuit_diagram_info import (
escape_text_for_latex,
get_multigate_parameters,
get_qcircuit_diagram_info,
)
|
{
"content_hash": "db5055d08cea09998bb2f53fbd0e6c94",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 57,
"avg_line_length": 27.09090909090909,
"alnum_prop": 0.7483221476510067,
"repo_name": "balopat/Cirq",
"id": "c01091cebbdec467eb373cb0eed8fe441c16c2f7",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/contrib/qcircuit/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5923"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "Jupyter Notebook",
"bytes": "23905"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6256825"
},
{
"name": "Shell",
"bytes": "50383"
},
{
"name": "Starlark",
"bytes": "5979"
}
],
"symlink_target": ""
}
|
import os
from sys import argv
import ntpath
import requests
import hashlib
import json
FORMATS = ['py', 'html', 'java', 'go', 'htm', 'css', 'rb', 'md', 'txt']
URL_INDEX = "http://localhost:9200/code/"
URL_TYPE = URL_INDEX + "source_file/"
def read_file(file):
try:
txt = open(file)
return txt.read()
except:
print "Boom"
return None
def list_files(dir):
r = []
subdirs = [x[0] for x in os.walk(dir)]
for subdir in subdirs:
files = os.walk(subdir).next()[2]
if (len(files) > 0):
for file in files:
if not os.path.isdir(file):
r.append(subdir + "/" + file)
return r
def is_indexable(file_format):
return file_format in FORMATS
def get_format(file):
if "." in file:
pos = file.index(".")
return file[pos + 1:]
def get_file_name(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def process_file(file):
format = get_format(file)
if is_indexable(format):
raw = read_file(file)
if raw:
doc = {}
doc['source_code'] = read_file(file)
doc['path'] = file
doc['file_name'] = get_file_name(file)
doc['language'] = get_format(file)
m = hashlib.md5()
m.update(file)
doc['_id'] = m.hexdigest()
return doc
return None
def post_object(obj):
try:
r = requests.put(URL_TYPE + obj['_id'], json.dumps(obj))
print obj['path'] + " " + r.text
except Exception, e:
print e
script, filename = argv
files = list_files(filename)
for f in files:
obj = process_file(f)
if obj:
post_object(obj)
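# Example invocation (a sketch; assumes an Elasticsearch node reachable on
# localhost:9200, per URL_INDEX above):
#   $ python index_path.py /path/to/source/tree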
|
{
"content_hash": "b84f974f16e53eeb4c7d4d59715369a9",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 71,
"avg_line_length": 20.821428571428573,
"alnum_prop": 0.5403087478559176,
"repo_name": "ipedrazas/search-ctl",
"id": "16158df4c38cce0c7ab7dcc0dff511e14f33e29b",
"size": "1768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3080"
}
],
"symlink_target": ""
}
|
default_app_config = 'distributed_task.apps.DistributedTaskConfig'
from .core.decorators import register_task
|
{
"content_hash": "e7d4573be5d3ac93a4294901c935bb49",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 66,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.8363636363636363,
"repo_name": "mrcrgl/django_distributed_task",
"id": "9eb5202b39a9839de0a4a17557fc98f3c22d5e5d",
"size": "110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed_task/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26810"
},
{
"name": "Shell",
"bytes": "6493"
}
],
"symlink_target": ""
}
|
from ovsdbapp.backend.ovs_idl import idlutils
from neutron.common import _deprecate
_deprecate._MovedGlobals(idlutils)
|
{
"content_hash": "2ee1f3b6027cf890aec8a2326b4e74b6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 24.2,
"alnum_prop": 0.8264462809917356,
"repo_name": "eayunstack/neutron",
"id": "b8d140f8db1a3f550242fcb2ec7806b16121199e",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/agent/ovsdb/native/idlutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
}
|
"""Support for Balboa Spa Wifi adaptor."""
from __future__ import annotations
import asyncio
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_HALVES,
PRECISION_WHOLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from .const import CLIMATE, CLIMATE_SUPPORTED_FANSTATES, CLIMATE_SUPPORTED_MODES, DOMAIN
from .entity import BalboaEntity
SET_TEMPERATURE_WAIT = 1
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the spa climate device."""
async_add_entities(
[
BalboaSpaClimate(
entry,
hass.data[DOMAIN][entry.entry_id],
CLIMATE,
)
],
)
class BalboaSpaClimate(BalboaEntity, ClimateEntity):
"""Representation of a Balboa Spa Climate device."""
_attr_icon = "mdi:hot-tub"
_attr_fan_modes = CLIMATE_SUPPORTED_FANSTATES
_attr_hvac_modes = CLIMATE_SUPPORTED_MODES
def __init__(self, entry, client, devtype, num=None):
"""Initialize the climate entity."""
super().__init__(entry, client, devtype, num)
self._balboa_to_ha_blower_map = {
self._client.BLOWER_OFF: FAN_OFF,
self._client.BLOWER_LOW: FAN_LOW,
self._client.BLOWER_MEDIUM: FAN_MEDIUM,
self._client.BLOWER_HIGH: FAN_HIGH,
}
self._ha_to_balboa_blower_map = {
value: key for key, value in self._balboa_to_ha_blower_map.items()
}
self._balboa_to_ha_heatmode_map = {
self._client.HEATMODE_READY: HVAC_MODE_HEAT,
self._client.HEATMODE_RNR: HVAC_MODE_AUTO,
self._client.HEATMODE_REST: HVAC_MODE_OFF,
}
self._ha_heatmode_to_balboa_map = {
value: key for key, value in self._balboa_to_ha_heatmode_map.items()
}
scale = self._client.get_tempscale()
self._attr_preset_modes = self._client.get_heatmode_stringlist()
self._attr_supported_features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
if self._client.have_blower():
self._attr_supported_features |= SUPPORT_FAN_MODE
self._attr_min_temp = self._client.tmin[self._client.TEMPRANGE_LOW][scale]
self._attr_max_temp = self._client.tmax[self._client.TEMPRANGE_HIGH][scale]
self._attr_temperature_unit = TEMP_FAHRENHEIT
self._attr_precision = PRECISION_WHOLE
if self._client.get_tempscale() == self._client.TSCALE_C:
self._attr_temperature_unit = TEMP_CELSIUS
self._attr_precision = PRECISION_HALVES
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode."""
mode = self._client.get_heatmode()
return self._balboa_to_ha_heatmode_map[mode]
@property
def hvac_action(self) -> str:
"""Return the current operation mode."""
state = self._client.get_heatstate()
if state >= self._client.ON:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def fan_mode(self) -> str:
"""Return the current fan mode."""
fanmode = self._client.get_blower()
return self._balboa_to_ha_blower_map.get(fanmode, FAN_OFF)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._client.get_curtemp()
@property
def target_temperature(self):
"""Return the target temperature we try to reach."""
return self._client.get_settemp()
@property
def preset_mode(self):
"""Return current preset mode."""
return self._client.get_heatmode(True)
async def async_set_temperature(self, **kwargs):
"""Set a new target temperature."""
scale = self._client.get_tempscale()
newtemp = kwargs[ATTR_TEMPERATURE]
if newtemp > self._client.tmax[self._client.TEMPRANGE_LOW][scale]:
await self._client.change_temprange(self._client.TEMPRANGE_HIGH)
await asyncio.sleep(SET_TEMPERATURE_WAIT)
if newtemp < self._client.tmin[self._client.TEMPRANGE_HIGH][scale]:
await self._client.change_temprange(self._client.TEMPRANGE_LOW)
await asyncio.sleep(SET_TEMPERATURE_WAIT)
await self._client.send_temp_change(newtemp)
async def async_set_preset_mode(self, preset_mode) -> None:
"""Set new preset mode."""
modelist = self._client.get_heatmode_stringlist()
self._async_validate_mode_or_raise(preset_mode)
if preset_mode not in modelist:
raise ValueError(f"{preset_mode} is not a valid preset mode")
await self._client.change_heatmode(modelist.index(preset_mode))
async def async_set_fan_mode(self, fan_mode):
"""Set new fan mode."""
await self._client.change_blower(self._ha_to_balboa_blower_map[fan_mode])
def _async_validate_mode_or_raise(self, mode):
"""Check that the mode can be set."""
if mode == self._client.HEATMODE_RNR:
raise ValueError(f"{mode} can only be reported but not set")
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode.
OFF = Rest
AUTO = Ready in Rest (can't be set, only reported)
HEAT = Ready
"""
mode = self._ha_heatmode_to_balboa_map[hvac_mode]
self._async_validate_mode_or_raise(mode)
await self._client.change_heatmode(self._ha_heatmode_to_balboa_map[hvac_mode])
|
{
"content_hash": "593f08f1d02eea78ee936863d27a5a3f",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 88,
"avg_line_length": 36.04347826086956,
"alnum_prop": 0.6267447871790454,
"repo_name": "home-assistant/home-assistant",
"id": "edd44b03f17c108907f4a215b4db45eb78483151",
"size": "5803",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/balboa/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
import os
import sys
import threading
from datetime import datetime
from syncplay import constants
from syncplay import utils
from syncplay.messages import getMessage, getLanguages, setLanguage, getInitialLanguage
from syncplay.players.playerFactory import PlayerFactory
from syncplay.utils import isBSD, isLinux, isMacOS, isWindows
from syncplay.utils import resourcespath, posixresourcespath, playerPathExists
from syncplay.vendor.Qt import QtCore, QtWidgets, QtGui, __binding__, IsPySide, IsPySide2, IsPySide6
from syncplay.vendor.Qt.QtCore import Qt, QSettings, QCoreApplication, QSize, QPoint, QUrl, QLine, QEventLoop, Signal
from syncplay.vendor.Qt.QtWidgets import QApplication, QLineEdit, QLabel, QCheckBox, QButtonGroup, QRadioButton, QDoubleSpinBox, QPlainTextEdit
from syncplay.vendor.Qt.QtGui import QCursor, QIcon, QImage, QDesktopServices
try:
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
except AttributeError:
pass # To ignore error "Attribute Qt::AA_EnableHighDpiScaling must be set before QCoreApplication is created"
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
if IsPySide6:
from PySide6.QtCore import QStandardPaths
elif IsPySide2:
from PySide2.QtCore import QStandardPaths
class GuiConfiguration:
def __init__(self, config, error=None, defaultConfig=None):
self.defaultConfig = defaultConfig
self.config = config
self._availablePlayerPaths = []
self.error = error
constants.DEBUG_MODE = config['debug']
def run(self):
if QCoreApplication.instance() is None:
self.app = QtWidgets.QApplication(sys.argv)
dialog = ConfigDialog(self.config, self._availablePlayerPaths, self.error, self.defaultConfig)
configLoop = QEventLoop()
dialog.show()
dialog.closed.connect(configLoop.quit)
configLoop.exec_()
def setAvailablePaths(self, paths):
self._availablePlayerPaths = paths
def getProcessedConfiguration(self):
return self.config
class WindowClosed(Exception):
pass
class GetPlayerIconThread(threading.Thread, QtCore.QObject):
daemon = True
done = QtCore.Signal(str, str)
def __init__(self):
threading.Thread.__init__(self, name='GetPlayerIcon')
QtCore.QObject.__init__(self)
self.condvar = threading.Condition()
self.playerpath = None
def setPlayerPath(self, playerpath):
self.condvar.acquire()
was_none = self.playerpath is None
self.playerpath = playerpath
if was_none:
self.condvar.notify()
self.condvar.release()
def run(self):
while True:
self.condvar.acquire()
if self.playerpath is None:
self.condvar.wait()
playerpath = self.playerpath
self.playerpath = None
self.condvar.release()
self.done.emit('spinner.mng', '')
iconpath = PlayerFactory().getPlayerIconByPath(playerpath)
self.done.emit(iconpath, playerpath)
class ConfigDialog(QtWidgets.QDialog):
pressedclosebutton = True
moreToggling = False
closed = Signal()
def automaticUpdatePromptCheck(self):
if self.automaticupdatesCheckbox.checkState() == Qt.PartiallyChecked:
reply = QtWidgets.QMessageBox.question(
self, "Syncplay",
getMessage("promptforupdate-label"),
QtWidgets.QMessageBox.StandardButton.Yes | QtWidgets.QMessageBox.StandardButton.No)
if reply == QtWidgets.QMessageBox.Yes:
self.automaticupdatesCheckbox.setChecked(True)
else:
self.automaticupdatesCheckbox.setChecked(False)
def moreToggled(self):
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
if self.moreToggling is False:
self.moreToggling = True
if self.showmoreCheckbox.isChecked():
self.tabListFrame.show()
self.resetButton.show()
self.playerargsTextbox.show()
self.playerargsLabel.show()
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
self.runButton.show()
self.saveMoreState(True)
self.tabListWidget.setCurrentRow(0)
self.ensureTabListIsVisible()
if isMacOS(): self.mediaplayerSettingsGroup.setFixedHeight(self.mediaplayerSettingsGroup.minimumSizeHint().height())
self.stackedFrame.setFixedHeight(self.stackedFrame.minimumSizeHint().height())
else:
self.tabListFrame.hide()
self.resetButton.hide()
self.playerargsTextbox.hide()
self.playerargsLabel.hide()
self.runButton.hide()
if self.mediapathTextbox.text() == "":
self.mediapathTextbox.hide()
self.mediapathLabel.hide()
self.mediabrowseButton.hide()
else:
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
self.saveMoreState(False)
self.stackedLayout.setCurrentIndex(0)
if isMacOS():
self.mediaplayerSettingsGroup.setFixedHeight(self.mediaplayerSettingsGroup.minimumSizeHint().height())
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50
else:
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+13
if self.error:
newHeight += self.errorLabel.height()+3
self.stackedFrame.setFixedHeight(newHeight)
self.adjustSize()
if isMacOS():
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50+16
self.setFixedWidth(self.sizeHint().width())
self.setFixedHeight(newHeight)
else:
self.setFixedSize(self.sizeHint())
self.moreToggling = False
self.setFixedWidth(self.minimumSizeHint().width())
def openHelp(self):
        QDesktopServices.openUrl(QUrl("https://syncplay.pl/guide/client/"))
def openRoomsDialog(self):
RoomsDialog = QtWidgets.QDialog()
RoomsLayout = QtWidgets.QGridLayout()
RoomsTextbox = QtWidgets.QPlainTextEdit()
RoomsDialog.setWindowTitle(getMessage("roomlist-msgbox-label"))
RoomsPlaylistLabel = QtWidgets.QLabel(getMessage("roomlist-msgbox-label"))
RoomsTextbox.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
RoomsTextbox.setPlainText(utils.getListAsMultilineString(self.config['roomList']))
RoomsLayout.addWidget(RoomsPlaylistLabel, 0, 0, 1, 1)
RoomsLayout.addWidget(RoomsTextbox, 1, 0, 1, 1)
RoomsButtonBox = QtWidgets.QDialogButtonBox()
RoomsButtonBox.setOrientation(Qt.Horizontal)
RoomsButtonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
RoomsButtonBox.accepted.connect(RoomsDialog.accept)
RoomsButtonBox.rejected.connect(RoomsDialog.reject)
RoomsLayout.addWidget(RoomsButtonBox, 2, 0, 1, 1)
RoomsDialog.setLayout(RoomsLayout)
RoomsDialog.setModal(True)
RoomsDialog.setWindowFlags(RoomsDialog.windowFlags() & ~Qt.WindowContextHelpButtonHint)
RoomsDialog.show()
result = RoomsDialog.exec_()
if result == QtWidgets.QDialog.Accepted:
newRooms = utils.convertMultilineStringToList(RoomsTextbox.toPlainText())
newRooms = sorted(newRooms)
self.relistRoomList(newRooms)
def safenormcaseandpath(self, path):
if utils.isURL(path):
return path
else:
return os.path.normcase(os.path.normpath(path))
def _tryToFillPlayerPath(self, playerpath, playerpathlist):
settings = QSettings("Syncplay", "PlayerList")
settings.beginGroup("PlayerList")
savedPlayers = settings.value("PlayerList", [])
if not isinstance(savedPlayers, list):
savedPlayers = []
else:
for i, savedPlayer in enumerate(savedPlayers):
savedPlayers[i] = self.safenormcaseandpath(savedPlayer)
playerpathlist = list(set(playerpathlist + savedPlayers))
settings.endGroup()
foundpath = ""
if playerpath is not None and playerpath != "":
if utils.isURL(playerpath):
foundpath = playerpath
self.executablepathCombobox.addItem(foundpath)
else:
if not playerPathExists(playerpath):
expandedpath = PlayerFactory().getExpandedPlayerPathByPath(playerpath)
if expandedpath is not None and playerPathExists(expandedpath):
playerpath = expandedpath
elif "mpvnet.exe" in playerpath and playerPathExists(playerpath.replace("mpvnet.exe","mpvnet.com")):
self.executablepathCombobox.addItem(playerpath)
if playerPathExists(playerpath):
foundpath = playerpath
self.executablepathCombobox.addItem(foundpath)
for path in playerpathlist:
if utils.isURL(path):
if foundpath == "":
foundpath = path
if path != playerpath:
self.executablepathCombobox.addItem(path)
elif playerPathExists(path) and os.path.normcase(os.path.normpath(path)) != os.path.normcase(os.path.normpath(foundpath)):
self.executablepathCombobox.addItem(path)
if foundpath == "":
foundpath = path
if foundpath != "":
settings.beginGroup("PlayerList")
playerpathlist.append(self.safenormcaseandpath(foundpath))
settings.setValue("PlayerList", list(set(playerpathlist)))
settings.endGroup()
return foundpath
@QtCore.Slot(str, str)
def _updateExecutableIcon(self, iconpath, playerpath):
if iconpath is not None and iconpath != "":
if iconpath.endswith('.mng'):
movie = QtGui.QMovie(resourcespath + iconpath)
movie.setCacheMode(QtGui.QMovie.CacheMode.CacheAll)
self.executableiconLabel.setMovie(movie)
movie.start()
else:
self.executableiconImage.load(resourcespath + iconpath)
self.executableiconLabel.setPixmap(QtGui.QPixmap.fromImage(self.executableiconImage))
else:
self.executableiconLabel.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage()))
self.updatePlayerArguments(playerpath)
def updateExecutableIcon(self):
"""
Start getting the icon path in another thread, which will set the GUI
icon if valid.
This is performed outside the main thread because networked players may
take a long time to perform their checks and hang the GUI while doing
so.
"""
currentplayerpath = str(self.executablepathCombobox.currentText())
self._playerProbeThread.setPlayerPath(currentplayerpath)
def updatePlayerArguments(self, currentplayerpath):
argumentsForPath = utils.getPlayerArgumentsByPathAsText(self.perPlayerArgs, currentplayerpath)
self.playerargsTextbox.blockSignals(True)
self.playerargsTextbox.setText(argumentsForPath)
self.playerargsTextbox.blockSignals(False)
def changedPlayerArgs(self):
currentplayerpath = self.executablepathCombobox.currentText()
if currentplayerpath:
NewPlayerArgs = self.playerargsTextbox.text().split(" ") if self.playerargsTextbox.text() else ""
self.perPlayerArgs[self.executablepathCombobox.currentText()] = NewPlayerArgs
def languageChanged(self):
setLanguage(str(self.languageCombobox.itemData(self.languageCombobox.currentIndex())))
QtWidgets.QMessageBox.information(self, "Syncplay", getMessage("language-changed-msgbox-label"))
def browsePlayerpath(self):
options = QtWidgets.QFileDialog.Options()
defaultdirectory = ""
browserfilter = "All files (*)"
if os.name == 'nt':
browserfilter = "Executable files (*.exe);;All files (*)"
if "PROGRAMFILES(X86)" in os.environ:
defaultdirectory = os.environ["ProgramFiles(x86)"]
elif "PROGRAMFILES" in os.environ:
defaultdirectory = os.environ["ProgramFiles"]
elif "PROGRAMW6432" in os.environ:
defaultdirectory = os.environ["ProgramW6432"]
elif isLinux():
defaultdirectory = "/usr/bin"
elif isMacOS():
defaultdirectory = "/Applications/"
elif isBSD():
defaultdirectory = "/usr/local/bin"
fileName, filtr = QtWidgets.QFileDialog.getOpenFileName(
self,
"Browse for media player executable",
defaultdirectory,
browserfilter, "", options)
if fileName:
if isMacOS() and fileName.endswith('.app'): # see GitHub issue #91
# Mac OS X application bundles contain a Info.plist in the Contents subdirectory of the .app.
# This plist file includes the 'CFBundleExecutable' key, which specifies the name of the
# executable. I would have used plistlib here, but since the version of this library in
# py < 3.4 can't read from binary plist files it's pretty much useless. Therefore, let's
# play a game of "Guess my executable!"
# Step 1: get all the executable files. In a Mac OS X Application bundle, executables are stored
# inside <bundle root>/Contents/MacOS.
execPath = os.path.join(os.path.normpath(fileName), 'Contents', 'MacOS')
execFiles = []
for fn in os.listdir(execPath):
fn = os.path.join(execPath, fn)
if os.path.isfile(fn) and os.access(fn, os.X_OK):
execFiles.append(fn)
# Step 2: figure out which file name looks like the application name
baseAppName = os.path.basename(fileName).replace('.app', '').lower()
foundExe = False
for fn in execFiles:
baseExecName = os.path.basename(fn).lower()
if baseAppName == baseExecName:
fileName = fn
foundExe = True
break
# Step 3: use the first executable in the list if no executable was found
try:
if not foundExe:
fileName = execFiles[0]
except IndexError: # whoops, looks like this .app doesn't contain a executable file at all
pass
self.executablepathCombobox.setEditText(os.path.normpath(fileName))
def loadLastUpdateCheckDate(self):
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("Update")
try:
self.lastCheckedForUpdates = settings.value("lastCheckedQt", None)
if self.lastCheckedForUpdates:
if self.config["lastCheckedForUpdates"] != None and self.config["lastCheckedForUpdates"] != "":
if self.lastCheckedForUpdates.toPython() > datetime.strptime(self.config["lastCheckedForUpdates"], "%Y-%m-%d %H:%M:%S.%f"):
self.config["lastCheckedForUpdates"] = self.lastCheckedForUpdates.toString("yyyy-MM-d HH:mm:ss.z")
else:
self.config["lastCheckedForUpdates"] = self.lastCheckedForUpdates.toString("yyyy-MM-d HH:mm:ss.z")
except:
self.config["lastCheckedForUpdates"] = None
def loadSavedPublicServerList(self):
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("PublicServerList")
self.publicServers = settings.value("publicServers", None)
def loadMediaBrowseSettings(self):
settings = QSettings("Syncplay", "MediaBrowseDialog")
settings.beginGroup("MediaBrowseDialog")
self.mediadirectory = settings.value("mediadir", "")
settings.endGroup()
def saveMediaBrowseSettings(self):
settings = QSettings("Syncplay", "MediaBrowseDialog")
settings.beginGroup("MediaBrowseDialog")
settings.setValue("mediadir", self.mediadirectory)
settings.endGroup()
def getMoreState(self):
settings = QSettings("Syncplay", "MoreSettings")
settings.beginGroup("MoreSettings")
morestate = str.lower(str(settings.value("ShowMoreSettings", "false")))
settings.endGroup()
if morestate == "true":
return True
else:
return False
def saveMoreState(self, morestate):
settings = QSettings("Syncplay", "MoreSettings")
settings.beginGroup("MoreSettings")
settings.setValue("ShowMoreSettings", morestate)
settings.endGroup()
def updateServerList(self):
try:
servers = utils.getListOfPublicServers()
except IOError as e:
self.showErrorMessage(e.args[0])
return
currentServer = self.hostCombobox.currentText()
self.hostCombobox.clear()
if servers:
i = 0
for server in servers:
self.hostCombobox.addItem(server[1])
self.hostCombobox.setItemData(i, server[0], Qt.ToolTipRole)
i += 1
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("PublicServerList")
settings.setValue("publicServers", servers)
self.hostCombobox.setEditText(currentServer)
def fillRoomsCombobox(self):
previousRoomSelection = self.roomsCombobox.currentText()
self.roomsCombobox.clear()
for roomListValue in self.config['roomList']:
self.roomsCombobox.addItem(roomListValue)
self.roomsCombobox.setEditText(previousRoomSelection)
def relistRoomList(self, newRooms):
filteredNewRooms = [room for room in newRooms if room and not room.isspace()]
self.config['roomList'] = filteredNewRooms
self.fillRoomsCombobox()
def addRoomToList(self, newRoom=None):
if newRoom is None:
newRoom = self.roomsCombobox.currentText()
if not newRoom:
return
roomList = self.config['roomList']
if newRoom not in roomList:
roomList.append(newRoom)
roomList = sorted(roomList)
self.config['roomList'] = roomList
def showErrorMessage(self, errorMessage):
QtWidgets.QMessageBox.warning(self, "Syncplay", errorMessage)
def browseMediapath(self):
self.loadMediaBrowseSettings()
options = QtWidgets.QFileDialog.Options()
if IsPySide:
if self.config["mediaSearchDirectories"] and os.path.isdir(self.config["mediaSearchDirectories"][0]):
defaultdirectory = self.config["mediaSearchDirectories"][0]
elif os.path.isdir(self.mediadirectory):
defaultdirectory = self.mediadirectory
elif os.path.isdir(QDesktopServices.storageLocation(QDesktopServices.MoviesLocation)):
defaultdirectory = QDesktopServices.storageLocation(QDesktopServices.MoviesLocation)
elif os.path.isdir(QDesktopServices.storageLocation(QDesktopServices.HomeLocation)):
defaultdirectory = QDesktopServices.storageLocation(QDesktopServices.HomeLocation)
else:
defaultdirectory = ""
elif IsPySide6 or IsPySide2:
if self.config["mediaSearchDirectories"] and os.path.isdir(self.config["mediaSearchDirectories"][0]):
defaultdirectory = self.config["mediaSearchDirectories"][0]
elif os.path.isdir(self.mediadirectory):
defaultdirectory = self.mediadirectory
elif os.path.isdir(QStandardPaths.standardLocations(QStandardPaths.MoviesLocation)[0]):
defaultdirectory = QStandardPaths.standardLocations(QStandardPaths.MoviesLocation)[0]
elif os.path.isdir(QStandardPaths.standardLocations(QStandardPaths.HomeLocation)[0]):
defaultdirectory = QStandardPaths.standardLocations(QStandardPaths.HomeLocation)[0]
else:
defaultdirectory = ""
browserfilter = "All files (*)"
fileName, filtr = QtWidgets.QFileDialog.getOpenFileName(
self, "Browse for media files", defaultdirectory,
browserfilter, "", options)
if fileName:
self.mediapathTextbox.setText(os.path.normpath(fileName))
self.mediadirectory = os.path.dirname(fileName)
self.saveMediaBrowseSettings()
def _runWithoutStoringConfig(self):
self._saveDataAndLeave(False)
def _saveDataAndLeave(self, storeConfiguration=True):
self.config['noStore'] = not storeConfiguration
if storeConfiguration:
self.automaticUpdatePromptCheck()
self.loadLastUpdateCheckDate()
self.config["perPlayerArguments"] = self.perPlayerArgs
self.config["mediaSearchDirectories"] = utils.convertMultilineStringToList(self.mediasearchTextEdit.toPlainText())
self.config["trustedDomains"] = utils.convertMultilineStringToList(self.trusteddomainsTextEdit.toPlainText())
if self.serverpassTextbox.isEnabled():
self.config['password'] = self.serverpassTextbox.text()
self.processWidget(self, lambda w: self.saveValues(w))
if self.hostCombobox.currentText():
self.config['host'] = self.hostCombobox.currentText() if ":" in self.hostCombobox.currentText() else self.hostCombobox.currentText() + ":" + str(constants.DEFAULT_PORT)
self.config['host'] = self.config['host'].replace(" ", "").replace("\t", "").replace("\n", "").replace("\r", "")
else:
self.config['host'] = None
self.config['playerPath'] = str(self.safenormcaseandpath(self.executablepathCombobox.currentText()))
self.config['language'] = str(self.languageCombobox.itemData(self.languageCombobox.currentIndex()))
if self.mediapathTextbox.text() == "":
self.config['file'] = None
elif os.path.isfile(os.path.abspath(self.mediapathTextbox.text())):
self.config['file'] = os.path.abspath(self.mediapathTextbox.text())
else:
self.config['file'] = str(self.mediapathTextbox.text())
self.config['publicServers'] = self.publicServerAddresses
self.config['room'] = self.roomsCombobox.currentText()
if self.config['autosaveJoinsToList']:
self.addRoomToList(self.config['room'])
self.pressedclosebutton = False
self.close()
self.closed.emit()
def closeEvent(self, event):
if self.pressedclosebutton:
super(ConfigDialog, self).closeEvent(event)
self.closed.emit()
sys.exit()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
sys.exit()
def dragEnterEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
event.acceptProposedAction()
def dropEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
dropfilepath = os.path.abspath(str(event.mimeData().urls()[0].toLocalFile()))
if dropfilepath[-4:].lower() == ".exe":
self.executablepathCombobox.setEditText(dropfilepath)
else:
self.mediapathTextbox.setText(dropfilepath)
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
if not self.showmoreCheckbox.isChecked():
newHeight = self.connectionSettingsGroup.minimumSizeHint().height() + self.mediaplayerSettingsGroup.minimumSizeHint().height() + self.bottomButtonFrame.minimumSizeHint().height() + 3
if self.error:
newHeight += self.errorLabel.height() + 3
self.stackedFrame.setFixedHeight(newHeight)
self.adjustSize()
self.setFixedSize(self.sizeHint())
def processWidget(self, container, torun):
for widget in container.children():
self.processWidget(widget, torun)
if hasattr(widget, 'objectName') and widget.objectName() and widget.objectName()[:3] != "qt_":
torun(widget)
def loadTooltips(self, widget):
tooltipName = widget.objectName().lower().split(constants.CONFIG_NAME_MARKER)[0] + "-tooltip"
if tooltipName[:1] == constants.INVERTED_STATE_MARKER or tooltipName[:1] == constants.LOAD_SAVE_MANUALLY_MARKER:
tooltipName = tooltipName[1:]
widget.setToolTip(getMessage(tooltipName))
def loadValues(self, widget):
valueName = str(widget.objectName())
if valueName[:1] == constants.LOAD_SAVE_MANUALLY_MARKER:
return
if isinstance(widget, QCheckBox) and widget.objectName():
if valueName[:1] == constants.INVERTED_STATE_MARKER:
valueName = valueName[1:]
inverted = True
else:
inverted = False
if self.config[valueName] is None:
widget.setTristate(True)
widget.setCheckState(Qt.PartiallyChecked)
widget.stateChanged.connect(lambda: widget.setTristate(False))
else:
widget.setChecked(self.config[valueName] != inverted)
elif isinstance(widget, QRadioButton):
radioName, radioValue = valueName.split(constants.CONFIG_NAME_MARKER)[1].split(constants.CONFIG_VALUE_MARKER)
if self.config[radioName] == radioValue:
widget.setChecked(True)
elif isinstance(widget, QLineEdit):
widget.setText(self.config[valueName])
def saveValues(self, widget):
valueName = str(widget.objectName())
if valueName[:1] == constants.LOAD_SAVE_MANUALLY_MARKER:
return
if isinstance(widget, QCheckBox) and widget.objectName():
if widget.checkState() == Qt.PartiallyChecked:
self.config[valueName] = None
else:
if valueName[:1] == constants.INVERTED_STATE_MARKER:
valueName = valueName[1:]
inverted = True
else:
inverted = False
self.config[valueName] = widget.isChecked() != inverted
elif isinstance(widget, QRadioButton):
radioName, radioValue = valueName.split(constants.CONFIG_NAME_MARKER)[1].split(constants.CONFIG_VALUE_MARKER)
if widget.isChecked():
self.config[radioName] = radioValue
elif isinstance(widget, QLineEdit):
self.config[valueName] = widget.text()
def connectChildren(self, widget):
widgetName = str(widget.objectName())
if widgetName in self.subitems:
widget.stateChanged.connect(lambda: self.updateSubwidgets(self, widget))
self.updateSubwidgets(self, widget)
def updateSubwidgets(self, container, parentwidget, subwidgets=None):
widgetName = parentwidget.objectName()
if not subwidgets:
subwidgets = self.subitems[widgetName]
for widget in container.children():
self.updateSubwidgets(widget, parentwidget, subwidgets)
if hasattr(widget, 'objectName') and widget.objectName() and widget.objectName() in subwidgets:
widget.setDisabled(not parentwidget.isChecked())
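    # self.subitems maps a parent checkbox objectName to the objectNames of its
    # dependent widgets; connectChildren/updateSubwidgets then disable those
    # children whenever the parent is unchecked (see e.g. the 'showOSD' entry
    # registered in addMessageTab).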
def addBasicTab(self):
config = self.config
playerpaths = self.playerpaths
error = self.error
if self.datacleared == True:
error = constants.ERROR_MESSAGE_MARKER + "{}".format(getMessage("gui-data-cleared-notification"))
self.error = error
if config['host'] is None:
host = ""
elif ":" in config['host'] and '[' not in config['host']:
host = config['host']
else:
host = config['host'] + ":" + str(config['port'])
self.perPlayerArgs = self.config["perPlayerArguments"]
self.mediaSearchDirectories = self.config["mediaSearchDirectories"]
self.trustedDomains = self.config["trustedDomains"]
self.connectionSettingsGroup = QtWidgets.QGroupBox(getMessage("connection-group-title"))
self.loadSavedPublicServerList()
self.hostCombobox = QtWidgets.QComboBox(self)
if self.publicServers:
i = 0
for publicServer in self.publicServers:
serverTitle = publicServer[0]
serverAddressPort = publicServer[1]
self.hostCombobox.addItem(serverAddressPort)
self.hostCombobox.setItemData(i, serverTitle, Qt.ToolTipRole)
if serverAddressPort not in self.publicServerAddresses:
self.publicServerAddresses.append(serverAddressPort)
i += 1
self.hostCombobox.setEditable(True)
self.hostCombobox.setEditText(host)
self.hostLabel = QLabel(getMessage("host-label"), self)
self.usernameTextbox = QLineEdit(self)
self.usernameTextbox.setObjectName("name")
self.serverpassLabel = QLabel(getMessage("password-label"), self)
self.roomsCombobox = QtWidgets.QComboBox(self)
self.roomsCombobox.setEditable(True)
caseSensitiveCompleter = QtWidgets.QCompleter("", self)
caseSensitiveCompleter.setCaseSensitivity(Qt.CaseSensitive)
self.roomsCombobox.setCompleter(caseSensitiveCompleter)
self.fillRoomsCombobox()
self.roomsCombobox.setEditText(config['room'])
self.usernameLabel = QLabel(getMessage("name-label"), self)
self.serverpassTextbox = QLineEdit(self)
self.serverpassTextbox.setText(self.storedPassword)
self.defaultroomLabel = QLabel(getMessage("room-label"), self)
self.editRoomsButton = QtWidgets.QToolButton()
self.editRoomsButton.setIcon(QtGui.QIcon(resourcespath + 'bullet_edit_centered.png'))
self.editRoomsButton.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "edit-rooms")
self.editRoomsButton.released.connect(self.openRoomsDialog)
self.hostLabel.setObjectName("host")
self.hostCombobox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "host")
self.usernameLabel.setObjectName("name")
self.usernameTextbox.setObjectName("name")
self.serverpassLabel.setObjectName("password")
self.serverpassTextbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "password")
self.hostCombobox.editTextChanged.connect(self.updatePasswordVisibilty)
self.hostCombobox.currentIndexChanged.connect(self.updatePasswordVisibilty)
self.defaultroomLabel.setObjectName("room")
self.roomsCombobox.setObjectName("room")
self.connectionSettingsLayout = QtWidgets.QGridLayout()
self.connectionSettingsLayout.addWidget(self.hostLabel, 0, 0)
self.connectionSettingsLayout.addWidget(self.hostCombobox, 0, 1)
self.connectionSettingsLayout.addWidget(self.serverpassLabel, 1, 0)
self.connectionSettingsLayout.addWidget(self.serverpassTextbox, 1, 1)
self.connectionSettingsLayout.addWidget(self.usernameLabel, 2, 0)
self.connectionSettingsLayout.addWidget(self.usernameTextbox, 2, 1)
self.connectionSettingsLayout.addWidget(self.defaultroomLabel, 3, 0)
self.connectionSettingsLayout.addWidget(self.editRoomsButton, 3, 2, Qt.AlignRight)
self.connectionSettingsLayout.addWidget(self.roomsCombobox, 3, 1)
self.connectionSettingsLayout.setSpacing(10)
self.connectionSettingsGroup.setLayout(self.connectionSettingsLayout)
if isMacOS():
self.connectionSettingsGroup.setFixedHeight(self.connectionSettingsGroup.minimumSizeHint().height())
else:
self.connectionSettingsGroup.setMaximumHeight(self.connectionSettingsGroup.minimumSizeHint().height())
self.playerargsTextbox = QLineEdit("", self)
self.playerargsTextbox.textEdited.connect(self.changedPlayerArgs)
self.playerargsLabel = QLabel(getMessage("player-arguments-label"), self)
self.mediaplayerSettingsGroup = QtWidgets.QGroupBox(getMessage("media-setting-title"))
self.executableiconImage = QtGui.QImage()
self.executableiconLabel = QLabel(self)
self.executableiconLabel.setFixedWidth(16)
self.executableiconLabel.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.executablepathCombobox = QtWidgets.QComboBox(self)
self.executablepathCombobox.setEditable(True)
self.executablepathCombobox.currentIndexChanged.connect(self.updateExecutableIcon)
self.executablepathCombobox.setEditText(self._tryToFillPlayerPath(config['playerPath'], playerpaths))
self.executablepathCombobox.editTextChanged.connect(self.updateExecutableIcon)
self.executablepathLabel = QLabel(getMessage("executable-path-label"), self)
self.executablebrowseButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'folder_explore.png'), getMessage("browse-label"))
self.executablebrowseButton.clicked.connect(self.browsePlayerpath)
self.mediapathTextbox = QLineEdit(config['file'], self)
self.mediapathLabel = QLabel(getMessage("media-path-label"), self)
self.mediabrowseButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'folder_explore.png'), getMessage("browse-label"))
self.mediabrowseButton.clicked.connect(self.browseMediapath)
self.executablepathLabel.setObjectName("executable-path")
self.executablepathCombobox.setObjectName("executable-path")
self.executablepathCombobox.setMinimumContentsLength(constants.EXECUTABLE_COMBOBOX_MINIMUM_LENGTH)
if not IsPySide6:
self.executablepathCombobox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToMinimumContentsLength)
else:
self.executablepathCombobox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.mediapathLabel.setObjectName("media-path")
self.mediapathTextbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "media-path")
self.playerargsLabel.setObjectName("player-arguments")
self.playerargsTextbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "player-arguments")
self.mediaplayerSettingsLayout = QtWidgets.QGridLayout()
self.mediaplayerSettingsLayout.addWidget(self.executablepathLabel, 0, 0, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.executableiconLabel, 0, 1, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.executablepathCombobox, 0, 2, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.executablebrowseButton, 0, 3, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.mediapathLabel, 1, 0, 1, 2)
self.mediaplayerSettingsLayout.addWidget(self.mediapathTextbox, 1, 2, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.mediabrowseButton, 1, 3, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.playerargsLabel, 2, 0, 1, 2)
self.mediaplayerSettingsLayout.addWidget(self.playerargsTextbox, 2, 2, 1, 2)
self.mediaplayerSettingsLayout.setSpacing(10)
self.mediaplayerSettingsGroup.setLayout(self.mediaplayerSettingsLayout)
iconWidth = self.executableiconLabel.minimumSize().width()+self.mediaplayerSettingsLayout.spacing()
maxWidth = max(
self.hostLabel.minimumSizeHint().width(),
self.usernameLabel.minimumSizeHint().width(),
self.serverpassLabel.minimumSizeHint().width(),
self.defaultroomLabel.minimumSizeHint().width(),
self.executablepathLabel.minimumSizeHint().width(),
self.mediapathLabel.minimumSizeHint().width(),
self.playerargsLabel.minimumSizeHint().width()
)
self.hostLabel.setMinimumWidth(maxWidth+iconWidth)
self.usernameLabel.setMinimumWidth(maxWidth+iconWidth)
self.serverpassLabel.setMinimumWidth(maxWidth+iconWidth)
self.defaultroomLabel.setMinimumWidth(maxWidth+iconWidth)
self.executablepathLabel.setMinimumWidth(maxWidth)
self.mediapathLabel.setMinimumWidth(maxWidth+iconWidth)
self.playerargsLabel.setMinimumWidth(maxWidth+iconWidth)
self.showmoreCheckbox = QCheckBox(getMessage("more-title"))
self.showmoreCheckbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "more")
self.basicOptionsFrame = QtWidgets.QFrame()
self.basicOptionsLayout = QtWidgets.QVBoxLayout()
if error:
error = str(error)
self.errorLabel = QLabel(self)
if error[:1] != constants.ERROR_MESSAGE_MARKER:
self.errorLabel.setStyleSheet(constants.STYLE_ERRORLABEL)
else:
error = error[1:]
self.errorLabel.setStyleSheet(constants.STYLE_SUCCESSLABEL)
self.errorLabel.setText(error)
self.errorLabel.setAlignment(Qt.AlignCenter)
self.basicOptionsLayout.addWidget(self.errorLabel)
self.connectionSettingsGroup.setMaximumHeight(self.connectionSettingsGroup.minimumSizeHint().height())
self.basicOptionsLayout.setAlignment(Qt.AlignTop)
self.basicOptionsLayout.addWidget(self.connectionSettingsGroup)
self.basicOptionsLayout.addSpacing(5)
self.basicOptionsLayout.addWidget(self.mediaplayerSettingsGroup)
self.basicOptionsFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.basicOptionsFrame.setLayout(self.basicOptionsLayout)
self.stackedLayout.addWidget(self.basicOptionsFrame)
def addReadinessTab(self):
self.readyFrame = QtWidgets.QFrame()
self.readyLayout = QtWidgets.QVBoxLayout()
self.readyFrame.setLayout(self.readyLayout)
# Initial state
self.readyInitialGroup = QtWidgets.QGroupBox(getMessage("readiness-title"))
self.readyInitialLayout = QtWidgets.QVBoxLayout()
self.readyInitialGroup.setLayout(self.readyInitialLayout)
self.readyatstartCheckbox = QCheckBox(getMessage("readyatstart-label"))
self.readyatstartCheckbox.setObjectName("readyAtStart")
self.readyInitialLayout.addWidget(self.readyatstartCheckbox)
self.readyLayout.addWidget(self.readyInitialGroup)
# Automatically pausing
self.readyPauseGroup = QtWidgets.QGroupBox(getMessage("pausing-title"))
self.readyPauseLayout = QtWidgets.QVBoxLayout()
self.readyPauseGroup.setLayout(self.readyPauseLayout)
self.pauseonleaveCheckbox = QCheckBox(getMessage("pauseonleave-label"))
self.pauseonleaveCheckbox.setObjectName("pauseOnLeave")
self.readyPauseLayout.addWidget(self.pauseonleaveCheckbox)
self.readyLayout.addWidget(self.readyPauseGroup)
# Unpausing
self.readyUnpauseGroup = QtWidgets.QGroupBox(getMessage("unpause-title"))
self.readyUnpauseLayout = QtWidgets.QVBoxLayout()
self.readyUnpauseGroup.setLayout(self.readyUnpauseLayout)
self.readyUnpauseButtonGroup = QButtonGroup()
self.unpauseIfAlreadyReadyOption = QRadioButton(getMessage("unpause-ifalreadyready-option"))
self.readyUnpauseButtonGroup.addButton(self.unpauseIfAlreadyReadyOption)
self.unpauseIfAlreadyReadyOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseIfAlreadyReadyOption.setObjectName("unpause-ifalreadyready" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_IFALREADYREADY_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseIfAlreadyReadyOption)
self.unpauseIfOthersReadyOption = QRadioButton(getMessage("unpause-ifothersready-option"))
self.readyUnpauseButtonGroup.addButton(self.unpauseIfOthersReadyOption)
self.unpauseIfOthersReadyOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseIfOthersReadyOption.setObjectName("unpause-ifothersready" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_IFOTHERSREADY_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseIfOthersReadyOption)
self.unpauseIfMinUsersReadyOption = QRadioButton(getMessage("unpause-ifminusersready-option"))
self.readyUnpauseButtonGroup.addButton(self.unpauseIfMinUsersReadyOption)
self.unpauseIfMinUsersReadyOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseIfMinUsersReadyOption.setObjectName("unpause-ifminusersready" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_IFMINUSERSREADY_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseIfMinUsersReadyOption)
self.unpauseAlwaysUnpauseOption = QRadioButton(getMessage("unpause-always"))
self.readyUnpauseButtonGroup.addButton(self.unpauseAlwaysUnpauseOption)
self.unpauseAlwaysUnpauseOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseAlwaysUnpauseOption.setObjectName("unpause-always" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_ALWAYS_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseAlwaysUnpauseOption)
self.readyLayout.addWidget(self.readyUnpauseGroup)
self.readyLayout.setAlignment(Qt.AlignTop)
self.stackedLayout.addWidget(self.readyFrame)
def addMiscTab(self):
self.miscFrame = QtWidgets.QFrame()
self.miscLayout = QtWidgets.QVBoxLayout()
self.miscFrame.setLayout(self.miscLayout)
self.coreSettingsGroup = QtWidgets.QGroupBox(getMessage("core-behaviour-title"))
self.coreSettingsLayout = QtWidgets.QGridLayout()
self.coreSettingsGroup.setLayout(self.coreSettingsLayout)
### Privacy:
self.filenameprivacyLabel = QLabel(getMessage("filename-privacy-label"), self)
self.filenameprivacyButtonGroup = QButtonGroup()
self.filenameprivacySendRawOption = QRadioButton(getMessage("privacy-sendraw-option"))
self.filenameprivacySendHashedOption = QRadioButton(getMessage("privacy-sendhashed-option"))
self.filenameprivacyDontSendOption = QRadioButton(getMessage("privacy-dontsend-option"))
self.filenameprivacyButtonGroup.addButton(self.filenameprivacySendRawOption)
self.filenameprivacyButtonGroup.addButton(self.filenameprivacySendHashedOption)
self.filenameprivacyButtonGroup.addButton(self.filenameprivacyDontSendOption)
self.filesizeprivacyLabel = QLabel(getMessage("filesize-privacy-label"), self)
self.filesizeprivacyButtonGroup = QButtonGroup()
self.filesizeprivacySendRawOption = QRadioButton(getMessage("privacy-sendraw-option"))
self.filesizeprivacySendHashedOption = QRadioButton(getMessage("privacy-sendhashed-option"))
self.filesizeprivacyDontSendOption = QRadioButton(getMessage("privacy-dontsend-option"))
self.filesizeprivacyButtonGroup.addButton(self.filesizeprivacySendRawOption)
self.filesizeprivacyButtonGroup.addButton(self.filesizeprivacySendHashedOption)
self.filesizeprivacyButtonGroup.addButton(self.filesizeprivacyDontSendOption)
self.filenameprivacyLabel.setObjectName("filename-privacy")
self.filenameprivacySendRawOption.setObjectName("privacy-sendraw" + constants.CONFIG_NAME_MARKER + "filenamePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDRAW_MODE)
self.filenameprivacySendHashedOption.setObjectName("privacy-sendhashed" + constants.CONFIG_NAME_MARKER + "filenamePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDHASHED_MODE)
self.filenameprivacyDontSendOption.setObjectName("privacy-dontsend" + constants.CONFIG_NAME_MARKER + "filenamePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_DONTSEND_MODE)
self.filesizeprivacyLabel.setObjectName("filesize-privacy")
self.filesizeprivacySendRawOption.setObjectName("privacy-sendraw" + constants.CONFIG_NAME_MARKER + "filesizePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDRAW_MODE)
self.filesizeprivacySendHashedOption.setObjectName("privacy-sendhashed" + constants.CONFIG_NAME_MARKER + "filesizePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDHASHED_MODE)
self.filesizeprivacyDontSendOption.setObjectName("privacy-dontsend" + constants.CONFIG_NAME_MARKER + "filesizePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_DONTSEND_MODE)
self.coreSettingsLayout.addWidget(self.filenameprivacyLabel, 3, 0)
self.coreSettingsLayout.addWidget(self.filenameprivacySendRawOption, 3, 1, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filenameprivacySendHashedOption, 3, 2, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filenameprivacyDontSendOption, 3, 3, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filesizeprivacyLabel, 4, 0)
self.coreSettingsLayout.addWidget(self.filesizeprivacySendRawOption, 4, 1, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filesizeprivacySendHashedOption, 4, 2, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filesizeprivacyDontSendOption, 4, 3, Qt.AlignLeft)
## Syncplay internals
self.internalSettingsGroup = QtWidgets.QGroupBox(getMessage("syncplay-internals-title"))
self.internalSettingsLayout = QtWidgets.QVBoxLayout()
self.internalSettingsGroup.setLayout(self.internalSettingsLayout)
self.alwaysshowCheckbox = QCheckBox(getMessage("forceguiprompt-label"))
self.alwaysshowCheckbox.setObjectName(constants.INVERTED_STATE_MARKER + "forceGuiPrompt")
self.internalSettingsLayout.addWidget(self.alwaysshowCheckbox)
self.automaticupdatesCheckbox = QCheckBox(getMessage("checkforupdatesautomatically-label"))
self.automaticupdatesCheckbox.setObjectName("checkForUpdatesAutomatically")
self.internalSettingsLayout.addWidget(self.automaticupdatesCheckbox)
self.autosaveJoinsToListCheckbox = QCheckBox(getMessage("autosavejoinstolist-label"))
self.autosaveJoinsToListCheckbox.setObjectName("autosaveJoinsToList")
self.internalSettingsLayout.addWidget(self.autosaveJoinsToListCheckbox)
## Media path directories
self.mediasearchSettingsGroup = QtWidgets.QGroupBox(getMessage("syncplay-mediasearchdirectories-title"))
self.mediasearchSettingsLayout = QtWidgets.QVBoxLayout()
self.mediasearchSettingsGroup.setLayout(self.mediasearchSettingsLayout)
self.mediasearchTextEdit = QPlainTextEdit(utils.getListAsMultilineString(self.mediaSearchDirectories))
self.mediasearchTextEdit.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "mediasearcdirectories-arguments")
self.mediasearchTextEdit.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.mediasearchSettingsLayout.addWidget(self.mediasearchTextEdit)
self.mediasearchSettingsGroup.setMaximumHeight(self.mediasearchSettingsGroup.minimumSizeHint().height())
self.miscLayout.addWidget(self.coreSettingsGroup)
self.miscLayout.addWidget(self.internalSettingsGroup)
self.miscLayout.addWidget(self.mediasearchSettingsGroup)
self.miscLayout.setAlignment(Qt.AlignTop)
self.stackedLayout.addWidget(self.miscFrame)
def addSyncTab(self):
self.syncSettingsFrame = QtWidgets.QFrame()
self.syncSettingsLayout = QtWidgets.QVBoxLayout()
self.desyncSettingsGroup = QtWidgets.QGroupBox(getMessage("sync-otherslagging-title"))
self.desyncOptionsFrame = QtWidgets.QFrame()
self.desyncSettingsOptionsLayout = QtWidgets.QHBoxLayout()
config = self.config
self.slowdownCheckbox = QCheckBox(getMessage("slowondesync-label"))
self.slowdownCheckbox.setObjectName("slowOnDesync")
self.rewindCheckbox = QCheckBox(getMessage("rewindondesync-label"))
self.rewindCheckbox.setObjectName("rewindOnDesync")
self.fastforwardCheckbox = QCheckBox(getMessage("fastforwardondesync-label"))
self.fastforwardCheckbox.setObjectName("fastforwardOnDesync")
self.desyncSettingsLayout = QtWidgets.QGridLayout()
self.desyncSettingsLayout.setSpacing(2)
self.desyncFrame = QtWidgets.QFrame()
self.desyncFrame.setLineWidth(0)
self.desyncFrame.setMidLineWidth(0)
self.desyncSettingsLayout.addWidget(self.slowdownCheckbox, 0, 0, 1, 2, Qt.AlignLeft)
self.desyncSettingsLayout.addWidget(self.rewindCheckbox, 1, 0, 1, 2, Qt.AlignLeft)
self.desyncSettingsLayout.setAlignment(Qt.AlignLeft)
self.desyncSettingsGroup.setLayout(self.desyncSettingsLayout)
self.desyncSettingsOptionsLayout.addWidget(self.desyncFrame)
self.desyncFrame.setLayout(self.syncSettingsLayout)
self.othersyncSettingsGroup = QtWidgets.QGroupBox(getMessage("sync-youlaggging-title"))
self.othersyncOptionsFrame = QtWidgets.QFrame()
self.othersyncSettingsLayout = QtWidgets.QGridLayout()
self.dontslowwithmeCheckbox = QCheckBox(getMessage("dontslowdownwithme-label"))
self.dontslowwithmeCheckbox.setObjectName("dontSlowDownWithMe")
self.othersyncSettingsLayout.addWidget(self.dontslowwithmeCheckbox, 2, 0, 1, 2, Qt.AlignLeft)
self.othersyncSettingsLayout.setAlignment(Qt.AlignLeft)
self.othersyncSettingsLayout.addWidget(self.fastforwardCheckbox, 3, 0, 1, 2, Qt.AlignLeft)
## Trusted domains
self.trusteddomainsSettingsGroup = QtWidgets.QGroupBox(getMessage("syncplay-trusteddomains-title"))
self.trusteddomainsSettingsLayout = QtWidgets.QVBoxLayout()
self.trusteddomainsSettingsGroup.setLayout(self.trusteddomainsSettingsLayout)
self.trusteddomainsTextEdit = QPlainTextEdit(utils.getListAsMultilineString(self.trustedDomains))
self.trusteddomainsTextEdit.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "trusteddomains-arguments")
self.trusteddomainsTextEdit.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.trusteddomainsSettingsLayout.addWidget(self.trusteddomainsTextEdit)
self.trusteddomainsSettingsGroup.setMaximumHeight(self.trusteddomainsSettingsGroup.minimumSizeHint().height())
self.othersyncSettingsGroup.setLayout(self.othersyncSettingsLayout)
self.othersyncSettingsGroup.setMaximumHeight(self.othersyncSettingsGroup.minimumSizeHint().height())
self.syncSettingsLayout.addWidget(self.othersyncSettingsGroup)
self.syncSettingsLayout.addWidget(self.desyncSettingsGroup)
self.syncSettingsLayout.addWidget(self.trusteddomainsSettingsGroup)
self.syncSettingsFrame.setLayout(self.syncSettingsLayout)
self.desyncSettingsGroup.setMaximumHeight(self.desyncSettingsGroup.minimumSizeHint().height())
self.syncSettingsLayout.setAlignment(Qt.AlignTop)
self.stackedLayout.addWidget(self.syncSettingsFrame)
def addChatTab(self):
self.chatFrame = QtWidgets.QFrame()
self.chatLayout = QtWidgets.QVBoxLayout()
self.chatLayout.setAlignment(Qt.AlignTop)
# Input
self.chatInputGroup = QtWidgets.QGroupBox(getMessage("chat-title"))
self.chatInputLayout = QtWidgets.QGridLayout()
self.chatLayout.addWidget(self.chatInputGroup)
self.chatInputGroup.setLayout(self.chatInputLayout)
self.chatInputEnabledCheckbox = QCheckBox(getMessage("chatinputenabled-label"))
self.chatInputEnabledCheckbox.setObjectName("chatInputEnabled")
self.chatInputLayout.addWidget(self.chatInputEnabledCheckbox, 1, 0, 1, 1, Qt.AlignLeft)
self.chatDirectInputCheckbox = QCheckBox(getMessage("chatdirectinput-label"))
self.chatDirectInputCheckbox.setObjectName("chatDirectInput")
self.chatDirectInputCheckbox.setStyleSheet(
constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatInputLayout.addWidget(self.chatDirectInputCheckbox, 2, 0, 1, 1, Qt.AlignLeft)
self.inputFontLayout = QtWidgets.QHBoxLayout()
self.inputFontLayout.setContentsMargins(0, 0, 0, 0)
self.inputFontFrame = QtWidgets.QFrame()
self.inputFontFrame.setLayout(self.inputFontLayout)
self.inputFontFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatFontLabel = QLabel(getMessage("chatinputfont-label"), self)
self.chatFontLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatFontLabel.setObjectName("font-label")
self.chatInputFontButton = QtWidgets.QPushButton(getMessage("chatfont-label"))
self.chatInputFontButton.setObjectName("set-input-font")
self.chatInputFontButtonGroup = QtWidgets.QButtonGroup()
self.chatInputFontButtonGroup.addButton(self.chatInputFontButton)
self.chatInputFontButton.released.connect(lambda: self.fontDialog("chatInput"))
self.chatInputColourButton = QtWidgets.QPushButton(getMessage("chatcolour-label"))
self.chatInputColourButton.setObjectName("set-input-colour")
self.chatInputColourButtonGroup = QtWidgets.QButtonGroup()
self.chatInputColourButtonGroup.addButton(self.chatInputColourButton)
self.chatInputColourButton.released.connect(lambda: self.colourDialog("chatInput"))
self.inputFontLayout.addWidget(self.chatFontLabel, Qt.AlignLeft)
self.inputFontLayout.addWidget(self.chatInputFontButton, Qt.AlignLeft)
self.inputFontLayout.addWidget(self.chatInputColourButton, Qt.AlignLeft)
self.chatInputLayout.addWidget(self.inputFontFrame, 3, 0, 1, 3, Qt.AlignLeft)
self.chatInputPositionFrame = QtWidgets.QFrame()
self.chatInputPositionLayout = QtWidgets.QHBoxLayout()
self.chatInputPositionLayout.setContentsMargins(0, 0, 0, 0)
self.chatInputPositionFrame.setLayout(self.chatInputPositionLayout)
self.chatInputPositionFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatInputPositionLabel = QLabel(getMessage("chatinputposition-label"), self)
self.chatInputPositionLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatInputPositionGroup = QButtonGroup()
self.chatInputTopOption = QRadioButton(getMessage("chat-top-option"))
self.chatInputMiddleOption = QRadioButton(getMessage("chat-middle-option"))
self.chatInputBottomOption = QRadioButton(getMessage("chat-bottom-option"))
self.chatInputPositionGroup.addButton(self.chatInputTopOption)
self.chatInputPositionGroup.addButton(self.chatInputMiddleOption)
self.chatInputPositionGroup.addButton(self.chatInputBottomOption)
self.chatInputPositionLabel.setObjectName("chatinputposition")
self.chatInputTopOption.setObjectName("chatinputposition-top" + constants.CONFIG_NAME_MARKER + "chatInputPosition" + constants.CONFIG_VALUE_MARKER + constants.INPUT_POSITION_TOP)
self.chatInputMiddleOption.setObjectName("chatinputposition-middle" + constants.CONFIG_NAME_MARKER + "chatInputPosition" + constants.CONFIG_VALUE_MARKER + constants.INPUT_POSITION_MIDDLE)
self.chatInputBottomOption.setObjectName("chatinputposition-bottom" + constants.CONFIG_NAME_MARKER + "chatInputPosition" + constants.CONFIG_VALUE_MARKER + constants.INPUT_POSITION_BOTTOM)
self.chatInputPositionLayout.addWidget(self.chatInputPositionLabel)
self.chatInputPositionLayout.addWidget(self.chatInputTopOption)
self.chatInputPositionLayout.addWidget(self.chatInputMiddleOption)
self.chatInputPositionLayout.addWidget(self.chatInputBottomOption)
self.chatInputLayout.addWidget(self.chatInputPositionFrame)
self.subitems['chatInputEnabled'] = [self.chatInputPositionLabel.objectName(), self.chatInputTopOption.objectName(),
self.chatInputMiddleOption.objectName(), self.chatInputBottomOption.objectName(),
self.chatInputFontButton.objectName(), self.chatFontLabel.objectName(),
self.chatInputColourButton.objectName(), self.chatDirectInputCheckbox.objectName()]
# Output
self.chatOutputGroup = QtWidgets.QGroupBox(getMessage("chatoutputheader-label"))
self.chatOutputLayout = QtWidgets.QGridLayout()
self.chatLayout.addWidget(self.chatOutputGroup)
self.chatOutputGroup.setLayout(self.chatOutputLayout)
self.chatOutputEnabledCheckbox = QCheckBox(getMessage("chatoutputenabled-label"))
self.chatOutputEnabledCheckbox.setObjectName("chatOutputEnabled")
self.chatOutputLayout.addWidget(self.chatOutputEnabledCheckbox, 1, 0, 1, 1, Qt.AlignLeft)
self.outputFontLayout = QtWidgets.QHBoxLayout()
self.outputFontLayout.setContentsMargins(0, 0, 0, 0)
self.outputFontFrame = QtWidgets.QFrame()
self.outputFontFrame.setLayout(self.outputFontLayout)
self.outputFontFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatOutputFontLabel = QLabel(getMessage("chatoutputfont-label"), self)
self.chatOutputFontLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatOutputFontLabel.setObjectName("font-output-label")
self.chatOutputFontButton = QtWidgets.QPushButton(getMessage("chatfont-label"))
self.chatOutputFontButton.setObjectName("set-output-font")
self.chatOutputFontButtonGroup = QtWidgets.QButtonGroup()
self.chatOutputFontButtonGroup.addButton(self.chatOutputFontButton)
self.chatOutputFontButton.released.connect(lambda: self.fontDialog("chatOutput"))
self.chatOutputColourButton = QtWidgets.QPushButton(getMessage("chatcolour-label"))
self.outputFontLayout.addWidget(self.chatOutputFontLabel, Qt.AlignLeft)
self.outputFontLayout.addWidget(self.chatOutputFontButton, Qt.AlignLeft)
self.chatOutputLayout.addWidget(self.outputFontFrame, 2, 0, 1, 3, Qt.AlignLeft)
self.chatOutputModeLabel = QLabel(getMessage("chatoutputposition-label"), self)
self.chatOutputModeLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatOutputModeGroup = QButtonGroup()
self.chatOutputChatroomOption = QRadioButton(getMessage("chat-chatroom-option"))
self.chatOutputScrollingOption = QRadioButton(getMessage("chat-scrolling-option"))
self.chatOutputModeGroup.addButton(self.chatOutputChatroomOption)
self.chatOutputModeGroup.addButton(self.chatOutputScrollingOption)
self.chatOutputModeLabel.setObjectName("chatoutputmode")
self.chatOutputChatroomOption.setObjectName("chatoutputmode-chatroom" + constants.CONFIG_NAME_MARKER + "chatOutputMode" + constants.CONFIG_VALUE_MARKER + constants.CHATROOM_MODE)
self.chatOutputScrollingOption.setObjectName("chatoutputmode-scrolling" + constants.CONFIG_NAME_MARKER + "chatOutputMode" + constants.CONFIG_VALUE_MARKER + constants.SCROLLING_MODE)
self.chatOutputModeFrame = QtWidgets.QFrame()
self.chatOutputModeLayout = QtWidgets.QHBoxLayout()
self.chatOutputModeLayout.setContentsMargins(0, 0, 0, 0)
self.chatOutputModeFrame.setLayout(self.chatOutputModeLayout)
self.chatOutputModeFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatOutputModeLayout.addWidget(self.chatOutputModeLabel)
self.chatOutputModeLayout.addWidget(self.chatOutputChatroomOption)
self.chatOutputModeLayout.addWidget(self.chatOutputScrollingOption)
self.chatOutputLayout.addWidget(self.chatOutputModeFrame)
self.subitems['chatOutputEnabled'] = [self.chatOutputModeLabel.objectName(), self.chatOutputChatroomOption.objectName(),
self.chatOutputScrollingOption.objectName(), self.chatOutputFontButton.objectName(),
self.chatOutputFontLabel.objectName()]
# chatFrame
self.chatFrame.setLayout(self.chatLayout)
self.stackedLayout.addWidget(self.chatFrame)
def fontDialog(self, configName):
font = QtGui.QFont()
font.setFamily(self.config[configName + "FontFamily"])
font.setPointSize(self.config[configName + "RelativeFontSize"])
font.setWeight(self.config[configName + "FontWeight"])
font.setUnderline(self.config[configName + "FontUnderline"])
ok, value = QtWidgets.QFontDialog.getFont(font)
if ok:
self.config[configName + "FontFamily"] = value.family()
self.config[configName + "RelativeFontSize"] = value.pointSize()
self.config[configName + "FontWeight"] = value.weight()
self.config[configName + "FontUnderline"] = value.underline()
def colourDialog(self, configName):
oldColour = QtGui.QColor()
oldColour.setNamedColor(self.config[configName + "FontColor"])
colour = QtWidgets.QColorDialog.getColor(oldColour, self)
if colour.isValid():
self.config[configName + "FontColor"] = colour.name()
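    # Both dialogs persist their result under keys derived from configName, so
    # "chatInput" is stored as chatInputFontFamily, chatInputRelativeFontSize,
    # chatInputFontWeight, chatInputFontUnderline and chatInputFontColor.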
def addMessageTab(self):
self.messageFrame = QtWidgets.QFrame()
self.messageLayout = QtWidgets.QVBoxLayout()
self.messageLayout.setAlignment(Qt.AlignTop)
# OSD
self.osdSettingsGroup = QtWidgets.QGroupBox(getMessage("messages-osd-title"))
self.osdSettingsLayout = QtWidgets.QVBoxLayout()
self.osdSettingsFrame = QtWidgets.QFrame()
self.showOSDCheckbox = QCheckBox(getMessage("showosd-label"))
self.showOSDCheckbox.setObjectName("showOSD")
self.osdSettingsLayout.addWidget(self.showOSDCheckbox)
self.showSameRoomOSDCheckbox = QCheckBox(getMessage("showsameroomosd-label"))
self.showSameRoomOSDCheckbox.setObjectName("showSameRoomOSD")
self.showSameRoomOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showSameRoomOSDCheckbox)
self.showNonControllerOSDCheckbox = QCheckBox(getMessage("shownoncontrollerosd-label"))
self.showNonControllerOSDCheckbox.setObjectName("showNonControllerOSD")
self.showNonControllerOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showNonControllerOSDCheckbox)
self.showDifferentRoomOSDCheckbox = QCheckBox(getMessage("showdifferentroomosd-label"))
self.showDifferentRoomOSDCheckbox.setObjectName("showDifferentRoomOSD")
self.showDifferentRoomOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showDifferentRoomOSDCheckbox)
self.slowdownOSDCheckbox = QCheckBox(getMessage("showslowdownosd-label"))
self.slowdownOSDCheckbox.setObjectName("showSlowdownOSD")
self.slowdownOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.slowdownOSDCheckbox)
self.showOSDWarningsCheckbox = QCheckBox(getMessage("showosdwarnings-label"))
self.showOSDWarningsCheckbox.setObjectName("showOSDWarnings")
self.showOSDWarningsCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showOSDWarningsCheckbox)
self.subitems['showOSD'] = ["showSameRoomOSD", "showDifferentRoomOSD", "showSlowdownOSD", "showOSDWarnings", "showNonControllerOSD"]
self.osdSettingsGroup.setLayout(self.osdSettingsLayout)
self.osdSettingsGroup.setMaximumHeight(self.osdSettingsGroup.minimumSizeHint().height())
self.osdSettingsLayout.setAlignment(Qt.AlignTop)
self.messageLayout.addWidget(self.osdSettingsGroup)
# Other display
self.displaySettingsGroup = QtWidgets.QGroupBox(getMessage("messages-other-title"))
self.displaySettingsLayout = QtWidgets.QVBoxLayout()
        self.displaySettingsLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.displaySettingsFrame = QtWidgets.QFrame()
self.showDurationNotificationCheckbox = QCheckBox(getMessage("showdurationnotification-label"))
self.showDurationNotificationCheckbox.setObjectName("showDurationNotification")
self.displaySettingsLayout.addWidget(self.showDurationNotificationCheckbox)
self.languageFrame = QtWidgets.QFrame()
self.languageLayout = QtWidgets.QHBoxLayout()
self.languageLayout.setContentsMargins(0, 0, 0, 0)
self.languageFrame.setLayout(self.languageLayout)
self.languageFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
        self.languageLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.languageLabel = QLabel(getMessage("language-label"), self)
self.languageCombobox = QtWidgets.QComboBox(self)
self.languageCombobox.addItem(getMessage("automatic-language").format(getMessage("LANGUAGE", getInitialLanguage())))
self.languages = getLanguages()
for lang in self.languages:
self.languageCombobox.addItem(self.languages[lang], lang)
if lang == self.config['language']:
self.languageCombobox.setCurrentIndex(self.languageCombobox.count()-1)
self.languageCombobox.currentIndexChanged.connect(self.languageChanged)
self.languageLayout.addWidget(self.languageLabel, 1, Qt.AlignLeft)
self.languageLayout.addWidget(self.languageCombobox, 1, Qt.AlignLeft)
self.displaySettingsLayout.addWidget(self.languageFrame)
self.languageLabel.setObjectName("language")
self.languageCombobox.setObjectName("language")
self.languageFrame.setMaximumWidth(self.languageFrame.minimumSizeHint().width())
self.displaySettingsGroup.setLayout(self.displaySettingsLayout)
self.displaySettingsGroup.setMaximumHeight(self.displaySettingsGroup.minimumSizeHint().height())
        self.displaySettingsLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.messageLayout.addWidget(self.displaySettingsGroup)
# messageFrame
self.messageFrame.setLayout(self.messageLayout)
self.stackedLayout.addWidget(self.messageFrame)
def addBottomLayout(self):
config = self.config
self.bottomButtonFrame = QtWidgets.QFrame()
self.bottomButtonLayout = QtWidgets.QHBoxLayout()
self.helpButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'help.png'), getMessage("help-label"))
self.helpButton.setObjectName("help")
self.helpButton.setMaximumSize(self.helpButton.sizeHint())
self.helpButton.released.connect(self.openHelp)
self.resetButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'cog_delete.png'), getMessage("reset-label"))
self.resetButton.setMaximumSize(self.resetButton.sizeHint())
self.resetButton.setObjectName("reset")
self.resetButton.released.connect(self.resetSettings)
        self.runButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'accept.png'), getMessage("run-label"))
        self.runButton.pressed.connect(self._runWithoutStoringConfig)
self.runButton.setToolTip(getMessage("nostore-tooltip"))
self.storeAndRunButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'accept.png'), getMessage("storeandrun-label"))
self.storeAndRunButton.released.connect(self._saveDataAndLeave)
self.bottomButtonLayout.addWidget(self.helpButton)
self.bottomButtonLayout.addWidget(self.resetButton)
self.bottomButtonLayout.addWidget(self.runButton)
self.bottomButtonLayout.addWidget(self.storeAndRunButton)
self.bottomButtonFrame.setLayout(self.bottomButtonLayout)
if isMacOS():
self.bottomButtonLayout.setContentsMargins(15, 0, 15, 0)
else:
self.bottomButtonLayout.setContentsMargins(5, 0, 5, 0)
self.mainLayout.addWidget(self.bottomButtonFrame, 1, 0, 1, 2)
self.bottomCheckboxFrame = QtWidgets.QFrame()
if isMacOS():
self.bottomCheckboxFrame.setContentsMargins(3, 0, 6, 0)
else:
self.bottomCheckboxFrame.setContentsMargins(0, 0, 0, 0)
self.bottomCheckboxLayout = QtWidgets.QGridLayout()
self.alwaysshowCheckbox = QCheckBox(getMessage("forceguiprompt-label"))
self.enableplaylistsCheckbox = QCheckBox(getMessage("sharedplaylistenabled-label"))
self.bottomCheckboxLayout.addWidget(self.showmoreCheckbox)
self.bottomCheckboxLayout.addWidget(self.enableplaylistsCheckbox, 0, 2, Qt.AlignRight)
self.alwaysshowCheckbox.setObjectName(constants.INVERTED_STATE_MARKER + "forceGuiPrompt")
self.enableplaylistsCheckbox.setObjectName("sharedPlaylistEnabled")
self.bottomCheckboxFrame.setLayout(self.bottomCheckboxLayout)
self.mainLayout.addWidget(self.bottomCheckboxFrame, 2, 0, 1, 2)
def tabList(self):
self.tabListLayout = QtWidgets.QHBoxLayout()
self.tabListFrame = QtWidgets.QFrame()
self.tabListWidget = QtWidgets.QListWidget()
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "house.png"), getMessage("basics-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "control_pause_blue.png"), getMessage("readiness-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "film_link.png"), getMessage("sync-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "user_comment.png"), getMessage("chat-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "error.png"), getMessage("messages-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "cog.png"), getMessage("misc-label")))
self.tabListLayout.addWidget(self.tabListWidget)
self.tabListFrame.setLayout(self.tabListLayout)
self.tabListFrame.setFixedWidth(self.tabListFrame.minimumSizeHint().width() + constants.TAB_PADDING)
self.tabListWidget.setStyleSheet(constants.STYLE_TABLIST)
self.tabListWidget.currentItemChanged.connect(self.tabChange)
self.tabListWidget.itemClicked.connect(self.tabChange)
self.tabListWidget.itemPressed.connect(self.tabChange)
self.mainLayout.addWidget(self.tabListFrame, 0, 0, 1, 1)
def ensureTabListIsVisible(self):
self.stackedFrame.setFixedWidth(self.stackedFrame.width())
while self.tabListWidget.horizontalScrollBar().isVisible() and self.tabListFrame.width() < constants.MAXIMUM_TAB_WIDTH:
self.tabListFrame.setFixedWidth(self.tabListFrame.width()+1)
def tabChange(self):
self.setFocus()
self.stackedLayout.setCurrentIndex(self.tabListWidget.currentRow())
def resetSettings(self):
self.clearGUIData(leaveMore=True)
self.config['resetConfig'] = True
self.pressedclosebutton = False
self.close()
self.closed.emit()
def showEvent(self, *args, **kwargs):
self.ensureTabListIsVisible()
self.setFixedWidth(self.minimumSizeHint().width())
def clearGUIData(self, leaveMore=False):
settings = QSettings("Syncplay", "PlayerList")
settings.clear()
settings = QSettings("Syncplay", "MediaBrowseDialog")
settings.clear()
settings = QSettings("Syncplay", "MainWindow")
settings.clear()
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("Update")
settings.setValue("lastChecked", None)
settings.setValue("lastCheckedQt", None)
settings.endGroup()
settings.beginGroup("PublicServerList")
settings.setValue("publicServers", None)
settings.endGroup()
if not leaveMore:
settings = QSettings("Syncplay", "MoreSettings")
settings.clear()
self.datacleared = True
def populateEmptyServerList(self):
if self.publicServers is None:
if self.config["checkForUpdatesAutomatically"] == True:
self.updateServerList()
else:
currentServer = self.hostCombobox.currentText()
self.publicServers = constants.FALLBACK_PUBLIC_SYNCPLAY_SERVERS
i = 0
for server in self.publicServers:
self.hostCombobox.addItem(server[1])
self.hostCombobox.setItemData(i, server[0], Qt.ToolTipRole)
if not server[1] in self.publicServerAddresses:
self.publicServerAddresses.append(server[1])
i += 1
self.hostCombobox.setEditText(currentServer)
def updatePasswordVisibilty(self):
if (self.hostCombobox.currentText() == "" and self.serverpassTextbox.text() == "") or str(self.hostCombobox.currentText()) in self.publicServerAddresses:
self.serverpassTextbox.setDisabled(True)
self.serverpassTextbox.setReadOnly(True)
if self.serverpassTextbox.text() != "":
self.storedPassword = self.serverpassTextbox.text()
self.serverpassTextbox.setText("")
else:
self.serverpassTextbox.setEnabled(True)
self.serverpassTextbox.setReadOnly(False)
self.serverpassTextbox.setText(self.storedPassword)
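    # The password field is disabled (and its text stashed in storedPassword)
    # whenever a known public server is selected, or when both host and password
    # are blank, so a stored password only applies to private servers.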
def createMenubar(self):
self.menuBar = QtWidgets.QMenuBar()
# Edit menu
self.editMenu = QtWidgets.QMenu(getMessage("edit-menu-label"), self)
self.cutAction = self.editMenu.addAction(getMessage("cut-menu-label"))
self.cutAction.setShortcuts(QtGui.QKeySequence.Cut)
self.copyAction = self.editMenu.addAction(getMessage("copy-menu-label"))
self.copyAction.setShortcuts(QtGui.QKeySequence.Copy)
self.pasteAction = self.editMenu.addAction(getMessage("paste-menu-label"))
self.pasteAction.setShortcuts(QtGui.QKeySequence.Paste)
self.selectAction = self.editMenu.addAction(getMessage("selectall-menu-label"))
self.selectAction.setShortcuts(QtGui.QKeySequence.SelectAll)
self.editMenu.addSeparator()
self.menuBar.addMenu(self.editMenu)
self.mainLayout.setMenuBar(self.menuBar)
def __init__(self, config, playerpaths, error, defaultConfig):
self.config = config
self.defaultConfig = defaultConfig
self.playerpaths = playerpaths
self.datacleared = False
self.config['resetConfig'] = False
self.subitems = {}
self.publicServers = None
self.publicServerAddresses = []
self._playerProbeThread = GetPlayerIconThread()
self._playerProbeThread.done.connect(self._updateExecutableIcon)
self._playerProbeThread.start()
if self.config['clearGUIData'] == True:
self.config['clearGUIData'] = False
self.clearGUIData()
self.QtWidgets = QtWidgets
self.QtGui = QtGui
self.error = error
if isWindows():
resourcespath = utils.findWorkingDir() + "\\resources\\"
else:
resourcespath = utils.findWorkingDir() + "/resources/"
self.posixresourcespath = utils.findWorkingDir().replace("\\", "/") + "/resources/"
self.resourcespath = resourcespath
super(ConfigDialog, self).__init__()
self.setWindowTitle(getMessage("config-window-title"))
self.setWindowFlags(self.windowFlags() & Qt.WindowCloseButtonHint & ~Qt.WindowContextHelpButtonHint)
self.setWindowIcon(QtGui.QIcon(resourcespath + "syncplay.png"))
self.stackedLayout = QtWidgets.QStackedLayout()
self.stackedFrame = QtWidgets.QFrame()
self.stackedFrame.setLayout(self.stackedLayout)
self.mainLayout = QtWidgets.QGridLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.storedPassword = self.config['password']
self.addBasicTab()
self.addReadinessTab()
self.addSyncTab()
self.addChatTab()
self.addMessageTab()
self.addMiscTab()
self.tabList()
if isMacOS():
self.createMenubar()
self.config['menuBar'] = dict()
self.config['menuBar']['bar'] = self.menuBar
self.config['menuBar']['editMenu'] = self.editMenu
else:
self.config['menuBar'] = None
self.mainLayout.addWidget(self.stackedFrame, 0, 1)
self.addBottomLayout()
self.updatePasswordVisibilty()
if self.getMoreState() == False:
self.tabListFrame.hide()
self.resetButton.hide()
self.playerargsTextbox.hide()
self.playerargsLabel.hide()
self.runButton.hide()
if self.mediapathTextbox.text() == "":
self.mediapathTextbox.hide()
self.mediapathLabel.hide()
self.mediabrowseButton.hide()
else:
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
if isMacOS():
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50
else:
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+13
if self.error:
newHeight += self.errorLabel.height() + 3
self.stackedFrame.setFixedHeight(newHeight)
else:
self.showmoreCheckbox.setChecked(True)
self.tabListWidget.setCurrentRow(0)
self.stackedFrame.setFixedHeight(self.stackedFrame.minimumSizeHint().height())
self.showmoreCheckbox.toggled.connect(self.moreToggled)
self.setLayout(self.mainLayout)
if self.config['noStore']:
self.runButton.setFocus()
else:
self.storeAndRunButton.setFocus()
if isMacOS():
initialHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50
if self.error:
initialHeight += 40
self.setFixedWidth(self.sizeHint().width())
self.setFixedHeight(initialHeight)
else:
self.setFixedSize(self.sizeHint())
self.setAcceptDrops(True)
if constants.SHOW_TOOLTIPS:
self.processWidget(self, lambda w: self.loadTooltips(w))
self.processWidget(self, lambda w: self.loadValues(w))
self.processWidget(self, lambda w: self.connectChildren(w))
self.populateEmptyServerList()
|
{
"content_hash": "59a5b1706a17bbff436b85d54708eb85",
"timestamp": "",
"source": "github",
"line_count": 1509,
"max_line_length": 204,
"avg_line_length": 53.91583830351226,
"alnum_prop": 0.6964318637151391,
"repo_name": "Syncplay/syncplay",
"id": "c5d05121b17d977d593dbfc1b83f499353c844c9",
"size": "81360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syncplay/ui/GuiConfiguration.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "53024"
},
{
"name": "Makefile",
"bytes": "3662"
},
{
"name": "Python",
"bytes": "1073792"
},
{
"name": "Shell",
"bytes": "9820"
}
],
"symlink_target": ""
}
|
from sys import stderr
from os import environ
from urlparse import urlparse
from StringIO import StringIO
from zipfile import ZipFile, ZIP_DEFLATED
from time import time
from flask import Flask
from flask import request
from flask import Response
from flask import render_template
from osgeo import ogr
from util import json_encode, bool
from geo import features_geojson, QueryError
from geo import get_intersecting_features, get_matching_features
from census import census_url, get_features as census_features
cors = 'Access-Control-Allow-Origin'
app = Flask(__name__)
def is_census_datasource(environ):
''' Return true if the environment specifies the U.S. Census datasource.
'''
return environ.get('GEO_DATASOURCE', None) == census_url
def get_datasource(environ):
''' Return an environment-appropriate datasource.
For local data, this will be an OGR Datasource object.
'''
if is_census_datasource(environ):
# Use the value of the environment variable directly,
datasource = environ['GEO_DATASOURCE']
else:
# Or just open datasource.shp with OGR.
datasource = ogr.Open('datasource.shp')
return datasource
@app.route('/')
def hello():
host_port = urlparse(request.base_url).netloc.encode('utf-8')
is_downloadable = not is_census_datasource(environ)
is_us_census = is_census_datasource(environ)
return render_template('index.html', **locals())
@app.route('/.well-known/status')
def status():
datasource = get_datasource(environ)
status = {
'status': 'ok' if bool(datasource) else 'Bad datasource: %s' % repr(datasource),
'updated': int(time()),
'dependencies': [],
'resources': {}
}
body = json_encode(status)
return Response(body, headers={'Content-type': 'application/json', cors: '*'})
@app.route("/areas")
def areas():
''' Retrieve geographic areas.
'''
is_census = is_census_datasource(environ)
lat = float(request.args['lat'])
lon = float(request.args['lon'])
include_geom = bool(request.args.get('include_geom', True))
json_callback = request.args.get('callback', None)
layer_names = is_census and request.args.get('layers', '')
layer_names = layer_names and set(layer_names.split(','))
# This. Is. Python.
ogr.UseExceptions()
point = ogr.Geometry(wkt='POINT(%f %f)' % (lon, lat))
if is_census:
features = census_features(point, include_geom, layer_names)
else:
datasource = get_datasource(environ)
features = get_intersecting_features(datasource, point, include_geom)
body, mime = features_geojson(features, json_callback)
return Response(body, headers={'Content-type': mime, cors: '*'})
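# Illustrative request (coordinates below are made up): GET /areas?lat=25.77&lon=-80.19&include_geom=false
# returns GeoJSON for the features intersecting the given point; "callback=fn"
# switches to a JSON-P response, and the "layers" parameter is only honoured
# when the U.S. Census datasource is configured.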
@app.route('/select')
def select():
''' Retrieve features.
'''
if is_census_datasource(environ):
error = "Can't select individual features from " + census_url
return Response(render_template('error.html', error=error), status=404)
where_clause = request.args.get('where', None)
where_clause = where_clause and str(where_clause)
page_number = int(request.args.get('page', 1))
include_geom = bool(request.args.get('include_geom', True))
json_callback = request.args.get('callback', None)
# This. Is. Python.
ogr.UseExceptions()
try:
datasource = get_datasource(environ)
features = get_matching_features(datasource, where_clause, page_number, include_geom)
except QueryError, e:
body, mime = json_encode({'error': str(e)}), 'application/json'
if json_callback:
body = '%s(%s);\n' % (json_callback, body)
mime = 'text/javascript'
return Response(body, status=400, headers={'Content-type': mime, cors: '*'})
else:
body, mime = features_geojson(features, json_callback)
return Response(body, headers={'Content-type': mime, cors: '*'})
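# Illustrative request (the field name is hypothetical): GET /select?where=NAME='Example'&page=2
# pages through features of the local shapefile matching the WHERE clause; a
# clause that cannot be parsed yields HTTP 400 with a JSON error body.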
@app.errorhandler(404)
def error_404(error):
return render_template('error.html', error=str(error))
@app.route('/datasource.zip')
def download_zip():
if is_census_datasource(environ):
error = "Can't download all of " + census_url
return Response(render_template('error.html', error=error), status=404)
buffer = StringIO()
archive = ZipFile(buffer, 'w', ZIP_DEFLATED)
archive.write('datasource.shp')
archive.write('datasource.shx')
archive.write('datasource.dbf')
archive.write('datasource.prj')
archive.close()
return Response(buffer.getvalue(), headers={'Content-Type': 'application/zip'})
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
{
"content_hash": "42f78c0636c35790517b40b6ec89fc76",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 93,
"avg_line_length": 30.72077922077922,
"alnum_prop": 0.652716127668569,
"repo_name": "codeforamerica/mdc-municipality-geo-api",
"id": "a56258ac3277e74476e5056e58f34ec2eb5e6119",
"size": "4731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37"
},
{
"name": "HTML",
"bytes": "9338"
},
{
"name": "Python",
"bytes": "34126"
},
{
"name": "Shell",
"bytes": "3315"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import namedtuple
import itertools
import six
from .exceptions import InvalidParametersException
UrlParameter = UP = namedtuple('UrlParameter', ['url_component', 'required'])
UP.__new__.__defaults__ = (False, )
VariableParameter = VP = namedtuple('VariableParameter', ['index'])
def _get_property(item):
def get_item(self):
return self._params.get(item)
def set_item(self, value):
if value is not None:
self._params[item] = value
elif item in self._params:
del self._params[item]
def del_item(self):
try:
del self._params[item]
except KeyError:
pass
return property(get_item, set_item, del_item)
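# _get_property builds a property that proxies one URL component through the
# instance's _params dict: reading returns the stored value (or None),
# assigning None removes the key if present, and deleting a missing key is a
# silent no-op.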
_META_INFO_NAME = str('MetaInfo')
_META_INFO_FIELDS = ['preset_prefix', 'index_prefix', 'custom_prefix', 'item_prefix']
_META_INFO_DEFAULT = {field_name: '' for field_name in _META_INFO_FIELDS}
_META_INFO_DEFAULT['abstract'] = False
def _copy_meta(cls_meta, meta, fields):
if cls_meta:
for field in fields:
if hasattr(cls_meta, field):
setattr(meta, field, getattr(cls_meta, field))
class ParameterMeta(type):
def __new__(mcs, name, bases, dct):
new_cls = super(ParameterMeta, mcs).__new__(mcs, name, bases, dct)
param_base = getattr(new_cls, 'meta', None)
new_cls.meta = meta = type(_META_INFO_NAME, (object, ), _META_INFO_DEFAULT)()
_copy_meta(param_base, meta, _META_INFO_FIELDS)
_copy_meta(dct.get('Meta'), meta, _META_INFO_FIELDS + ['abstract'])
cls_parameters = [(p_name, p)
for p_name, p in six.iteritems(dct)
if isinstance(p, UrlParameter)]
cls_c_parameters = [(p_name, p)
for p_name, p in six.iteritems(dct)
if isinstance(p, VariableParameter)]
if param_base:
meta.parameters = parameters = param_base.parameters[:]
parameters.extend(cls_parameters)
meta.custom_parameters = c_parameters = param_base.custom_parameters[:]
c_parameters.extend(cls_c_parameters)
meta.parameter_names = parameter_names = param_base.parameter_names[:]
parameter_names.extend(i[0] for i in itertools.chain(cls_parameters, cls_c_parameters))
else:
meta.parameters = parameters = cls_parameters
meta.custom_parameters = c_parameters = cls_c_parameters
meta.parameter_names = [i[0] for i in itertools.chain(cls_parameters, cls_c_parameters)]
meta.required_components = required_components = set()
meta.component_parameters = component_rev = {}
if not getattr(meta, 'abstract', False):
custom_prefix = meta.custom_prefix
for p_name, param in parameters:
url_comp = param.url_component
if param.required:
required_components.add(url_comp)
setattr(new_cls, p_name, _get_property(url_comp))
component_rev[url_comp] = p_name
for p_name, param in c_parameters:
url_comp = '{0}{1}'.format(custom_prefix, param.index)
setattr(new_cls, p_name, _get_property(url_comp))
component_rev[url_comp] = p_name
return new_cls
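# Sketch of how the metaclass is meant to be used (class, field and component
# names below are hypothetical, not part of this module):
#
#     class ExampleParams(UrlGenerator):
#         class Meta(object):
#             custom_prefix = 'c'
#         name = UP('n', required=True)
#         extra = VP(1)
#
# gives ExampleParams instances `name`/`extra` properties backed by the URL
# components 'n' and 'c1', so ExampleParams(name='spam').url() returns
# {'n': 'spam'}, while ExampleParams().url() raises InvalidParametersException
# because the required component is missing.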
class AbstractUrlGenerator(six.with_metaclass(ParameterMeta)):
class Meta(object):
abstract = True
def __init__(self, params=None, **kwargs):
if self.meta.abstract:
raise ValueError("Cannot instantiate an abstract UrlGenerator class.")
if isinstance(params, self.__class__):
self._params = params._params.copy()
else:
self._params = {}
self.update(params)
self.update_from_kwargs(kwargs)
def __len__(self):
return len(self._params)
def __repr__(self):
cp = self.meta.component_parameters
items = ', '.join('{0}={1!r}'.format(cp[k], v)
for k, v in six.iteritems(self._params))
return '<{0}: {1}>'.format(self.__class__.__name__, items)
def _update_from_dict(self, d):
names = self.meta.parameter_names
for k, v in six.iteritems(d):
if k in names:
setattr(self, k, v)
else:
raise ValueError("Invalid field name '{0}'.".format(k))
def copy(self):
new_obj = self.__class__()
new_obj._params = self._params.copy()
return new_obj
def update(self, other=None, **kwargs):
if other:
if isinstance(other, AbstractUrlGenerator):
for name in self.meta.parameter_names:
setattr(self, name, getattr(other, name))
elif isinstance(other, dict):
self._update_from_dict(other)
else:
raise ValueError("Invalid type for update.")
self.update_from_kwargs(kwargs)
def update_from_kwargs(self, kwargs):
names = self.meta.parameter_names
for k, v in six.iteritems(kwargs):
if k in names:
if v is not None:
setattr(self, k, v)
else:
raise ValueError("Invalid field name '{0}'.".format(k))
def is_empty(self):
return not self._params
def validate(self):
missing = {self.meta.component_parameters[req]
for req in self.meta.required_components.difference(self._params)}
if missing:
raise InvalidParametersException("Parameters are required, but missing: {0}",
', '.join(missing))
def url(self, *args, **kwargs):
raise NotImplementedError("Method is not implemented.")
class UrlGenerator(AbstractUrlGenerator):
class Meta(object):
abstract = True
def url(self, *args, **kwargs):
self.validate()
return self._params.copy()
class PrefixUrlGenerator(AbstractUrlGenerator):
class Meta(object):
abstract = True
def __init__(self, *args, **kwargs):
super(PrefixUrlGenerator, self).__init__(*args, **kwargs)
if not self.meta.preset_prefix:
raise ValueError("Meta value preset_prefix is not set.")
if not self.meta.index_prefix:
raise ValueError("Meta value index_prefix is not set.")
def url(self, *args, **kwargs):
self.validate()
meta = self.meta
prefix = '{0}{1}'.format(meta.preset_prefix, meta.index_prefix)
params = {
'{0}{1}'.format(prefix, key): value
for key, value in six.iteritems(self._params)
}
return params
class EnumeratedUrlGenerator(AbstractUrlGenerator):
class Meta(object):
abstract = True
def url(self, index, *args, **kwargs):
self.validate()
meta = self.meta
prefix = '{0}{1}{2}'.format(meta.preset_prefix, meta.index_prefix, meta.item_prefix)
enum_params = {
'{0}{1}{2}'.format(prefix, index, key): value
for key, value in six.iteritems(self._params)
}
return enum_params
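# A minimal usage sketch of the generators above. It assumes that
# ``UrlParameter`` and ``VariableParameter`` are the descriptor classes defined
# earlier in this module, and that ``UrlParameter`` takes a url component name
# plus an optional ``required`` flag -- treat those signatures as assumptions,
# not as the library's documented API.
#
#     class PageViewUrl(UrlGenerator):
#         hostname = UrlParameter('dh', required=True)
#         page = UrlParameter('dp')
#
#     pv = PageViewUrl(hostname='example.org', page='/index.html')
#     pv.validate()        # raises InvalidParametersException if 'dh' is missing
#     params = pv.url()    # plain dict mapping url components to values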
|
{
"content_hash": "f011791d767bf3634c1af8c73f789db5",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 100,
"avg_line_length": 36.039800995024876,
"alnum_prop": 0.577167310877968,
"repo_name": "merll/server-side-tracking",
"id": "e130d9714db518497b698d585a92c5efa1850aa4",
"size": "7268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server_tracking/parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63740"
}
],
"symlink_target": ""
}
|
import os, inspect
from compiler.error import AssemblerError, ParseError, ExtensionError
from compiler.executor import Executor
from compiler.tokenizer import Tokenizer, Token
from compiler.token import TokenType, SYMBOLS, KEYWORDS
from compiler.assembler import Assembler
from compiler.expression import Stack, Expression, ExpressionSolver
from compiler.instruction import Instruction, AsmExpressionContainer, JumpFlag
from compiler.memory import Memory
from compiler.utils import Utils
class ScriptCompiler(Executor):
def __init__(self, *, testing=False):
self.testing = testing
self.debug = False
self.mem = Memory()
self.jump_table = {}
self.solver = ExpressionSolver()
def compile_from_file(self, filename, *, debug=False):
path = os.path.abspath(filename)
ext = os.path.splitext(path)[1]
if ext != ".script":
raise ExtensionError("Unknown extension: \'{0}\'".format(ext))
with open(path, "r") as f:
return self.compile(f.read(), debug=debug)
def compile(self, string, *, debug=False):
self.debug = debug
# Read file contents and interpret it
t = Tokenizer()
t.load(string)
self.tokens = t.tokenize()
print("\nTokens:")
for t in self.tokens: print(" {0}\t\t{1}".format(str(t.value), str(t.token)))
(exprs, asm) = self._parse(self.tokens)
a = Assembler(mem_size=100, testing=self.testing)
output = a.load(asm)
return output
def _print_expr_tree(self, exprs, prefix=""):
if len(exprs) == 0: return
idx = 0
curr = exprs[idx]
while curr != None:
print("{0}{1}".format(prefix, curr))
if len(curr.expressions) != 0:
self._print_expr_tree(curr.expressions, prefix + "\t")
if idx + 1 < len(exprs):
idx += 1
curr = exprs[idx]
else:
curr = None
def _parse_expr_recursive(self, tokens):
exprs = []
temp = []
expr_stack = Stack()
block_expr = None
in_block = False
level = 0
idx = 0
while idx < len(tokens):
# increment
t = tokens[idx]
idx += 1
# start parsing tokens
if t.token == TokenType.FuncStart: # {
# discard token
# increment level
level += 1
# init an expression on the stack
e = Expression(temp)
expr_stack.push(e)
temp = []
# set inblock to true
if not in_block: in_block = True
else: pass # already in a block
elif t.token == TokenType.FuncEnd: # }
# discard token
# increment level
level -= 1
if level > 0:
curr = expr_stack.pop()
prev = expr_stack.pop()
prev.expressions.append(curr)
expr_stack.push(prev)
elif level == 0:
in_block = False
curr = expr_stack.pop()
                    # we're now at the lowest level and there is no
                    # other block on the stack (...shouldn't be, at least).
exprs.append(curr)
else:
pass # error?
elif t.token == TokenType.SemiColon:
# discard token
# now turn temp list into an expression
e = Expression(temp)
temp = []
if in_block:
curr = expr_stack.pop()
curr.expressions.append(e)
expr_stack.push(curr)
else:
exprs.append(e)
else: # just add the token to the temp list
temp.append(t)
self._print_expr_tree(exprs) # debug
return exprs
def _handle_assignment(self, ex):
"""
        If the identifier does not exist, create a reference and
        solve the expression with 'result_var' set to this identifier.
        If the identifier already exists, create a temp reference to hold the
        expression result, then add the instructions that move the temp
        result variable into the existing reference.
"""
identifier = str(ex.tokens[0].value)
# skip the identifier and the '=' char
relevant_tokens = ex.tokens[2:]
asm = AsmExpressionContainer(ex)
# reference does not exist
if not self.mem.has_reference(identifier):
if len(relevant_tokens) == 1 and relevant_tokens[0].value.isdigit():
# one token that is an int value
self.mem.add_reference(identifier, relevant_tokens[0].value)
elif len(relevant_tokens) == 1 and self.mem.has_reference(relevant_tokens[0].value):
# one token that is an identifier
self.mem.add_reference(identifier, self.mem.get_reference(relevant_tokens[0].value))
else:
# several tokens, let's solve it
self.mem.add_reference(identifier)
instructions = self.solver.gen_runtime_expression(relevant_tokens,
self.mem, result_var=identifier)
asm.merge(instructions)
# reference exists
else:
temp = Memory.gen_temp_name()
#self.mem.add_reference(temp)
if len(relevant_tokens) == 1 and relevant_tokens[0].value.isdigit():
# one token that is an int value
self.mem.add_reference(temp, relevant_tokens[0].value)
elif len(relevant_tokens) == 1 and self.mem.has_reference(relevant_tokens[0].value):
# one token that is an identifier
self.mem.add_reference(temp, self.mem.get_reference(relevant_tokens[0].value))
else:
# several tokens, let's solve it
self.mem.add_reference(temp)
instructions = self.solver.gen_runtime_expression(relevant_tokens,
self.mem, result_var=temp)
asm.merge(instructions)
            # the 'temp' variable may already be loaded in the
            # AC, but just to be sure we load it again.
asm.add(Instruction("LDA", variable=temp, comment="variable 're-assignment'"))
asm.add(Instruction("STA", variable=identifier))
return asm
def _handle_if(self, ex):
        # skip the conditional keyword, the '(' and the trailing ')'
relevant_tokens = ex.tokens[2:len(ex.tokens)-1]
asm = AsmExpressionContainer(ex)
result_var = ""
if len(relevant_tokens) == 1 and relevant_tokens[0].token == TokenType.Identifier \
and not relevant_tokens[0].value.isdigit():
# single token with a value, should be dynamic
#print("IT'S AN IDENTIFIER")
var_name = str(relevant_tokens[0].value)
result_var = var_name
#self.mem.add_reference(temp, self.mem.get_reference(relevant_tokens[0].value))
else:
temp = Memory.gen_temp_name()
#val = int(self.solver.solve_expr(ex.tokens[2:len(ex.tokens)-1], self.mem, None))
#ex.value = val
#var_name = add_mem_ref(val)
if len(relevant_tokens) == 1 and relevant_tokens[0].value.isdigit():
# one token that is an int value
self.mem.add_reference(temp, relevant_tokens[0].value)
elif len(relevant_tokens) == 1 and self.mem.has_reference(relevant_tokens[0].value):
# one token that is an identifier
#self.mem.add_reference(temp, self.mem.get_reference(relevant_tokens[0].value))
temp = relevant_tokens[0].value
else:
# several tokens, let's solve it
self.mem.add_reference(temp)
instructions = self.solver.gen_runtime_expression(relevant_tokens,
self.mem, result_var=temp)
asm.merge(instructions)
result_var = temp
asm.load(result_var)
#print("a.load(var_name); == " + var_name)
jp_name = Memory.gen_jump_name()
#asm.load(temp)
asm.add(Instruction("BRZ", jump=jp_name, comment="jump if zero"))
for e in ex.expressions:
ae = self._handle_expr(e)
if ae is not None:
asm.asm_expressions.append(ae)
for aa in asm.asm_expressions:
instrs = aa.get_instructions()
for i in instrs:
asm.add(i)
asm.add(JumpFlag(jp_name))
return asm
def _handle_func_call(self, ex):
        # TODO: function lookup table with argument count and such,
        # because right now all we have is "print" and "read"
identifier = str(ex.tokens[2].value)
a = AsmExpressionContainer(ex)
name = str(ex.tokens[0].value)
if name == "print":
# identifier is a constant
# so we just print it
if identifier.isdigit():
temp = Memory.gen_temp_name()
self.mem.add_reference(temp, identifier)
a.load(temp)
a.do_print()
else:
a.load(identifier)
a.do_print()
elif name == "read":
a.do_read()
if self.mem.has_reference(identifier):
temp = Memory.gen_temp_name()
self.mem.add_reference(temp)
a.add(Instruction("STA", variable=temp, comment="store input"))
a.add(Instruction("LDA", variable=temp, comment="variable 're-assignment'"))
a.add(Instruction("STA", variable=identifier))
else:
print("im so done with this shit")
return a
def _handle_expr(self, ex):
"""
evaluate an expression and generate assembly for it
"""
# returns true or false
def expr_matches(expr, tokens):
if len(expr.tokens) < len(tokens): return False
for idx, val in enumerate(tokens):
if str(val) != str(expr.tokens[idx].token):
return False
return True
match_assignment = lambda x: expr_matches(x, [TokenType.Identifier, TokenType.Equals])
match_condition = lambda x: expr_matches(x, [TokenType.Conditional, TokenType.LParen])
match_func = lambda x: expr_matches(x, [TokenType.Function, TokenType.LParen])
        # VARIABLE ASSIGNMENT
if match_assignment(ex):
asm = self._handle_assignment(ex)
return asm
elif match_condition(ex): # IF STATEMENT
asm = self._handle_if(ex)
return asm
elif match_func(ex):
asm = self._handle_func_call(ex)
return asm
return None
def _bind_jumps(self, instructions):
def find_jump(instructions, alias):
for idx, instr in enumerate(instructions):
if instr.is_jump_endpoint:
for j in instr.jumps:
if alias == j.alias:
return (idx, instr)
return None
for inst in instructions:
if inst.invalidate_jump_bindings:
need = inst.jump
                result = find_jump(instructions, need)
                if result is None:
                    print("Error: no jump endpoint found for alias '{0}'".format(need))
                    continue
                (line_idx, jump_inst) = result
                inst.set_adr(line_idx)
return instructions
def _merge_jumps(self, instructions):
copy = [i for i in instructions]
skip = 0
for idx, inst in enumerate(copy):
jumps = []
inc = 1
if skip != 0:
skip -= 1
continue
if isinstance(inst, JumpFlag):
# with the way we create the instructions,
# there will always be another Instruction
# after a jump command.
jumps.append(inst)
nxt = copy[idx + inc]
while isinstance(nxt, JumpFlag):
jumps.append(nxt)
inc += 1
skip += 1
nxt = copy[idx + inc]
# next is now an Instruction (hopefully)
if not isinstance(nxt, Instruction):
print("Error: Instance was not an Instruction")
for jp in jumps:
nxt.add_jump(jp)
# Delete all the JumpFlags from the copy list
        has_jumps = lambda lst: any(isinstance(l, JumpFlag) for l in lst)
while has_jumps(copy):
for idx, j in enumerate(copy):
if isinstance(j, JumpFlag):
del copy[idx]
continue
return copy
def _parse(self, tokens):
exprs = self._parse_expr_recursive(tokens)
asm_list = [] # AsmExpression
for ex in exprs:
asm_expr = self._handle_expr(ex)
if Utils.check_none_critical(asm_expr):
Utils.debug("Compiler Error!: 'asm_expr' cannot be None.")
asm_list.append(asm_expr)
g = []
mem_asm = self.mem.gen_asm()
g.extend(mem_asm)
# get the rest of the instructions
for expr in asm_list:
g.extend(expr.get_instructions())
g.append(Instruction("HLT", comment="exit"))
print("\nDebug preview:\n")
for idx, gg in enumerate(g):
print(str(idx) + ": " + str(gg))
instructions = self._merge_jumps(g)
instructions = self.mem.bind_mem(instructions)
if instructions is None:
print("Critical Error!: Memory bindings.")
return None
instructions = self._bind_jumps(instructions)
if Utils.check_none_critical(instructions):
print("Critical Error!: Jump bindings.")
return None
assembly = "\n".join([a.asm() for a in instructions])
print("\nCompiled:\n")
for idx, gg in enumerate(instructions):
print(str(idx) + ": " + str(gg))
return [], assembly
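# Rough command-line sketch for trying the compiler on a ``.script`` file; the
# default file name and the ``testing`` flag below are illustrative only.
if __name__ == "__main__":
    import sys
    source_file = sys.argv[1] if len(sys.argv) > 1 else "example.script"
    compiler = ScriptCompiler(testing=True)
    try:
        compiler.compile_from_file(source_file, debug=True)
    except (ExtensionError, ParseError, AssemblerError) as err:
        print("Compilation failed: {0}".format(err))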
|
{
"content_hash": "e7865f855d7b0c0552bcc4a4eee66793",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 100,
"avg_line_length": 33.6,
"alnum_prop": 0.533014950166113,
"repo_name": "Syntox32/LittleMan",
"id": "edacee31f97cd7f1bd8edfe898e472c019833028",
"size": "14472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compiler/compiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "696"
},
{
"name": "Python",
"bytes": "52233"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('planilla', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='planilla',
name='curso',
field=models.ForeignKey(to='curso.Curso'),
),
]
|
{
"content_hash": "f8e6cf48f62bd8dea9ee25a7b0ddc6ac",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 20.72222222222222,
"alnum_prop": 0.5844504021447721,
"repo_name": "matt987/presente",
"id": "8a2363574c339a40723bb14a449c92556aff7b50",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presente/planilla/migrations/0002_auto_20150619_1939.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26219"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()
@register.filter
def tohex(value):
"""Return repr in hex"""
try:
value = int(value)
    except (TypeError, ValueError):
return "Unrecognized Number"
return "%x" % value
@register.filter
def tohexpair(value):
"""Return hex in couples"""
hval = tohex(value)
return ':'.join([hval[i:i+2] for i in range(0, len(hval),2)])
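# Example template usage, assuming this library is loaded under the name
# 'core_extras' (the load name below is an assumption based on the file name):
#
#     {% load core_extras %}
#     {{ 3735928559|tohex }}       -> "deadbeef"
#     {{ 3735928559|tohexpair }}   -> "de:ad:be:ef"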
|
{
"content_hash": "c8423acdb058f19bda0c785840acfec6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 21.36842105263158,
"alnum_prop": 0.6305418719211823,
"repo_name": "fim/keymaker",
"id": "009400624eae956b47feba0cbac4aa0f320d8246",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keymaker/core/templatetags/core_extras.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6673"
},
{
"name": "HTML",
"bytes": "13278"
},
{
"name": "Python",
"bytes": "21366"
}
],
"symlink_target": ""
}
|
"""
logbook.compat
~~~~~~~~~~~~~~
Backwards compatibility with stdlib's logging package and the
warnings module.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import collections
import logging
import sys
import warnings
from datetime import date, datetime
import logbook
from logbook.helpers import u, string_types, iteritems
_epoch_ord = date(1970, 1, 1).toordinal()
def redirect_logging(set_root_logger_level=True):
"""Permanently redirects logging to the stdlib. This also
removes all otherwise registered handlers on root logger of
the logging system but leaves the other loggers untouched.
:param set_root_logger_level: controls of the default level of the legacy
root logger is changed so that all legacy log messages get redirected
to Logbook
"""
del logging.root.handlers[:]
logging.root.addHandler(RedirectLoggingHandler())
if set_root_logger_level:
logging.root.setLevel(logging.DEBUG)
class redirected_logging(object):
"""Temporarily redirects logging for all threads and reverts
it later to the old handlers. Mainly used by the internal
unittests::
from logbook.compat import redirected_logging
with redirected_logging():
...
"""
def __init__(self, set_root_logger_level=True):
self.old_handlers = logging.root.handlers[:]
self.old_level = logging.root.level
self.set_root_logger_level = set_root_logger_level
def start(self):
redirect_logging(self.set_root_logger_level)
def end(self, etype=None, evalue=None, tb=None):
logging.root.handlers[:] = self.old_handlers
logging.root.setLevel(self.old_level)
__enter__ = start
__exit__ = end
class LoggingCompatRecord(logbook.LogRecord):
def _format_message(self, msg, *args, **kwargs):
if kwargs:
assert not args
return msg % kwargs
else:
assert not kwargs
return msg % tuple(args)
class RedirectLoggingHandler(logging.Handler):
"""A handler for the stdlib's logging system that redirects
transparently to logbook. This is used by the
:func:`redirect_logging` and :func:`redirected_logging`
functions.
If you want to customize the redirecting you can subclass it.
"""
def __init__(self):
logging.Handler.__init__(self)
def convert_level(self, level):
"""Converts a logging level into a logbook level."""
if level >= logging.CRITICAL:
return logbook.CRITICAL
if level >= logging.ERROR:
return logbook.ERROR
if level >= logging.WARNING:
return logbook.WARNING
if level >= logging.INFO:
return logbook.INFO
return logbook.DEBUG
def find_extra(self, old_record):
"""Tries to find custom data from the old logging record. The
return value is a dictionary that is merged with the log record
extra dictionaries.
"""
rv = vars(old_record).copy()
for key in ('name', 'msg', 'args', 'levelname', 'levelno',
'pathname', 'filename', 'module', 'exc_info',
'exc_text', 'lineno', 'funcName', 'created',
'msecs', 'relativeCreated', 'thread', 'threadName',
'greenlet', 'processName', 'process'):
rv.pop(key, None)
return rv
def find_caller(self, old_record):
"""Tries to find the caller that issued the call."""
frm = sys._getframe(2)
while frm is not None:
if (frm.f_globals is globals() or
frm.f_globals is logbook.base.__dict__ or
frm.f_globals is logging.__dict__):
frm = frm.f_back
else:
return frm
def convert_time(self, timestamp):
"""Converts the UNIX timestamp of the old record into a
datetime object as used by logbook.
"""
return datetime.utcfromtimestamp(timestamp)
def convert_record(self, old_record):
"""Converts an old logging record into a logbook log record."""
args = old_record.args
kwargs = None
# Logging allows passing a mapping object, in which case args will be a mapping.
if isinstance(args, collections.Mapping):
kwargs = args
args = None
record = LoggingCompatRecord(old_record.name,
self.convert_level(old_record.levelno),
old_record.msg, args,
kwargs, old_record.exc_info,
self.find_extra(old_record),
self.find_caller(old_record))
record.time = self.convert_time(old_record.created)
return record
def emit(self, record):
logbook.dispatch_record(self.convert_record(record))
class LoggingHandler(logbook.Handler):
"""Does the opposite of the :class:`RedirectLoggingHandler`, it sends
messages from logbook to logging. Because of that, it's a very bad
idea to configure both.
This handler is for logbook and will pass stuff over to a logger
from the standard library.
Example usage::
from logbook.compat import LoggingHandler, warn
with LoggingHandler():
warn('This goes to logging')
"""
def __init__(self, logger=None, level=logbook.NOTSET, filter=None,
bubble=False):
logbook.Handler.__init__(self, level, filter, bubble)
if logger is None:
logger = logging.getLogger()
elif isinstance(logger, string_types):
logger = logging.getLogger(logger)
self.logger = logger
def get_logger(self, record):
"""Returns the logger to use for this record. This implementation
always return :attr:`logger`.
"""
return self.logger
def convert_level(self, level):
"""Converts a logbook level into a logging level."""
if level >= logbook.CRITICAL:
return logging.CRITICAL
if level >= logbook.ERROR:
return logging.ERROR
if level >= logbook.WARNING:
return logging.WARNING
if level >= logbook.INFO:
return logging.INFO
return logging.DEBUG
def convert_time(self, dt):
"""Converts a datetime object into a timestamp."""
year, month, day, hour, minute, second = dt.utctimetuple()[:6]
days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
hours = days * 24 + hour
minutes = hours * 60 + minute
seconds = minutes * 60 + second
return seconds
def convert_record(self, old_record):
"""Converts a record from logbook to logging."""
if sys.version_info >= (2, 5):
# make sure 2to3 does not screw this up
optional_kwargs = {'func': getattr(old_record, 'func_name')}
else:
optional_kwargs = {}
record = logging.LogRecord(old_record.channel,
self.convert_level(old_record.level),
old_record.filename,
old_record.lineno,
old_record.message,
(), old_record.exc_info,
**optional_kwargs)
for key, value in iteritems(old_record.extra):
record.__dict__.setdefault(key, value)
record.created = self.convert_time(old_record.time)
return record
def emit(self, record):
self.get_logger(record).handle(self.convert_record(record))
def redirect_warnings():
"""Like :func:`redirected_warnings` but will redirect all warnings
to the shutdown of the interpreter:
.. code-block:: python
from logbook.compat import redirect_warnings
redirect_warnings()
"""
redirected_warnings().__enter__()
class redirected_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context, and logs warnings using the logbook system.
The :attr:`~logbook.LogRecord.channel` attribute of the log record will be
the import name of the warning.
Example usage:
.. code-block:: python
from logbook.compat import redirected_warnings
from warnings import warn
with redirected_warnings():
warn(DeprecationWarning('logging should be deprecated'))
"""
def __init__(self):
self._entered = False
def message_to_unicode(self, message):
try:
return u(str(message))
except UnicodeError:
return str(message).decode('utf-8', 'replace')
def make_record(self, message, exception, filename, lineno):
category = exception.__name__
if exception.__module__ not in ('exceptions', 'builtins'):
category = exception.__module__ + '.' + category
rv = logbook.LogRecord(category, logbook.WARNING, message)
# we don't know the caller, but we get that information from the
# warning system. Just attach them.
rv.filename = filename
rv.lineno = lineno
return rv
def start(self):
if self._entered: # pragma: no cover
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._showwarning = warnings.showwarning
def showwarning(message, category, filename, lineno,
file=None, line=None):
message = self.message_to_unicode(message)
record = self.make_record(message, category, filename, lineno)
logbook.dispatch_record(record)
warnings.showwarning = showwarning
def end(self, etype=None, evalue=None, tb=None):
if not self._entered: # pragma: no cover
raise RuntimeError("Cannot exit %r without entering first" % self)
warnings.filters = self._filters
warnings.showwarning = self._showwarning
__enter__ = start
__exit__ = end
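# Small usage sketch: route stdlib ``logging`` calls through Logbook for the
# duration of a block.  ``StderrHandler`` is a regular Logbook handler; the
# compat wiring is exactly the helpers defined above.
#
#     import logging
#     import logbook
#     from logbook.compat import redirected_logging
#
#     with logbook.StderrHandler().applicationbound():
#         with redirected_logging():
#             logging.getLogger('legacy').warning('now handled by Logbook')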
|
{
"content_hash": "8b52151514afee6cf99892da2c079acd",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 88,
"avg_line_length": 34.78787878787879,
"alnum_prop": 0.6006581494386373,
"repo_name": "pombredanne/logbook",
"id": "b65ac006043f0ada51b2c6b8e19e1c8d435828da",
"size": "10356",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "logbook/compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "301500"
}
],
"symlink_target": ""
}
|
"""Kraken Softimage Plug-in."""
import os
def dccTest():
"""Returns true or false after checking if the `KRAKEN_DCC` environment
variable is set to use this plugin.
.. note::
The variable value to activate the Softimage plugin is: `Softimage`.
Returns:
bool: True if the environment variable is set to use this plugin.
"""
krakenDCC = os.environ.get('KRAKEN_DCC')
if krakenDCC == "Softimage":
return True
else:
return False
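# Illustrative check only -- in practice the launcher environment sets the
# variable before Kraken imports its DCC plugin packages:
#
#     os.environ['KRAKEN_DCC'] = 'Softimage'
#     assert dccTest() is True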
|
{
"content_hash": "b36944a797a7eb97a5e8182cb8d5a460",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 22.5,
"alnum_prop": 0.6383838383838384,
"repo_name": "oculusstorystudio/kraken",
"id": "3c47d6960c9097c90aac4839a75d4a7472197e86",
"size": "495",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop_OSS",
"path": "Python/kraken/plugins/si_plugin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AMPL",
"bytes": "136"
},
{
"name": "Batchfile",
"bytes": "2584"
},
{
"name": "CSS",
"bytes": "21033"
},
{
"name": "MAXScript",
"bytes": "521"
},
{
"name": "Mathematica",
"bytes": "4442959"
},
{
"name": "Python",
"bytes": "2841362"
},
{
"name": "Shell",
"bytes": "2689"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import coremltools as ct
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import get_op_types_in_program
from .compression_passes import (WeightAffineQuantizer, WeightPalettizer,
WeightSparsifier)
np.random.seed(1984)
def _get_conv_program():
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 30, 10, 10))], opset_version=ct.target.iOS16)
def prog(x):
conv_weight = np.random.rand(90, 30, 2, 2).astype(np.float32)
x = mb.conv(x=x, weight=conv_weight)
return x
return prog
class TestBasicCompressionGraphPass:
    # Most of the numerical tests are already covered in coremltools.tests.ml_program.test_compression_utils
    # These tests check the basic behavior of the graph pass classes
@staticmethod
@pytest.mark.parametrize(
"fake_compression",
[True, False],
)
def test_affine_quantizer(fake_compression):
quantizer = WeightAffineQuantizer(fake_compression=fake_compression, op_selector=lambda const: True)
prog = _get_conv_program()
quantizer.apply(prog)
expected_ops = ["constexpr_affine_dequantize", "conv"] if not fake_compression else ["conv"]
assert get_op_types_in_program(prog) == expected_ops
@staticmethod
@pytest.mark.parametrize(
"fake_compression",
[True, False],
)
def test_weight_sparsifier(fake_compression):
quantizer = WeightSparsifier(
fake_compression=fake_compression,
op_selector=lambda const: True,
mode="percentile_based",
target_percentile=0.75)
prog = _get_conv_program()
quantizer.apply(prog)
expected_ops = ["constexpr_sparse_to_dense", "conv"] if not fake_compression else ["conv"]
assert get_op_types_in_program(prog) == expected_ops
@staticmethod
@pytest.mark.parametrize(
"fake_compression",
[True, False],
)
def test_weight_palettization(fake_compression):
quantizer = WeightPalettizer(
fake_compression=fake_compression,
op_selector=lambda const: True,
mode="uniform",
nbits=4,
)
prog = _get_conv_program()
quantizer.apply(prog)
expected_ops = ["constexpr_lut_to_dense", "conv"] if not fake_compression else ["conv"]
assert get_op_types_in_program(prog) == expected_ops
|
{
"content_hash": "a80058d75267bb89f4517b8c92ad3411",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 109,
"avg_line_length": 36.76470588235294,
"alnum_prop": 0.6488,
"repo_name": "apple/coremltools",
"id": "b0f66d317598149460223910723ee8e24c1cd615",
"size": "2720",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "coremltools/converters/mil/mil/passes/test_compression_passes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "79917"
},
{
"name": "C++",
"bytes": "1420033"
},
{
"name": "CMake",
"bytes": "20418"
},
{
"name": "Makefile",
"bytes": "4258"
},
{
"name": "Mustache",
"bytes": "2676"
},
{
"name": "Objective-C",
"bytes": "4061"
},
{
"name": "Objective-C++",
"bytes": "28933"
},
{
"name": "Python",
"bytes": "5004520"
},
{
"name": "Shell",
"bytes": "19662"
}
],
"symlink_target": ""
}
|
from compileall import compile_file
from fnmatch import fnmatch
import os
from fabric.api import task
@task
def clean(directory):
"""
    Remove compiled Python files (*.pyc) from the given directory tree
"""
for root, paths, files in os.walk(directory):
for file in files:
if fnmatch(file, '*.pyc'):
try:
print "Removing file %s" % os.path.join(root, file)
os.remove(os.path.join(root, file))
except OSError as e:
print e
exit(1)
@task
def compile(directory):
"""
    Byte-compile all Python source files (*.py) under the given directory
"""
for root, paths, files in os.walk(directory):
for file in files:
if fnmatch(file, '*.py'):
compile_file(os.path.join(root, file))
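# Typical Fabric 1.x invocations of the tasks above (the path argument is
# whatever project directory you want to walk):
#
#     fab clean:directory=.
#     fab compile:directory=.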
|
{
"content_hash": "dbfb4116895598e8f113d38dff8733c8",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 23.323529411764707,
"alnum_prop": 0.5397225725094578,
"repo_name": "kpachnis/dj-fabric-tasks",
"id": "8ab27d6c66674ab592a5a1ca46d3c2460dfb0619",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12851"
}
],
"symlink_target": ""
}
|
import requests
from apps.core.models import UserProfile
from django.conf import settings
from social.exceptions import AuthFailed
USER_INFO_LI_REQUEST_URL = ('https://api.linkedin.com/v1/people/~:('
'id,'
'firstName,'
'lastName,'
'emailAddress,'
'pictureUrl,'
'publicProfileUrl)'
'?oauth2_access_token={}'
'&format=json')
def update_or_create_user_profile(backend, user, response, *args, **kwargs):
li_access_token = response.get('access_token')
li_resp = requests.get(USER_INFO_LI_REQUEST_URL.format(li_access_token))
li_resp_json = li_resp.json()
li_email = li_resp_json.get('emailAddress')
if li_email not in settings.VALID_EMAILS:
raise AuthFailed(backend, 'This is not a whitelisted email')
user_profile, created = UserProfile.objects.get_or_create(user=user)
user_profile.li_email = li_email
user_profile.li_first_name = li_resp_json.get('firstName')
user_profile.li_last_name = li_resp_json.get('lastName')
user_profile.li_picture_url = li_resp_json.get('pictureUrl')
user_profile.li_profile_url = li_resp_json.get('publicProfileUrl')
user_profile.save()
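# This function is meant to run as a python-social-auth pipeline step.  A
# hedged settings sketch; the surrounding steps are the library's defaults and
# the dotted path of this module is inferred from the project layout:
#
#     SOCIAL_AUTH_PIPELINE = (
#         'social.pipeline.social_auth.social_details',
#         'social.pipeline.social_auth.social_uid',
#         'social.pipeline.user.create_user',
#         'cgvt_site.auth_pipeline.update_or_create_user_profile',
#     )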
|
{
"content_hash": "653ac776e8f4f609eb11c21ad063604d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 39.705882352941174,
"alnum_prop": 0.5955555555555555,
"repo_name": "cgvt/website",
"id": "f648a48985663147bbaeea1e9bfabaa2a8b13cd0",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgvt_site/cgvt_site/auth_pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4189"
},
{
"name": "HTML",
"bytes": "25315"
},
{
"name": "JavaScript",
"bytes": "1732"
},
{
"name": "Python",
"bytes": "12335"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
}
|
from .macro import WrappedMacroBuilder
class G4Run(WrappedMacroBuilder):
def __init__(self, events):
super(G4Run, self).__init__()
self.events = events
self.add_command("/run/beamOn %d" % self.events)
def add_scorer(self, scorer):
# TODO: type checking?
self.wrap(scorer)
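# Hypothetical usage -- how the wrapped macro text is finally rendered depends
# on the WrappedMacroBuilder base class in .macro, which is not shown here:
#
#     run = G4Run(events=10000)      # queues "/run/beamOn 10000"
#     run.add_scorer(scorer_macro)   # 'scorer_macro' is assumed to be another macro builder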
|
{
"content_hash": "ee59c6875dff2c227bc92bb031c6f3c3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.6211180124223602,
"repo_name": "janpipek/pyg4app",
"id": "dae56eca9e4135f258f6487d8ed26691c780b4f5",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "g4app/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13508"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import os.path
import stat
import shutil
import tempfile
import unittest
from StringIO import StringIO
try:
from svn import core, repos
has_svn = True
except:
has_svn = False
from trac.log import logger_factory
from trac.test import TestSetup
from trac.core import TracError
from trac.util.datefmt import utc
from trac.versioncontrol import Changeset, Node, NoSuchChangeset
from trac.versioncontrol.svn_fs import SubversionRepository
from trac.versioncontrol import svn_fs
REPOS_PATH = os.path.join(tempfile.gettempdir(), 'trac-svnrepos')
HEAD = 21
class SubversionRepositoryTestSetup(TestSetup):
def setUp(self):
dumpfile = open(os.path.join(os.path.split(__file__)[0],
'svnrepos.dump'))
svn_fs._import_svn()
core.apr_initialize()
pool = core.svn_pool_create(None)
dumpstream = None
try:
if os.path.exists(REPOS_PATH):
print 'trouble ahead with db/rep-cache.db... see #8278'
r = repos.svn_repos_create(REPOS_PATH, '', '', None, None, pool)
if hasattr(repos, 'svn_repos_load_fs2'):
repos.svn_repos_load_fs2(r, dumpfile, StringIO(),
repos.svn_repos_load_uuid_default, '',
0, 0, None, pool)
else:
dumpstream = core.svn_stream_from_aprfile(dumpfile, pool)
repos.svn_repos_load_fs(r, dumpstream, None,
repos.svn_repos_load_uuid_default, '',
None, None, pool)
finally:
if dumpstream:
core.svn_stream_close(dumpstream)
core.svn_pool_destroy(pool)
core.apr_terminate()
def tearDown(self):
if os.name == 'nt':
# The Windows version of 'shutil.rmtree' doesn't override the
# permissions of read-only files, so we have to do it ourselves:
format_file = os.path.join(REPOS_PATH, 'db', 'format')
if os.path.isfile(format_file):
os.chmod(format_file, stat.S_IRWXU)
os.chmod(os.path.join(REPOS_PATH, 'format'), stat.S_IRWXU)
shutil.rmtree(REPOS_PATH)
class SubversionRepositoryTestCase(unittest.TestCase):
def setUp(self):
self.repos = SubversionRepository(REPOS_PATH,
{'name': 'repo', 'id': 1},
logger_factory('test'))
def tearDown(self):
self.repos = None
def test_repos_normalize_path(self):
self.assertEqual('/', self.repos.normalize_path('/'))
self.assertEqual('/', self.repos.normalize_path(''))
self.assertEqual('/', self.repos.normalize_path(None))
self.assertEqual(u'tête', self.repos.normalize_path(u'tête'))
self.assertEqual(u'tête', self.repos.normalize_path(u'/tête'))
self.assertEqual(u'tête', self.repos.normalize_path(u'tête/'))
self.assertEqual(u'tête', self.repos.normalize_path(u'/tête/'))
def test_repos_normalize_rev(self):
self.assertEqual(HEAD, self.repos.normalize_rev('latest'))
self.assertEqual(HEAD, self.repos.normalize_rev('head'))
self.assertEqual(HEAD, self.repos.normalize_rev(''))
self.assertRaises(NoSuchChangeset,
self.repos.normalize_rev, 'something else')
self.assertEqual(HEAD, self.repos.normalize_rev(None))
self.assertEqual(11, self.repos.normalize_rev('11'))
self.assertEqual(11, self.repos.normalize_rev(11))
def test_rev_navigation(self):
self.assertEqual(1, self.repos.oldest_rev)
self.assertEqual(None, self.repos.previous_rev(0))
self.assertEqual(None, self.repos.previous_rev(1))
self.assertEqual(HEAD, self.repos.youngest_rev)
self.assertEqual(6, self.repos.next_rev(5))
self.assertEqual(7, self.repos.next_rev(6))
# ...
self.assertEqual(None, self.repos.next_rev(HEAD))
self.assertRaises(NoSuchChangeset, self.repos.normalize_rev, HEAD + 1)
def test_rev_path_navigation(self):
self.assertEqual(1, self.repos.oldest_rev)
self.assertEqual(None, self.repos.previous_rev(0, u'tête'))
self.assertEqual(None, self.repos.previous_rev(1, u'tête'))
self.assertEqual(HEAD, self.repos.youngest_rev)
self.assertEqual(6, self.repos.next_rev(5, u'tête'))
self.assertEqual(13, self.repos.next_rev(6, u'tête'))
# ...
self.assertEqual(None, self.repos.next_rev(HEAD, u'tête'))
        # test accented characters
self.assertEqual(None, self.repos.previous_rev(17, u'tête/R\xe9sum\xe9.txt'))
self.assertEqual(17, self.repos.next_rev(16, u'tête/R\xe9sum\xe9.txt'))
def test_has_node(self):
self.assertEqual(False, self.repos.has_node(u'/tête/dir1', 3))
self.assertEqual(True, self.repos.has_node(u'/tête/dir1', 4))
self.assertEqual(True, self.repos.has_node(u'/tête/dir1'))
def test_get_node(self):
node = self.repos.get_node(u'/tête')
self.assertEqual(u'tête', node.name)
self.assertEqual(u'/tête', node.path)
self.assertEqual(Node.DIRECTORY, node.kind)
self.assertEqual(HEAD, node.rev)
self.assertEqual(datetime(2007, 4, 30, 17, 45, 26, 0, utc),
node.last_modified)
node = self.repos.get_node(u'/tête/README.txt')
self.assertEqual('README.txt', node.name)
self.assertEqual(u'/tête/README.txt', node.path)
self.assertEqual(Node.FILE, node.kind)
self.assertEqual(3, node.rev)
self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 0, utc), node.last_modified)
def test_get_node_specific_rev(self):
node = self.repos.get_node(u'/tête', 1)
self.assertEqual(u'tête', node.name)
self.assertEqual(u'/tête', node.path)
self.assertEqual(Node.DIRECTORY, node.kind)
self.assertEqual(1, node.rev)
self.assertEqual(datetime(2005, 4, 1, 10, 0, 52, 0, utc), node.last_modified)
node = self.repos.get_node(u'/tête/README.txt', 2)
self.assertEqual('README.txt', node.name)
self.assertEqual(u'/tête/README.txt', node.path)
self.assertEqual(Node.FILE, node.kind)
self.assertEqual(2, node.rev)
self.assertEqual(datetime(2005, 4, 1, 13, 12, 18, 0, utc), node.last_modified)
def test_get_dir_entries(self):
node = self.repos.get_node(u'/tête')
entries = node.get_entries()
self.assertEqual('dir1', entries.next().name)
self.assertEqual('mpp_proc', entries.next().name)
self.assertEqual('v2', entries.next().name)
self.assertEqual('README3.txt', entries.next().name)
self.assertEqual(u'R\xe9sum\xe9.txt', entries.next().name)
self.assertEqual('README.txt', entries.next().name)
self.assertRaises(StopIteration, entries.next)
def test_get_file_entries(self):
node = self.repos.get_node(u'/tête/README.txt')
entries = node.get_entries()
self.assertRaises(StopIteration, entries.next)
def test_get_dir_content(self):
node = self.repos.get_node(u'/tête')
self.assertEqual(None, node.content_length)
self.assertEqual(None, node.content_type)
self.assertEqual(None, node.get_content())
def test_get_file_content(self):
node = self.repos.get_node(u'/tête/README.txt')
self.assertEqual(8, node.content_length)
self.assertEqual('text/plain', node.content_type)
self.assertEqual('A test.\n', node.get_content().read())
def test_get_dir_properties(self):
f = self.repos.get_node(u'/tête')
props = f.get_properties()
self.assertEqual(1, len(props))
def test_get_file_properties(self):
f = self.repos.get_node(u'/tête/README.txt')
props = f.get_properties()
self.assertEqual('native', props['svn:eol-style'])
self.assertEqual('text/plain', props['svn:mime-type'])
def test_created_path_rev(self):
node = self.repos.get_node(u'/tête/README3.txt', 15)
self.assertEqual(14, node.rev)
self.assertEqual(u'/tête/README3.txt', node.path)
self.assertEqual(14, node.created_rev)
self.assertEqual(u'tête/README3.txt', node.created_path)
def test_created_path_rev_parent_copy(self):
node = self.repos.get_node('/tags/v1/README.txt', 15)
self.assertEqual(3, node.rev)
self.assertEqual('/tags/v1/README.txt', node.path)
self.assertEqual(3, node.created_rev)
self.assertEqual(u'tête/README.txt', node.created_path)
# Revision Log / node history
def test_get_node_history(self):
node = self.repos.get_node(u'/tête/README3.txt')
history = node.get_history()
self.assertEqual((u'tête/README3.txt', 14, 'copy'), history.next())
self.assertEqual((u'tête/README2.txt', 6, 'copy'), history.next())
self.assertEqual((u'tête/README.txt', 3, 'edit'), history.next())
self.assertEqual((u'tête/README.txt', 2, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_node_history_limit(self):
node = self.repos.get_node(u'/tête/README3.txt')
history = node.get_history(2)
self.assertEqual((u'tête/README3.txt', 14, 'copy'), history.next())
self.assertEqual((u'tête/README2.txt', 6, 'copy'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_node_history_follow_copy(self):
node = self.repos.get_node('/tags/v1/README.txt')
history = node.get_history()
self.assertEqual(('tags/v1/README.txt', 7, 'copy'), history.next())
self.assertEqual((u'tête/README.txt', 3, 'edit'), history.next())
self.assertEqual((u'tête/README.txt', 2, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_copy_ancestry(self):
node = self.repos.get_node('/tags/v1/README.txt')
ancestry = node.get_copy_ancestry()
self.assertEqual([(u'tête/README.txt', 6)], ancestry)
for path, rev in ancestry:
self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
node = self.repos.get_node(u'/tête/README3.txt')
ancestry = node.get_copy_ancestry()
self.assertEqual([(u'tête/README2.txt', 13),
(u'tête/README.txt', 3)], ancestry)
for path, rev in ancestry:
self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
node = self.repos.get_node('/branches/v1x')
ancestry = node.get_copy_ancestry()
self.assertEqual([(u'tags/v1.1', 11),
(u'branches/v1x', 9),
(u'tags/v1', 7),
(u'tête', 6)], ancestry)
for path, rev in ancestry:
self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
def test_get_copy_ancestry_for_move(self):
node = self.repos.get_node(u'/tête/dir1/dir2', 5)
ancestry = node.get_copy_ancestry()
self.assertEqual([(u'tête/dir2', 4)], ancestry)
for path, rev in ancestry:
self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
def test_get_branch_origin(self):
node = self.repos.get_node('/tags/v1/README.txt')
self.assertEqual(7, node.get_branch_origin())
node = self.repos.get_node(u'/tête/README3.txt')
self.assertEqual(14, node.get_branch_origin())
node = self.repos.get_node('/branches/v1x')
self.assertEqual(12, node.get_branch_origin())
node = self.repos.get_node(u'/tête/dir1/dir2', 5)
self.assertEqual(5, node.get_branch_origin())
# Revision Log / path history
def test_get_path_history(self):
history = self.repos.get_path_history(u'/tête/README2.txt', None)
self.assertEqual((u'tête/README2.txt', 14, 'delete'), history.next())
self.assertEqual((u'tête/README2.txt', 6, 'copy'), history.next())
self.assertEqual((u'tête/README.txt', 3, 'unknown'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_path_history_copied_file(self):
history = self.repos.get_path_history('/tags/v1/README.txt', None)
self.assertEqual(('tags/v1/README.txt', 7, 'copy'), history.next())
self.assertEqual((u'tête/README.txt', 3, 'unknown'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_path_history_copied_dir(self):
history = self.repos.get_path_history('/branches/v1x', None)
self.assertEqual(('branches/v1x', 12, 'copy'), history.next())
self.assertEqual(('tags/v1.1', 10, 'unknown'), history.next())
self.assertEqual(('branches/v1x', 11, 'delete'), history.next())
self.assertEqual(('branches/v1x', 9, 'edit'), history.next())
self.assertEqual(('branches/v1x', 8, 'copy'), history.next())
self.assertEqual(('tags/v1', 7, 'unknown'), history.next())
self.assertRaises(StopIteration, history.next)
# Diffs
def _cmp_diff(self, expected, got):
if expected[0]:
old = self.repos.get_node(*expected[0])
self.assertEqual((old.path, old.rev), (got[0].path, got[0].rev))
if expected[1]:
new = self.repos.get_node(*expected[1])
self.assertEqual((new.path, new.rev), (got[1].path, got[1].rev))
self.assertEqual(expected[2], (got[2], got[3]))
def test_diff_file_different_revs(self):
diffs = self.repos.get_changes(u'tête/README.txt', 2, u'tête/README.txt', 3)
self._cmp_diff(((u'tête/README.txt', 2),
(u'tête/README.txt', 3),
(Node.FILE, Changeset.EDIT)), diffs.next())
self.assertRaises(StopIteration, diffs.next)
def test_diff_file_different_files(self):
diffs = self.repos.get_changes('branches/v1x/README.txt', 12,
'branches/v1x/README2.txt', 12)
self._cmp_diff((('branches/v1x/README.txt', 12),
('branches/v1x/README2.txt', 12),
(Node.FILE, Changeset.EDIT)), diffs.next())
self.assertRaises(StopIteration, diffs.next)
def test_diff_file_no_change(self):
diffs = self.repos.get_changes(u'tête/README.txt', 7,
'tags/v1/README.txt', 7)
self.assertRaises(StopIteration, diffs.next)
def test_diff_dir_different_revs(self):
diffs = self.repos.get_changes(u'tête', 4, u'tête', 8)
self._cmp_diff((None, (u'tête/dir1/dir2', 8),
(Node.DIRECTORY, Changeset.ADD)), diffs.next())
self._cmp_diff((None, (u'tête/dir1/dir3', 8),
(Node.DIRECTORY, Changeset.ADD)), diffs.next())
self._cmp_diff((None, (u'tête/README2.txt', 6),
(Node.FILE, Changeset.ADD)), diffs.next())
self._cmp_diff(((u'tête/dir2', 4), None,
(Node.DIRECTORY, Changeset.DELETE)), diffs.next())
self._cmp_diff(((u'tête/dir3', 4), None,
(Node.DIRECTORY, Changeset.DELETE)), diffs.next())
self.assertRaises(StopIteration, diffs.next)
def test_diff_dir_different_dirs(self):
diffs = self.repos.get_changes(u'tête', 1, 'branches/v1x', 12)
self._cmp_diff((None, ('branches/v1x/dir1', 12),
(Node.DIRECTORY, Changeset.ADD)), diffs.next())
self._cmp_diff((None, ('branches/v1x/dir1/dir2', 12),
(Node.DIRECTORY, Changeset.ADD)), diffs.next())
self._cmp_diff((None, ('branches/v1x/dir1/dir3', 12),
(Node.DIRECTORY, Changeset.ADD)), diffs.next())
self._cmp_diff((None, ('branches/v1x/README.txt', 12),
(Node.FILE, Changeset.ADD)), diffs.next())
self._cmp_diff((None, ('branches/v1x/README2.txt', 12),
(Node.FILE, Changeset.ADD)), diffs.next())
self.assertRaises(StopIteration, diffs.next)
def test_diff_dir_no_change(self):
diffs = self.repos.get_changes(u'tête', 7,
'tags/v1', 7)
self.assertRaises(StopIteration, diffs.next)
# Changesets
def test_changeset_repos_creation(self):
chgset = self.repos.get_changeset(0)
self.assertEqual(0, chgset.rev)
self.assertEqual('', chgset.message)
self.assertEqual('', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 9, 57, 41, 0, utc), chgset.date)
self.assertRaises(StopIteration, chgset.get_changes().next)
def test_changeset_added_dirs(self):
chgset = self.repos.get_changeset(1)
self.assertEqual(1, chgset.rev)
self.assertEqual('Initial directory layout.', chgset.message)
self.assertEqual('john', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 10, 0, 52, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual(('branches', Node.DIRECTORY, Changeset.ADD, None, -1),
changes.next())
self.assertEqual(('tags', Node.DIRECTORY, Changeset.ADD, None, -1),
changes.next())
self.assertEqual((u'tête', Node.DIRECTORY, Changeset.ADD, None, -1),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_file_edit(self):
chgset = self.repos.get_changeset(3)
self.assertEqual(3, chgset.rev)
self.assertEqual('Fixed README.\n', chgset.message)
self.assertEqual('kate', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual((u'tête/README.txt', Node.FILE, Changeset.EDIT,
u'tête/README.txt', 2), changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_dir_moves(self):
chgset = self.repos.get_changeset(5)
self.assertEqual(5, chgset.rev)
self.assertEqual('Moved directories.', chgset.message)
self.assertEqual('kate', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 16, 25, 39, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual((u'tête/dir1/dir2', Node.DIRECTORY, Changeset.MOVE,
u'tête/dir2', 4), changes.next())
self.assertEqual((u'tête/dir1/dir3', Node.DIRECTORY, Changeset.MOVE,
u'tête/dir3', 4), changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_file_copy(self):
chgset = self.repos.get_changeset(6)
self.assertEqual(6, chgset.rev)
self.assertEqual('More things to read', chgset.message)
self.assertEqual('john', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 18, 56, 46, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual((u'tête/README2.txt', Node.FILE, Changeset.COPY,
u'tête/README.txt', 3), changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_root_propset(self):
chgset = self.repos.get_changeset(13)
self.assertEqual(13, chgset.rev)
self.assertEqual('Setting property on the repository_dir root',
chgset.message)
changes = chgset.get_changes()
self.assertEqual(('/', Node.DIRECTORY, Changeset.EDIT, '/', 12),
changes.next())
self.assertEqual((u'tête', Node.DIRECTORY, Changeset.EDIT, u'tête', 6),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_base_path_rev(self):
chgset = self.repos.get_changeset(9)
self.assertEqual(9, chgset.rev)
changes = chgset.get_changes()
self.assertEqual(('branches/v1x/README.txt', Node.FILE,
Changeset.EDIT, u'tête/README.txt', 3),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_rename_and_edit(self):
chgset = self.repos.get_changeset(14)
self.assertEqual(14, chgset.rev)
changes = chgset.get_changes()
self.assertEqual((u'tête/README3.txt', Node.FILE,
Changeset.MOVE, u'tête/README2.txt', 13),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_edit_after_wc2wc_copy__original_deleted(self):
chgset = self.repos.get_changeset(16)
self.assertEqual(16, chgset.rev)
changes = chgset.get_changes()
self.assertEqual(('branches/v2', Node.DIRECTORY, Changeset.COPY,
'tags/v1.1', 14),
changes.next())
self.assertEqual(('branches/v2/README2.txt', Node.FILE,
Changeset.EDIT, u'tête/README2.txt', 6),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_fancy_rename_double_delete(self):
chgset = self.repos.get_changeset(19)
self.assertEqual(19, chgset.rev)
changes = chgset.get_changes()
self.assertEqual((u'tête/Xprimary_proc/Xprimary_pkg.vhd', Node.FILE,
Changeset.DELETE,
u'tête/Xprimary_proc/Xprimary_pkg.vhd', 18),
changes.next())
self.assertEqual((u'tête/mpp_proc', Node.DIRECTORY,
Changeset.MOVE, u'tête/Xprimary_proc', 18),
changes.next())
self.assertEqual((u'tête/mpp_proc/Xprimary_proc', Node.DIRECTORY,
Changeset.COPY, u'tête/Xprimary_proc', 18),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_utf_8(self):
chgset = self.repos.get_changeset(20)
self.assertEqual(20, chgset.rev)
self.assertEqual(u'Chez moi ça marche\n', chgset.message)
self.assertEqual(u'Jonas Borgström', chgset.author)
class ScopedSubversionRepositoryTestCase(unittest.TestCase):
def setUp(self):
self.repos = SubversionRepository(REPOS_PATH + u'/tête',
{'name': 'repo', 'id': 1},
logger_factory('test'))
def tearDown(self):
self.repos = None
def test_repos_normalize_path(self):
self.assertEqual('/', self.repos.normalize_path('/'))
self.assertEqual('/', self.repos.normalize_path(''))
self.assertEqual('/', self.repos.normalize_path(None))
self.assertEqual('dir1', self.repos.normalize_path('dir1'))
self.assertEqual('dir1', self.repos.normalize_path('/dir1'))
self.assertEqual('dir1', self.repos.normalize_path('dir1/'))
self.assertEqual('dir1', self.repos.normalize_path('/dir1/'))
def test_repos_normalize_rev(self):
self.assertEqual(HEAD, self.repos.normalize_rev('latest'))
self.assertEqual(HEAD, self.repos.normalize_rev('head'))
self.assertEqual(HEAD, self.repos.normalize_rev(''))
self.assertEqual(HEAD, self.repos.normalize_rev(None))
self.assertEqual(5, self.repos.normalize_rev('5'))
self.assertEqual(5, self.repos.normalize_rev(5))
def test_rev_navigation(self):
self.assertEqual(1, self.repos.oldest_rev)
self.assertEqual(None, self.repos.previous_rev(0))
self.assertEqual(1, self.repos.previous_rev(2))
self.assertEqual(HEAD, self.repos.youngest_rev)
self.assertEqual(2, self.repos.next_rev(1))
self.assertEqual(3, self.repos.next_rev(2))
# ...
self.assertEqual(None, self.repos.next_rev(HEAD))
def test_has_node(self):
self.assertEqual(False, self.repos.has_node('/dir1', 3))
self.assertEqual(True, self.repos.has_node('/dir1', 4))
def test_get_node(self):
node = self.repos.get_node('/dir1')
self.assertEqual('dir1', node.name)
self.assertEqual('/dir1', node.path)
self.assertEqual(Node.DIRECTORY, node.kind)
self.assertEqual(5, node.rev)
self.assertEqual(datetime(2005, 4, 1, 16, 25, 39, 0, utc), node.last_modified)
node = self.repos.get_node('/README.txt')
self.assertEqual('README.txt', node.name)
self.assertEqual('/README.txt', node.path)
self.assertEqual(Node.FILE, node.kind)
self.assertEqual(3, node.rev)
self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 0, utc), node.last_modified)
def test_get_node_specific_rev(self):
node = self.repos.get_node('/dir1', 4)
self.assertEqual('dir1', node.name)
self.assertEqual('/dir1', node.path)
self.assertEqual(Node.DIRECTORY, node.kind)
self.assertEqual(4, node.rev)
self.assertEqual(datetime(2005, 4, 1, 15, 42, 35, 0, utc), node.last_modified)
node = self.repos.get_node('/README.txt', 2)
self.assertEqual('README.txt', node.name)
self.assertEqual('/README.txt', node.path)
self.assertEqual(Node.FILE, node.kind)
self.assertEqual(2, node.rev)
self.assertEqual(datetime(2005, 4, 1, 13, 12, 18, 0, utc), node.last_modified)
def test_get_dir_entries(self):
node = self.repos.get_node('/')
entries = node.get_entries()
self.assertEqual('dir1', entries.next().name)
self.assertEqual('mpp_proc', entries.next().name)
self.assertEqual('v2', entries.next().name)
self.assertEqual('README3.txt', entries.next().name)
self.assertEqual(u'R\xe9sum\xe9.txt', entries.next().name)
self.assertEqual('README.txt', entries.next().name)
self.assertRaises(StopIteration, entries.next)
def test_get_file_entries(self):
node = self.repos.get_node('/README.txt')
entries = node.get_entries()
self.assertRaises(StopIteration, entries.next)
def test_get_dir_content(self):
node = self.repos.get_node('/dir1')
self.assertEqual(None, node.content_length)
self.assertEqual(None, node.content_type)
self.assertEqual(None, node.get_content())
def test_get_file_content(self):
node = self.repos.get_node('/README.txt')
self.assertEqual(8, node.content_length)
self.assertEqual('text/plain', node.content_type)
self.assertEqual('A test.\n', node.get_content().read())
def test_get_dir_properties(self):
f = self.repos.get_node('/dir1')
props = f.get_properties()
self.assertEqual(0, len(props))
def test_get_file_properties(self):
f = self.repos.get_node('/README.txt')
props = f.get_properties()
self.assertEqual('native', props['svn:eol-style'])
self.assertEqual('text/plain', props['svn:mime-type'])
# Revision Log / node history
def test_get_node_history(self):
node = self.repos.get_node('/README3.txt')
history = node.get_history()
self.assertEqual(('README3.txt', 14, 'copy'), history.next())
self.assertEqual(('README2.txt', 6, 'copy'), history.next())
self.assertEqual(('README.txt', 3, 'edit'), history.next())
self.assertEqual(('README.txt', 2, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_node_history_follow_copy(self):
        node = self.repos.get_node('dir1/dir3')
history = node.get_history()
self.assertEqual(('dir1/dir3', 5, 'copy'), history.next())
self.assertEqual(('dir3', 4, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_copy_ancestry(self):
node = self.repos.get_node(u'/README3.txt')
ancestry = node.get_copy_ancestry()
self.assertEqual([(u'README2.txt', 13),
(u'README.txt', 3)], ancestry)
for path, rev in ancestry:
self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
def test_get_copy_ancestry_for_move(self):
node = self.repos.get_node(u'/dir1/dir2', 5)
ancestry = node.get_copy_ancestry()
self.assertEqual([(u'dir2', 4)], ancestry)
for path, rev in ancestry:
self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
def test_get_branch_origin(self):
node = self.repos.get_node(u'/README3.txt')
self.assertEqual(14, node.get_branch_origin())
node = self.repos.get_node(u'/dir1/dir2', 5)
self.assertEqual(5, node.get_branch_origin())
# Revision Log / path history
def test_get_path_history(self):
history = self.repos.get_path_history('dir3', None)
self.assertEqual(('dir3', 5, 'delete'), history.next())
self.assertEqual(('dir3', 4, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_path_history_copied_file(self):
history = self.repos.get_path_history('README3.txt', None)
self.assertEqual(('README3.txt', 14, 'copy'), history.next())
self.assertEqual(('README2.txt', 6, 'unknown'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_path_history_copied_dir(self):
history = self.repos.get_path_history('dir1/dir3', None)
self.assertEqual(('dir1/dir3', 5, 'copy'), history.next())
self.assertEqual(('dir3', 4, 'unknown'), history.next())
self.assertRaises(StopIteration, history.next)
def test_changeset_repos_creation(self):
chgset = self.repos.get_changeset(0)
self.assertEqual(0, chgset.rev)
self.assertEqual('', chgset.message)
self.assertEqual('', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 9, 57, 41, 0, utc), chgset.date)
self.assertRaises(StopIteration, chgset.get_changes().next)
def test_changeset_added_dirs(self):
chgset = self.repos.get_changeset(4)
self.assertEqual(4, chgset.rev)
self.assertEqual('More directories.', chgset.message)
self.assertEqual('john', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 15, 42, 35, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual(('dir1', Node.DIRECTORY, 'add', None, -1),
changes.next())
self.assertEqual(('dir2', Node.DIRECTORY, 'add', None, -1),
changes.next())
self.assertEqual(('dir3', Node.DIRECTORY, 'add', None, -1),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_file_edit(self):
chgset = self.repos.get_changeset(3)
self.assertEqual(3, chgset.rev)
self.assertEqual('Fixed README.\n', chgset.message)
self.assertEqual('kate', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual(('README.txt', Node.FILE, Changeset.EDIT,
'README.txt', 2), changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_dir_moves(self):
chgset = self.repos.get_changeset(5)
self.assertEqual(5, chgset.rev)
self.assertEqual('Moved directories.', chgset.message)
self.assertEqual('kate', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 16, 25, 39, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual(('dir1/dir2', Node.DIRECTORY, Changeset.MOVE,
'dir2', 4), changes.next())
self.assertEqual(('dir1/dir3', Node.DIRECTORY, Changeset.MOVE,
'dir3', 4), changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_file_copy(self):
chgset = self.repos.get_changeset(6)
self.assertEqual(6, chgset.rev)
self.assertEqual('More things to read', chgset.message)
self.assertEqual('john', chgset.author)
self.assertEqual(datetime(2005, 4, 1, 18, 56, 46, 0, utc), chgset.date)
changes = chgset.get_changes()
self.assertEqual(('README2.txt', Node.FILE, Changeset.COPY,
'README.txt', 3), changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_root_propset(self):
chgset = self.repos.get_changeset(13)
self.assertEqual(13, chgset.rev)
self.assertEqual('Setting property on the repository_dir root',
chgset.message)
changes = chgset.get_changes()
self.assertEqual(('/', Node.DIRECTORY, Changeset.EDIT, '/', 6),
changes.next())
self.assertRaises(StopIteration, changes.next)
def test_changeset_copy_from_outside_and_delete(self):
chgset = self.repos.get_changeset(21)
self.assertEqual(21, chgset.rev)
self.assertEqual('copy from outside of the scope + delete',
chgset.message)
changes = chgset.get_changes()
self.assertEqual(('v2', 'dir', Changeset.ADD, None, -1),
changes.next())
self.assertRaises(StopIteration, changes.next)
class RecentPathScopedRepositoryTestCase(unittest.TestCase):
def setUp(self):
self.repos = SubversionRepository(REPOS_PATH + u'/tête/dir1',
{'name': 'repo', 'id': 1},
logger_factory('test'))
def tearDown(self):
self.repos = None
def test_rev_navigation(self):
self.assertEqual(False, self.repos.has_node('/', 1))
self.assertEqual(False, self.repos.has_node('/', 2))
self.assertEqual(False, self.repos.has_node('/', 3))
self.assertEqual(True, self.repos.has_node('/', 4))
# We can't make this work anymore because of #5213.
# self.assertEqual(4, self.repos.oldest_rev)
self.assertEqual(1, self.repos.oldest_rev) # should really be 4...
self.assertEqual(None, self.repos.previous_rev(4))
class NonSelfContainedScopedTestCase(unittest.TestCase):
def setUp(self):
self.repos = SubversionRepository(REPOS_PATH + '/tags/v1',
{'name': 'repo', 'id': 1},
logger_factory('test'))
def tearDown(self):
self.repos = None
def test_mixed_changeset(self):
chgset = self.repos.get_changeset(7)
self.assertEqual(7, chgset.rev)
changes = chgset.get_changes()
self.assertEqual(('/', Node.DIRECTORY, Changeset.ADD, None, -1),
changes.next())
self.assertRaises(TracError, lambda: self.repos.get_node(None, 6))
class AnotherNonSelfContainedScopedTestCase(unittest.TestCase):
def setUp(self):
self.repos = SubversionRepository(REPOS_PATH + '/branches',
{'name': 'repo', 'id': 1},
logger_factory('test'))
def tearDown(self):
self.repos = None
def test_mixed_changeset_with_edit(self):
chgset = self.repos.get_changeset(9)
self.assertEqual(9, chgset.rev)
changes = chgset.get_changes()
self.assertEqual(('v1x/README.txt', Node.FILE, Changeset.EDIT,
'v1x/README.txt', 8),
changes.next())
def suite():
suite = unittest.TestSuite()
if has_svn:
suite.addTest(unittest.makeSuite(SubversionRepositoryTestCase,
'test', suiteClass=SubversionRepositoryTestSetup))
suite.addTest(unittest.makeSuite(ScopedSubversionRepositoryTestCase,
'test', suiteClass=SubversionRepositoryTestSetup))
suite.addTest(unittest.makeSuite(RecentPathScopedRepositoryTestCase,
'test', suiteClass=SubversionRepositoryTestSetup))
suite.addTest(unittest.makeSuite(NonSelfContainedScopedTestCase,
'test', suiteClass=SubversionRepositoryTestSetup))
suite.addTest(unittest.makeSuite(AnotherNonSelfContainedScopedTestCase,
'test', suiteClass=SubversionRepositoryTestSetup))
else:
print "SKIP: versioncontrol/tests/svn_fs.py (no svn bindings)"
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
{
"content_hash": "ba63394d227c2e34929d008aab319348",
"timestamp": "",
"source": "github",
"line_count": 820,
"max_line_length": 86,
"avg_line_length": 44.886585365853655,
"alnum_prop": 0.6030918031896106,
"repo_name": "dokipen/trac",
"id": "c3c014a28ddc01ad316578ac4456cf48e5be0757",
"size": "37503",
"binary": false,
"copies": "1",
"ref": "refs/heads/announcer",
"path": "trac/versioncontrol/tests/svn_fs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "11612"
},
{
"name": "JavaScript",
"bytes": "45742"
},
{
"name": "Python",
"bytes": "2183584"
}
],
"symlink_target": ""
}
|
import os
import djcelery
from gork.settings.base import *
djcelery.setup_loader()
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '%s/../test.db' % PROJECT_ROOT,
}
}
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
'ask',
#'feedz',
'djcelery',
'entrez',
'endless_pagination',
'gfavor',
'mlst',
'sale',
#'actstream',
)
#ACTSTREAM_SETTINGS = {
# 'MODELS': ('gauth.guser', 'auth.group', 'sites.site', 'comments.comment'),
# 'MANAGER': 'actstream.managers.ActionManager',
# 'FETCH_RELATIONS': True,
# 'USE_PREFETCH': True,
#'USE_JSONFIELD': True,
#'GFK_FETCH_DEPTH': 1,
#}
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
INTERNAL_IPS = (
'127.0.0.1',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
SECRET_KEY = '007'
SITE_THEME = 'default'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'assets/'),
os.path.join(PROJECT_ROOT, 'themes/%s/assets/' % SITE_THEME),
)
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'themes/%s/templates/' % SITE_THEME),
)
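# Worked example (not part of the original file, values assumed): with the
# default SITE_THEME of 'default', the settings above resolve to
# '<PROJECT_ROOT>/assets/' and '<PROJECT_ROOT>/themes/default/assets/' for
# STATICFILES_DIRS, and '<PROJECT_ROOT>/themes/default/templates/' for
# TEMPLATE_DIRS.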
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'gauth.backends.RoleBackend',
'gauth.backends.PermissionBackend',
)
|
{
"content_hash": "c44ef4f857010436cd28a4a97b338a47",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6360448807854138,
"repo_name": "indexofire/gork",
"id": "24e77bdea5e20fe3352c3ca188167dbc3fa02b47",
"size": "1450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gork/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199039"
},
{
"name": "JavaScript",
"bytes": "89817"
},
{
"name": "Python",
"bytes": "1120919"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
}
|
import sqlite3
from abc import ABCMeta
class Database:
__metaclass__ = ABCMeta
_sql_table = u'''TABLE DEFINITION'''
def __init__(self, database_file, database_name):
self._connection = sqlite3.connect(database_file, detect_types=sqlite3.PARSE_DECLTYPES)
self._database_name = database_name
self._cursor = self._connection.cursor()
self._cursor.execute(u'''SELECT name FROM sqlite_master
WHERE type='table' AND name='%s';''' % self._database_name)
if self._cursor.fetchone() is None:
self._cursor.execute(self._sql_table.format(self._database_name))
self._connection.commit()
def __del__(self):
self._connection.close()
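# Illustrative sketch, not part of the original file: a minimal concrete
# subclass. The table layout below is hypothetical; the point is that a
# subclass supplies ``_sql_table`` with a ``{0}`` placeholder for the table
# name, which __init__ fills in via str.format() when the table is missing.
class _ExampleCouponDatabase(Database):
    _sql_table = u'''CREATE TABLE {0} (
        id INTEGER PRIMARY KEY,
        title TEXT
    );'''
# Hypothetical usage: _ExampleCouponDatabase('coupons.db', 'coupons') creates
# the 'coupons' table on first use and reuses it afterwards.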
|
{
"content_hash": "bb562cd01905a994643f25b4c48f71fa",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 95,
"avg_line_length": 34.476190476190474,
"alnum_prop": 0.6339779005524862,
"repo_name": "Staubteufel/AliCouponHunter",
"id": "927182445496216a1cf56347890be6ab4125e5c8",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Database/Database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13410"
}
],
"symlink_target": ""
}
|
"""typy fragments"""
import inspect
from ._errors import TyError, FragmentError
__all__ = ('Fragment', 'is_fragment')
class Fragment(object):
def __init__(self):
raise NotImplementedError()
@classmethod
def init_idx(cls, ctx, idx_ast):
raise FragmentError(cls.__name__ + " does not implement init_idx.", cls)
@classmethod
def idx_eq(cls, ctx, idx1, idx2):
return idx1 == idx2
precedence = set()
##
## intro expression forms
##
@classmethod
def ana_Lambda(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support lambda literals.", e)
@classmethod
def trans_Lambda(cls, ctx, e, idx):
raise FragmentError(
cls.__name__ + " missing translation method: trans_Lambda.",
cls)
@classmethod
def ana_Dict(cls, ctx, e, idx):
raise TyError(
cls.__name__ + " does not support dictionary literals.",
e)
@classmethod
def trans_Dict(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Dict.",
cls)
@classmethod
def ana_Set(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support set literals.", e)
@classmethod
def trans_Set(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Set.", cls)
@classmethod
def ana_Num(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support number literals.", e)
@classmethod
def trans_Num(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Num.", cls)
@classmethod
def ana_Str(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support string literals.", e)
@classmethod
def trans_Str(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Str.", cls)
@classmethod # TODO what are these
def ana_Bytes(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support byte literals.", e)
@classmethod
def trans_Bytes(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Bytes.", cls)
@classmethod # TODO what are these
def ana_NameConstant(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support name constant literals.", e)
@classmethod
def trans_NameConstant(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_NameTyExprstant.", cls)
@classmethod
def ana_List(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support list literals.", e)
@classmethod
def trans_List(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_List.", cls)
@classmethod
def ana_Tuple(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support tuple literals.", e)
@classmethod
def trans_Tuple(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Tuple.", e)
##
## function def forms
##
@classmethod
def syn_FunctionDef(cls, ctx, stmt):
raise TyError(cls.__name__ + " does not support fragment-decorated def literals.",
stmt)
@classmethod
def ana_FunctionDef(cls, ctx, stmt, idx):
raise TyError(cls.__name__ + " does not support def literals.", stmt)
@classmethod
def trans_FunctionDef(cls, ctx, stmt, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_FunctionDef.", stmt)
##
## other statement forms
##
@classmethod
def check_Assign(cls, ctx, stmt):
raise TyError(cls.__name__ + " does not support assignment statements.", cls)
@classmethod
def trans_Assign(cls, ctx, stmt):
raise FragmentError(cls.__name__ + " missing translation method: trans_Assign", cls)
@classmethod
def check_Expr(cls, ctx, stmt):
raise TyError(cls.__name__ + " does not support expression statements.", cls)
@classmethod
def trans_Expr(cls, ctx, stmt):
raise FragmentError(cls.__name__ + " missing translation method: trans_Expr", cls)
# Targeted Forms
@classmethod
def syn_UnaryOp(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support unary operations.", cls)
@classmethod
def trans_UnaryOp(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_UnaryOp", cls)
@classmethod
def syn_IfExp(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support if expressions.", cls)
@classmethod
def trans_IfExp(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_IfExp", cls)
@classmethod
def syn_Call(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support call expressions.", cls)
@classmethod
def trans_Call(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Call", cls)
@classmethod
def syn_Attribute(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support attribute expressions.", cls)
@classmethod
def trans_Attribute(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Attribute", cls)
@classmethod
def syn_Subscript(cls, ctx, e, idx):
raise TyError(cls.__name__ + " does not support subscript expressions.", cls)
@classmethod
def trans_Subscript(cls, ctx, e, idx):
raise FragmentError(cls.__name__ + " missing translation method: trans_Subscript", cls)
# Binary Forms
@classmethod
def syn_BinOp(cls, ctx, e):
raise TyError(cls.__name__ + " does not support binary operators.", cls)
@classmethod
def trans_BinOp(cls, ctx, e):
raise FragmentError(cls.__name__ + " missing translation method: trans_BinOp.", cls)
def is_fragment(x):
return inspect.isclass(x) and issubclass(x, Fragment)
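# Illustrative sketch, not part of the original module: a minimal fragment
# subclass showing the intended override pattern. What the hooks should return
# is an assumption here; real fragments implement their ana_*/trans_* methods
# according to the typy semantics they define.
class _ExampleNumFragment(Fragment):
    """Hypothetical fragment that only accepts number literals."""

    @classmethod
    def init_idx(cls, ctx, idx_ast):
        return ()  # assumed: this fragment carries no index information

    @classmethod
    def ana_Num(cls, ctx, e, idx):
        pass  # accept every number literal instead of raising TyError

    @classmethod
    def trans_Num(cls, ctx, e, idx):
        return e  # assumed: translate the literal to itself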
|
{
"content_hash": "f0db078aed9d2d4b382d7305f9817a23",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 102,
"avg_line_length": 31.743589743589745,
"alnum_prop": 0.6163166397415186,
"repo_name": "cyrus-/typy",
"id": "5911a890477feec6ce444eb4341ac75002bde3ce",
"size": "6190",
"binary": false,
"copies": "1",
"ref": "refs/heads/typy",
"path": "typy/_fragments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "224919"
}
],
"symlink_target": ""
}
|
"""
constants.py
author: Kevin Jamieson, kevin.g.jamieson@gmail.com,
Lalit Jain, lalitkumarj@gmail.com
last updated: 2/21/2015
Main configuration file for next_backend. Values can be adjusted by hand, but
they should normally be set through docker environment variables. To allow for
fig usage and docker linking, we use the environment variables described here:
http://docs.docker.com/userguide/dockerlinks/ Note that this forces us to run
redis and mongodb on 6379 and 27017, which seems to be best practice anyway.
"""
import os
# Variable to enable sites. This allows you to build clients and sites on the
# NEXT system.
SITES_ENABLED = False
# Backend Host Url
NEXT_BACKEND_GLOBAL_HOST = os.environ.get('NEXT_BACKEND_GLOBAL_HOST', None)
NEXT_BACKEND_GLOBAL_PORT = os.environ.get('NEXT_BACKEND_GLOBAL_PORT', '8000')
AWS_ACCESS_ID = os.environ.get('AWS_ACCESS_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
GIT_HASH = os.environ.get('GIT_HASH', '')
if GIT_HASH=='':
import subprocess
try:
GIT_HASH = subprocess.check_output(['git', 'rev-parse', 'HEAD'])[0:-1]
except:
GIT_HASH = ''
MINIONREDIS_HOST = os.environ.get('MINIONREDIS_PORT_6379_TCP_ADDR', 'localhost')
MINIONREDIS_PORT = int(os.environ.get('MINIONREDIS_PORT_6379_TCP_PORT', 6379))
MINIONREDIS_PASS = os.environ.get('MINIONREDIS_ENV_REDIS_PASS', '')
# PermStore constants
MONGODB_HOST = os.environ.get('MONGODB_PORT_27017_TCP_ADDR','localhost')
MONGODB_PORT = int(os.environ.get('MONGODB_PORT_27017_TCP_PORT', 27017) )
# Database client constants
app_data_database_id = 'app_data'
logs_database_id = 'logs'
maxStringLengthInInspectDatabase = 200
RABBIT_HOSTNAME = os.environ.get('RABBIT_PORT_5672_TCP_ADDR', 'localhost')
RABBIT_PORT = int(os.environ.get('RABBIT_PORT_5672_TCP_PORT', 5672))
BROKER_URL = 'amqp://{user}:{password}@{hostname}:{port}/{vhost}/'.format(
user=os.environ.get('RABBIT_ENV_RABBITMQ_USER', 'guest'),
password=os.environ.get('RABBIT_ENV_RABBITMQ_PASS', 'guest'),
hostname=RABBIT_HOSTNAME,
port=RABBIT_PORT,
vhost=os.environ.get('RABBIT_ENV_VHOST', ''))
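# Worked example (not part of the original file): with the defaults above
# (user and password 'guest', rabbitmq on localhost:5672, empty vhost), the
# format() call yields 'amqp://guest:guest@localhost:5672//'.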
RABBITREDIS_HOSTNAME = os.environ.get('RABBITREDIS_PORT_6379_TCP_ADDR', 'localhost')
RABBITREDIS_PORT = int(os.environ.get('RABBITREDIS_PORT_6379_TCP_PORT', 6379))
# https://github.com/celery/celery/issues/1909 describes the tradeoffs of redis and rabbitmq for results backend
CELERY_RESULT_BACKEND = 'redis://{hostname}:{port}/{db}/'.format(
hostname=RABBITREDIS_HOSTNAME,
port=RABBITREDIS_PORT,
db=os.environ.get('RABBITREDIS_DB', '0'))
# CELERY_RESULT_BACKEND = BROKER_URL
CELERY_TASK_RESULT_EXPIRES=60
CELERY_TASK_SERIALIZER='json'
CELERY_ACCEPT_CONTENT=['json'] # Ignore other content
CELERY_RESULT_SERIALIZER='json'
CELERYD_PREFETCH_MULTIPLIER=10
CELERY_SYNC_WORKER_COUNT = int(os.environ.get('CELERY_SYNC_WORKER_COUNT',1))
# from kombu import Exchange, Queue
# exchange_name = 'sync@{hostname}'.format(
# hostname=os.environ.get('HOSTNAME', 'localhost'))
# sync_exchange = Exchange(name=exchange_name, type='fanout')
# all_queues = ()
# for i in range(1,CELERY_SYNC_WORKER_COUNT+1):
# queue_name = 'sync_queue_{worker_number}@{hostname}'.format(
# worker_number=i,
# hostname=os.environ.get('HOSTNAME', 'localhost'))
# all_queues += (Queue(name=queue_name,exchange=sync_exchange),)
# CELERY_QUEUES = all_queues
|
{
"content_hash": "f0eca4976dd716ada511edd2746118e8",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 112,
"avg_line_length": 36.53763440860215,
"alnum_prop": 0.7195409064155386,
"repo_name": "crcox/NEXT",
"id": "97413ba280d00c2c4838f0e6aa6755011d6e85f5",
"size": "3398",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "next/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "74514"
},
{
"name": "JavaScript",
"bytes": "16603"
},
{
"name": "Python",
"bytes": "817267"
},
{
"name": "Shell",
"bytes": "5783"
}
],
"symlink_target": ""
}
|
"""Datastore backed Blobstore API stub.
Class:
BlobstoreServiceStub: BlobstoreService stub backed by datastore.
"""
import base64
import os
import time
import urlparse
from google.appengine.api import apiproxy_stub
from google.appengine.api import blobstore
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import users
from google.appengine.api.blobstore import blobstore_service_pb
from google.appengine.runtime import apiproxy_errors
__all__ = ['BlobStorage',
'BlobstoreServiceStub',
'ConfigurationError',
'CreateUploadSession',
'Error',
]
class Error(Exception):
"""Base blobstore error type."""
class ConfigurationError(Error):
"""Raised when environment is not correctly configured."""
_UPLOAD_SESSION_KIND = '__BlobUploadSession__'
_GS_INFO_KIND = '__GsFileInfo__'
def CreateUploadSession(creation,
success_path,
user,
max_bytes_per_blob,
max_bytes_total,
bucket_name=None):
"""Create upload session in datastore.
Creates an upload session and puts it in Datastore to be referenced by
  the upload handler later.
Args:
creation: Creation timestamp.
success_path: Path in users application to call upon success.
user: User that initiated this upload, if any.
max_bytes_per_blob: Maximum number of bytes for any blob in the upload.
max_bytes_total: Maximum aggregate bytes for all blobs in the upload.
    bucket_name: Name of the Google Storage bucket to upload the files to.
Returns:
String encoded key of new Datastore entity.
"""
entity = datastore.Entity(_UPLOAD_SESSION_KIND, namespace='')
entity_dict = {'creation': creation,
'success_path': success_path,
'user': user,
'state': 'init',
'max_bytes_per_blob': max_bytes_per_blob,
'max_bytes_total': max_bytes_total}
if bucket_name:
entity_dict['gs_bucket_name'] = bucket_name
entity.update(entity_dict)
datastore.Put(entity)
return str(entity.key())
class BlobStorage(object):
"""Base class for defining how blobs are stored.
This base class merely defines an interface that all stub blob-storage
mechanisms must implement.
"""
def StoreBlob(self, blob_key, blob_stream):
"""Store blob stream.
Implement this method to persist blob data.
Args:
blob_key: Blob key of blob to store.
blob_stream: Stream or stream-like object that will generate blob content.
"""
raise NotImplementedError('Storage class must override StoreBlob method.')
def OpenBlob(self, blob_key):
"""Open blob for streaming.
Args:
blob_key: Blob-key of existing blob to open for reading.
Returns:
Open file stream for reading blob. Caller is responsible for closing
file.
"""
raise NotImplementedError('Storage class must override OpenBlob method.')
def DeleteBlob(self, blob_key):
"""Delete blob data from storage.
Args:
blob_key: Blob-key of existing blob to delete.
"""
raise NotImplementedError('Storage class must override DeleteBlob method.')
class BlobstoreServiceStub(apiproxy_stub.APIProxyStub):
"""Datastore backed Blobstore service stub.
  This stub manages upload sessions in the Datastore and must be
provided with a blob_storage object to know where the actual blob
records can be found after having been uploaded.
This stub does not handle the actual creation of blobs, neither the BlobInfo
in the Datastore nor creation of blob data in the blob_storage. It does,
however, assume that another part of the system has created these and
uses these objects for deletion.
An upload session is created when the CreateUploadURL request is handled and
put in the Datastore under the __BlobUploadSession__ kind. There is no
  analog for this kind on a production server. Other than creation, this stub
  does not work with session objects. The URLs created by this service stub are:
http://<appserver-host>:<appserver-port>/<uploader-path>/<session-info>
  This is very similar to what the URL is on a production server. The session
  info is the string-encoded version of the session entity.
"""
_ACCEPTS_REQUEST_ID = True
GS_BLOBKEY_PREFIX = 'encoded_gs_file:'
THREADSAFE = False
def __init__(self,
blob_storage,
time_function=time.time,
service_name='blobstore',
uploader_path='_ah/upload/',
request_data=None):
"""Constructor.
Args:
blob_storage: BlobStorage class instance used for blob storage.
time_function: Used for dependency injection in tests.
service_name: Service name expected for all calls.
uploader_path: Path to upload handler pointed to by URLs generated
by this service stub.
request_data: A apiproxy_stub.RequestData instance used to look up state
associated with the request that generated an API call.
"""
super(BlobstoreServiceStub, self).__init__(service_name,
request_data=request_data)
self.__storage = blob_storage
self.__time_function = time_function
self.__next_session_id = 1
self.__uploader_path = uploader_path
@classmethod
def ToDatastoreBlobKey(cls, blobkey):
"""Given a string blobkey, return its db.Key."""
kind = blobstore.BLOB_INFO_KIND
if blobkey.startswith(cls.GS_BLOBKEY_PREFIX):
kind = _GS_INFO_KIND
return datastore_types.Key.from_path(kind,
blobkey,
namespace='')
@property
def storage(self):
"""Access BlobStorage used by service stub.
Returns:
BlobStorage instance used by blobstore service stub.
"""
return self.__storage
def _GetEnviron(self, name):
"""Helper method ensures environment configured as expected.
Args:
name: Name of environment variable to get.
Returns:
Environment variable associated with name.
Raises:
ConfigurationError if required environment variable is not found.
"""
try:
return os.environ[name]
except KeyError:
raise ConfigurationError('%s is not set in environment.' % name)
def _CreateSession(self,
success_path,
user,
max_bytes_per_blob=None,
max_bytes_total=None,
bucket_name=None):
"""Create new upload session.
Args:
success_path: Application path to call upon successful POST.
user: User that initiated the upload session.
max_bytes_per_blob: Maximum number of bytes for any blob in the upload.
max_bytes_total: Maximum aggregate bytes for all blobs in the upload.
bucket_name: The name of the Cloud Storage bucket where the files will be
uploaded.
Returns:
String encoded key of a new upload session created in the datastore.
"""
return CreateUploadSession(self.__time_function(),
success_path,
user,
max_bytes_per_blob,
max_bytes_total,
bucket_name)
def _Dynamic_CreateUploadURL(self, request, response, request_id):
"""Create upload URL implementation.
Create a new upload session. The upload session key is encoded in the
resulting POST URL. This URL is embedded in a POST form by the application
which contacts the uploader when the user posts.
Args:
request: A fully initialized CreateUploadURLRequest instance.
response: A CreateUploadURLResponse instance.
request_id: A unique string identifying the request associated with the
API call.
"""
max_bytes_per_blob = None
max_bytes_total = None
bucket_name = None
if request.has_max_upload_size_per_blob_bytes():
max_bytes_per_blob = request.max_upload_size_per_blob_bytes()
if request.has_max_upload_size_bytes():
max_bytes_total = request.max_upload_size_bytes()
if request.has_gs_bucket_name():
bucket_name = request.gs_bucket_name()
session = self._CreateSession(request.success_path(),
users.get_current_user(),
max_bytes_per_blob,
max_bytes_total,
bucket_name)
protocol, host, _, _, _, _ = urlparse.urlparse(
self.request_data.get_request_url(request_id))
response.set_url('%s://%s/%s%s' % (protocol, host, self.__uploader_path,
session))
@classmethod
def DeleteBlob(cls, blobkey, storage):
"""Delete a blob.
Args:
blobkey: blobkey in str.
storage: blobstore storage stub.
"""
datastore.Delete(cls.ToDatastoreBlobKey(blobkey))
blobinfo = datastore_types.Key.from_path(blobstore.BLOB_INFO_KIND,
blobkey,
namespace='')
datastore.Delete(blobinfo)
storage.DeleteBlob(blobkey)
def _Dynamic_DeleteBlob(self, request, response, unused_request_id):
"""Delete a blob by its blob-key.
Delete a blob from the blobstore using its blob-key. Deleting blobs that
do not exist is a no-op.
Args:
request: A fully initialized DeleteBlobRequest instance.
response: Not used but should be a VoidProto.
"""
for blobkey in request.blob_key_list():
self.DeleteBlob(blobkey, self.__storage)
def _Dynamic_FetchData(self, request, response, unused_request_id):
"""Fetch a blob fragment from a blob by its blob-key.
Fetches a blob fragment using its blob-key. Start index is inclusive,
end index is inclusive. Valid requests for information outside of
the range of the blob return a partial string or empty string if entirely
out of range.
Args:
request: A fully initialized FetchDataRequest instance.
response: A FetchDataResponse instance.
Raises:
ApplicationError when application has the following errors:
INDEX_OUT_OF_RANGE: Index is negative or end > start.
BLOB_FETCH_SIZE_TOO_LARGE: Request blob fragment is larger than
MAX_BLOB_FRAGMENT_SIZE.
BLOB_NOT_FOUND: If invalid blob-key is provided or is not found.
"""
start_index = request.start_index()
if start_index < 0:
raise apiproxy_errors.ApplicationError(
blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE)
end_index = request.end_index()
if end_index < start_index:
raise apiproxy_errors.ApplicationError(
blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE)
fetch_size = end_index - start_index + 1
if fetch_size > blobstore.MAX_BLOB_FETCH_SIZE:
raise apiproxy_errors.ApplicationError(
blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE)
blobkey = request.blob_key()
info_key = self.ToDatastoreBlobKey(blobkey)
try:
datastore.Get(info_key)
except datastore_errors.EntityNotFoundError:
raise apiproxy_errors.ApplicationError(
blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND)
blob_file = self.__storage.OpenBlob(blobkey)
blob_file.seek(start_index)
response.set_data(blob_file.read(fetch_size))
def _Dynamic_DecodeBlobKey(self, request, response, unused_request_id):
"""Decode a given blob key: data is simply base64-decoded.
Args:
request: A fully-initialized DecodeBlobKeyRequest instance
response: A DecodeBlobKeyResponse instance.
"""
for blob_key in request.blob_key_list():
response.add_decoded(blob_key.decode('base64'))
@classmethod
def CreateEncodedGoogleStorageKey(cls, filename):
"""Create an encoded blob key that represents a Google Storage file.
For now we'll just base64 encode the Google Storage filename, APIs that
accept encoded blob keys will need to be able to support Google Storage
files or blobstore files based on decoding this key.
Any stub that creates GS files should use this function to convert
a gs filename to a blobkey. The created blobkey should be used both
as its _GS_FILE_INFO entity's key name and as the storage key to
store its content in blobstore. This ensures the GS files created
can be operated by other APIs.
Note this encoding is easily reversible and is not encryption.
Args:
filename: gs filename of form 'bucket/filename'
Returns:
blobkey string of encoded filename.
"""
return cls.GS_BLOBKEY_PREFIX + base64.urlsafe_b64encode(filename)
def _Dynamic_CreateEncodedGoogleStorageKey(self, request, response,
unused_request_id):
"""Create an encoded blob key that represents a Google Storage file.
For now we'll just base64 encode the Google Storage filename, APIs that
accept encoded blob keys will need to be able to support Google Storage
files or blobstore files based on decoding this key.
Args:
request: A fully-initialized CreateEncodedGoogleStorageKeyRequest
instance.
response: A CreateEncodedGoogleStorageKeyResponse instance.
"""
filename = request.filename()[len(blobstore.GS_PREFIX):]
response.set_blob_key(
self.CreateEncodedGoogleStorageKey(filename))
def CreateBlob(self, blob_key, content):
"""Create new blob and put in storage and Datastore.
This is useful in testing where you have access to the stub.
Args:
blob_key: String blob-key of new blob.
content: Content of new blob as a string.
Returns:
New Datastore entity without blob meta-data fields.
"""
entity = datastore.Entity(blobstore.BLOB_INFO_KIND,
name=blob_key, namespace='')
entity['size'] = len(content)
datastore.Put(entity)
self.storage.CreateBlob(blob_key, content)
return entity
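# Illustrative sketch, not part of the original module: the Google Storage
# blob-key encoding used by CreateEncodedGoogleStorageKey is urlsafe base64
# behind a fixed prefix, so it is reversible. The helper below is hypothetical
# and only demonstrates that round trip.
def _decode_gs_blobkey(blobkey):
  """Return the 'bucket/filename' string behind an encoded GS blob-key."""
  prefix = BlobstoreServiceStub.GS_BLOBKEY_PREFIX
  if not blobkey.startswith(prefix):
    raise ValueError('not an encoded Google Storage blob-key')
  return base64.urlsafe_b64decode(blobkey[len(prefix):])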
|
{
"content_hash": "8163bcefbf93f00a0288b5e7910a106c",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 80,
"avg_line_length": 33.47552447552447,
"alnum_prop": 0.661653088225054,
"repo_name": "GoogleCloudPlatform/python-compat-runtime",
"id": "c3e70726f60f86ba795504f582b04bb96757e90e",
"size": "14966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine-compat/exported_appengine_sdk/google/appengine/api/blobstore/blobstore_stub.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30211"
},
{
"name": "HTML",
"bytes": "171272"
},
{
"name": "JavaScript",
"bytes": "414229"
},
{
"name": "Makefile",
"bytes": "2138"
},
{
"name": "PHP",
"bytes": "3132250"
},
{
"name": "Python",
"bytes": "11709249"
},
{
"name": "Shell",
"bytes": "1787"
}
],
"symlink_target": ""
}
|
"""This module contains functions and methods that relate to the DataInfo class
which provides a container for informational attributes as well as summary info
methods.
A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy. Here it allows those classes to be used in Tables and uniformly carry
table column attributes such as name, format, dtype, meta, and description.
"""
# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.
from __future__ import absolute_import, division, print_function
import os
import sys
import weakref
from copy import deepcopy
import numpy as np
from functools import partial
import warnings
import re
from collections import OrderedDict
from ..extern import six
from ..utils.compat import NUMPY_LT_1_8
from ..extern.six.moves import zip, cStringIO as StringIO
# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'
'Mean of empty slice|Degrees of freedom <= 0'),)
STRING_TYPE_NAMES = {(False, 'S'): 'str', # not PY3
(False, 'U'): 'unicode',
(True, 'S'): 'bytes', # PY3
(True, 'U'): 'str'}
def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
    This can be used by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
For bytes, string and unicode types, the output is shown below, where <N>
is the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy S<N> U<N>
Python 2 str<N> unicode<N>
Python 3 bytes<N> str<N>
Parameters
----------
dtype : str, np.dtype, type
Input dtype as an object that can be converted via np.dtype()
Returns
-------
dtype_info_name : str
String name of ``dtype``
"""
dtype = np.dtype(dtype)
if dtype.kind in ('S', 'U'):
length = re.search(r'(\d+)', dtype.str).group(1)
type_name = STRING_TYPE_NAMES[(not six.PY2, dtype.kind)]
out = type_name + length
else:
out = dtype.name
return out
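# Illustrative sketch, not part of the original module: a hypothetical helper
# showing the mapping described in the docstring above. On Python 3 this
# returns [('f8', 'float64'), ('i4', 'int32'), ('S4', 'bytes4'), ('U4', 'str4')].
def _dtype_info_name_examples():
    return [(dt, dtype_info_name(dt)) for dt in ('f8', 'i4', 'S4', 'U4')]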
def data_info_factory(names, funcs):
"""
Factory to create a function that can be used as an ``option``
for outputting data object summary information.
Examples
--------
>>> from astropy.utils.data_info import data_info_factory
>>> from astropy.table import Column
>>> c = Column([4., 3., 2., 1.])
>>> mystats = data_info_factory(names=['min', 'median', 'max'],
... funcs=[np.min, np.median, np.max])
>>> c.info(option=mystats)
min = 1.0
median = 2.5
max = 4.0
n_bad = 0
length = 4
Parameters
----------
names : list
List of information attribute names
funcs : list
List of functions that compute the corresponding information attribute
Returns
-------
func : function
Function that can be used as a data info option
"""
def func(dat):
outs = []
for name, func in zip(names, funcs):
try:
if isinstance(func, six.string_types):
out = getattr(dat, func)()
else:
out = func(dat)
except Exception:
outs.append('--')
else:
outs.append(str(out))
return OrderedDict(zip(names, outs))
return func
def _get_obj_attrs_map(obj, attrs):
"""
Get the values for object ``attrs`` and return as a dict. This
ignores any attributes that are None and in Py2 converts any unicode
attribute names or values to str. In the context of serializing the
supported core astropy classes this conversion will succeed and results
in more succinct and less python-specific YAML.
"""
out = {}
for attr in attrs:
val = getattr(obj, attr, None)
if val is not None:
if six.PY2:
attr = str(attr)
if isinstance(val, six.text_type):
val = str(val)
out[attr] = val
return out
def _get_data_attribute(dat, attr=None):
"""
Get a data object attribute for the ``attributes`` info summary method
"""
if attr == 'class':
val = type(dat).__name__
elif attr == 'dtype':
val = dtype_info_name(dat.info.dtype)
elif attr == 'shape':
datshape = dat.shape[1:]
val = datshape if datshape else ''
else:
val = getattr(dat.info, attr)
if val is None:
val = ''
return str(val)
class DataInfo(object):
"""
Descriptor that data classes use to add an ``info`` attribute for storing
data attributes in a uniform and portable way. Note that it *must* be
called ``info`` so that the DataInfo() object can be stored in the
``instance`` using the ``info`` key. Because owner_cls.x is a descriptor,
Python doesn't use __dict__['x'] normally, and the descriptor can safely
store stuff there. Thanks to http://nbviewer.ipython.org/urls/
gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb for
this trick that works for non-hashable classes.
Parameters
----------
bound : bool, default=False
If True this is a descriptor attribute in a class definition, else it
is a DataInfo() object that is bound to a data object instance.
"""
_stats = ['mean', 'std', 'min', 'max']
attrs_from_parent = set()
attr_names = set(['name', 'unit', 'dtype', 'format', 'description', 'meta'])
_attrs_no_copy = set()
_info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class')
    _represent_as_dict_attrs = ()
_parent = None
def __init__(self, bound=False):
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
self._attrs = dict((attr, None) for attr in self.attr_names)
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
info = self
info._parent_cls = owner_cls
else:
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
info._parent = instance
return info
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError('cannot set unbound descriptor')
if isinstance(value, DataInfo):
info = instance.__dict__['info'] = self.__class__(bound=True)
for attr in info.attr_names - info.attrs_from_parent - info._attrs_no_copy:
info._attrs[attr] = deepcopy(getattr(value, attr))
else:
raise TypeError('info must be set with a DataInfo instance')
def __getstate__(self):
return self._attrs
def __setstate__(self, state):
self._attrs = state
def __getattr__(self, attr):
if attr.startswith('_'):
return super(DataInfo, self).__getattribute__(attr)
if attr in self.attrs_from_parent:
return getattr(self._parent, attr)
try:
value = self._attrs[attr]
except KeyError:
super(DataInfo, self).__getattribute__(attr) # Generate AttributeError
# Weak ref for parent table
if attr == 'parent_table' and callable(value):
value = value()
# Mixins have a default dtype of Object if nothing else was set
if attr == 'dtype' and value is None:
value = np.dtype('O')
return value
def __setattr__(self, attr, value):
propobj = getattr(self.__class__, attr, None)
# If attribute is taken from parent properties and there is not a
# class property (getter/setter) for this attribute then set
# attribute directly in parent.
if attr in self.attrs_from_parent and not isinstance(propobj, property):
setattr(self._parent, attr, value)
return
# Check if there is a property setter and use it if possible.
if isinstance(propobj, property):
if propobj.fset is None:
raise AttributeError("can't set attribute")
propobj.fset(self, value)
return
# Private attr names get directly set
if attr.startswith('_'):
super(DataInfo, self).__setattr__(attr, value)
return
# Finally this must be an actual data attribute that this class is handling.
if attr not in self.attr_names:
raise AttributeError("attribute must be one of {0}".format(self.attr_names))
if attr == 'parent_table':
value = None if value is None else weakref.ref(value)
self._attrs[attr] = value
def _represent_as_dict(self):
"""
Get the values for the parent ``attrs`` and return as a dict.
This is typically used for serializing the parent.
"""
return _get_obj_attrs_map(self._parent, self._represent_as_dict_attrs)
def _construct_from_dict(self, map):
return self._parent_cls(**map)
info_summary_attributes = staticmethod(
data_info_factory(names=_info_summary_attrs,
funcs=[partial(_get_data_attribute, attr=attr)
for attr in _info_summary_attrs]))
# No nan* methods in numpy < 1.8
info_summary_stats = staticmethod(
data_info_factory(names=_stats,
funcs=[getattr(np, ('' if NUMPY_LT_1_8 else 'nan') + stat)
for stat in _stats]))
def __call__(self, option='attributes', out=''):
"""
Write summary information about data object to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
        The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: data object attributes like ``dtype`` and ``format``
        - ``stats``: basic statistics: mean, std, min, and max
If a function is specified then that function will be called with the
data object as its single argument. The function must return an
OrderedDict containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table import Column
>>> c = Column([1, 2], unit='m', dtype='int32')
>>> c.info()
dtype = int32
unit = m
class = Column
n_bad = 0
length = 2
>>> c.info(['attributes', 'stats'])
dtype = int32
unit = m
class = Column
mean = 1.5
std = 0.5
min = 1
max = 2
n_bad = 0
length = 2
Parameters
----------
option : str, function, list of (str or function)
Info option (default='attributes')
out : file-like object, None
Output destination (default=sys.stdout). If None then the
OrderedDict with information attributes is returned
Returns
-------
info : OrderedDict if out==None else None
"""
if out == '':
out = sys.stdout
dat = self._parent
info = OrderedDict()
name = dat.info.name
if name is not None:
info['name'] = name
options = option if isinstance(option, (list, tuple)) else [option]
for option in options:
if isinstance(option, six.string_types):
if hasattr(self, 'info_summary_' + option):
option = getattr(self, 'info_summary_' + option)
else:
raise ValueError('option={0} is not an allowed information type'
.format(option))
with warnings.catch_warnings():
for ignore_kwargs in IGNORE_WARNINGS:
warnings.filterwarnings('ignore', **ignore_kwargs)
info.update(option(dat))
if hasattr(dat, 'mask'):
n_bad = np.count_nonzero(dat.mask)
else:
try:
n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
except Exception:
n_bad = 0
info['n_bad'] = n_bad
try:
info['length'] = len(dat)
except TypeError:
pass
if out is None:
return info
for key, val in info.items():
if val != '':
out.write('{0} = {1}'.format(key, val) + os.linesep)
def __repr__(self):
if self._parent is None:
return super(DataInfo, self).__repr__()
out = StringIO()
self.__call__(out=out)
return out.getvalue()
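# Illustrative sketch, not part of the original module: how a data class is
# expected to hook in the descriptor. ``_ExampleData`` is hypothetical; the
# pattern it shows is the one described in the DataInfo docstring above: the
# class attribute must be called ``info`` so the bound DataInfo object can be
# cached in the instance under ``__dict__['info']``.
class _ExampleData(object):
    info = DataInfo()

    def __init__(self, name=None):
        if name is not None:
            self.info.name = name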
class BaseColumnInfo(DataInfo):
"""
Base info class for anything that can be a column in an astropy
Table. There are at least two classes that inherit from this:
ColumnInfo: for native astropy Column / MaskedColumn objects
MixinInfo: for mixin column objects
Note that this class is defined here so that mixins can use it
without importing the table package.
"""
attr_names = DataInfo.attr_names.union(['parent_table', 'indices'])
_attrs_no_copy = set(['parent_table'])
def iter_str_vals(self):
"""
This is a mixin-safe version of Column.iter_str_vals.
"""
col = self._parent
if self.parent_table is None:
from ..table.column import FORMATTER as formatter
else:
formatter = self.parent_table.formatter
_pformat_col_iter = formatter._pformat_col_iter
for str_val in _pformat_col_iter(col, -1, False, False, {}):
yield str_val
def adjust_indices(self, index, value, col_len):
'''
Adjust info indices after column modification.
Parameters
----------
index : slice, int, list, or ndarray
Element(s) of column to modify. This parameter can
be a single row number, a list of row numbers, an
ndarray of row numbers, a boolean ndarray (a mask),
or a column slice.
value : int, list, or ndarray
New value(s) to insert
col_len : int
Length of the column
'''
if not self.indices:
return
if isinstance(index, slice):
# run through each key in slice
t = index.indices(col_len)
keys = list(range(*t))
elif isinstance(index, np.ndarray) and index.dtype.kind == 'b':
# boolean mask
keys = np.where(index)[0]
else: # single int
keys = [index]
value = np.atleast_1d(value) # turn array(x) into array([x])
if value.size == 1:
# repeat single value
value = list(value) * len(keys)
for key, val in zip(keys, value):
for col_index in self.indices:
col_index.replace(key, self.name, val)
def slice_indices(self, col_slice, item, col_len):
'''
Given a sliced object, modify its indices
to correctly represent the slice.
Parameters
----------
col_slice : Column or mixin
Sliced object
item : slice, list, or ndarray
Slice used to create col_slice
col_len : int
Length of original object
'''
from ..table.sorted_array import SortedArray
if not getattr(self, '_copy_indices', True):
# Necessary because MaskedArray will perform a shallow copy
col_slice.info.indices = []
return col_slice
elif isinstance(item, slice):
col_slice.info.indices = [x[item] for x in self.indices]
elif self.indices:
if isinstance(item, np.ndarray) and item.dtype.kind == 'b':
# boolean mask
item = np.where(item)[0]
threshold = 0.6
# Empirical testing suggests that recreating a BST/RBT index is
# more effective than relabelling when less than ~60% of
# the total number of rows are involved, and is in general
# more effective for SortedArray.
            small = len(item) <= threshold * col_len
col_slice.info.indices = []
for index in self.indices:
if small or isinstance(index, SortedArray):
new_index = index.get_slice(col_slice, item)
else:
new_index = deepcopy(index)
new_index.replace_rows(item)
col_slice.info.indices.append(new_index)
return col_slice
class MixinInfo(BaseColumnInfo):
def __setattr__(self, attr, value):
# For mixin columns that live within a table, rename the column in the
# table when setting the name attribute. This mirrors the same
# functionality in the BaseColumn class.
if attr == 'name' and self.parent_table is not None:
from ..table.np_utils import fix_column_name
new_name = fix_column_name(value) # Ensure col name is numpy compatible
self.parent_table.columns._rename_column(self.name, new_name)
super(MixinInfo, self).__setattr__(attr, value)
class ParentDtypeInfo(MixinInfo):
"""Mixin that gets info.dtype from parent"""
    attrs_from_parent = set(['dtype'])  # dtype taken from parent
|
{
"content_hash": "3f87858735ce6286a61e220ce1aecfa3",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 88,
"avg_line_length": 34.494360902255636,
"alnum_prop": 0.5793689717181625,
"repo_name": "tbabej/astropy",
"id": "922b015329eedf5b65418b593d7578f7a16fe0df",
"size": "18440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/utils/data_info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7610601"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
}
|
from settings import url
from mood import MoodSpinner
from need import NeedCheckBoxes
from gender import GenderRadioBox
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.checkbox import CheckBox
from kivy.properties import BooleanProperty, StringProperty
class HomelessWidget(BoxLayout):
handicaped = BooleanProperty()
comment = StringProperty()
def __init__(self, *args, **kwargs):
super(HomelessWidget, self).__init__(orientation='vertical')
self.add_widget(MoodSpinner())
self.add_widget(NeedCheckBoxes())
self.add_widget(NeedCheckBoxes())
self.add_widget(CheckBox())
self.add_widget(GenderRadioBox())
|
{
"content_hash": "ace68ac3d8634f4d9dfb7e4e6882aa4d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 27,
"alnum_prop": 0.7244444444444444,
"repo_name": "b3j0f/simpleneed",
"id": "30453c3685d2c79e3d0690d9c8524f425ba2f601",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mobile/kivy/homeless.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "916"
},
{
"name": "CSS",
"bytes": "300731"
},
{
"name": "HTML",
"bytes": "143300"
},
{
"name": "IDL",
"bytes": "897"
},
{
"name": "Java",
"bytes": "2015"
},
{
"name": "JavaScript",
"bytes": "155037"
},
{
"name": "Makefile",
"bytes": "830"
},
{
"name": "Objective-C",
"bytes": "4431"
},
{
"name": "Prolog",
"bytes": "287"
},
{
"name": "Python",
"bytes": "102526"
},
{
"name": "Shell",
"bytes": "885"
},
{
"name": "TypeScript",
"bytes": "57763"
}
],
"symlink_target": ""
}
|
from django.utils import timezone
from django.test import TestCase
from breach.models import Target, Victim, Round, SampleSet
class RuptureTestCase(TestCase):
def setUp(self):
target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='0123456789'
)
self.victim = self.create_mock_victim(target)
round = Round.objects.create(
victim=self.victim,
amount=1,
knownsecret='testsecret',
knownalphabet='01'
)
self.samplesets = [
SampleSet.objects.create(
round=round,
candidatealphabet='0',
datalength=len('bigbigbigbigbigbig')
),
SampleSet.objects.create(
round=round,
candidatealphabet='1',
datalength=len('small')
)
]
# Balance checking
self.balance_victim = self.create_mock_victim(target)
balance_round = Round.objects.create(
victim=self.balance_victim,
amount=1,
knownsecret='testsecret',
knownalphabet='0123',
minroundcardinality=1,
maxroundcardinality=3
)
self.balance_samplesets = [
SampleSet.objects.create(
round=balance_round,
candidatealphabet='0',
datalength=len('bigbigbigbigbigbig')
),
SampleSet.objects.create(
round=balance_round,
candidatealphabet='123',
datalength=len('small')
)
]
def create_mock_victim(self, target):
mock_victim = Victim.objects.create(
target=target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/'
)
return mock_victim
def tearDown(self):
for sampleset in self.balance_samplesets + self.samplesets:
sampleset.completed = timezone.now()
sampleset.save()
|
{
"content_hash": "b43fe5c712d4296ebe95518bcfd9b54e",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 67,
"avg_line_length": 30.042857142857144,
"alnum_prop": 0.5359010936757014,
"repo_name": "dionyziz/rupture",
"id": "59fe77fe578c248f5694d83babcd5513468871cb",
"size": "2103",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "backend/breach/tests/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "61046"
},
{
"name": "CSS",
"bytes": "5609"
},
{
"name": "HTML",
"bytes": "34042"
},
{
"name": "JavaScript",
"bytes": "52116"
},
{
"name": "Makefile",
"bytes": "805"
},
{
"name": "Python",
"bytes": "160351"
},
{
"name": "Shell",
"bytes": "9852"
},
{
"name": "TeX",
"bytes": "225330"
}
],
"symlink_target": ""
}
|
import os
import sys
if os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Engine'):
sys.path.insert(0, 'lib.zip')
else:
import re
from google.appengine.tools.devappserver2.python import stubs
re_ = stubs.FakeFile._skip_files.pattern.replace('|^lib/.*', '')
re_ = re.compile(re_)
stubs.FakeFile._skip_files = re_
sys.path.insert(0, 'lib')
sys.path.insert(0, 'libx')
def webapp_add_wsgi_middleware(app):
from google.appengine.ext.appstats import recording
app = recording.appstats_wsgi_middleware(app)
return app
|
{
"content_hash": "2e5b1dc405842f0ec56290d0622d56f5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 27.3,
"alnum_prop": 0.7124542124542125,
"repo_name": "terradigital/gae-init",
"id": "142d4705af712c74c2af22c64a4bc67e59c8026e",
"size": "563",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "main/appengine_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5278"
},
{
"name": "CoffeeScript",
"bytes": "16016"
},
{
"name": "HTML",
"bytes": "67283"
},
{
"name": "JavaScript",
"bytes": "65"
},
{
"name": "Python",
"bytes": "113602"
}
],
"symlink_target": ""
}
|
import pandas as pd
import os
import shutil
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
TRAIN = ""
TEST = ""
def Create_scale_data_files(trainPath, testPath, truncate=False):
    global TRAIN, TEST
    # check if the scaled files have already been created
train_p , train_n = os.path.split(trainPath)
test_p ,test_n = os.path.split(testPath)
    if not truncate:
if ( os.path.isfile(train_p+'/scale_'+train_n) or os.path.isfile(test_p+'/scale_'+test_n)):
print ("scaled files are Exists in {} or {} :".format( train_p,test_p))
return False
try:
train_data = pd.read_csv(trainPath)
test_data = pd.read_csv(testPath)
except FileNotFoundError as e:
print(e)
return False
scaler = MinMaxScaler(feature_range=(0,1)) ## scale the data to 0-1 values for best training
scale_train_data = scaler.fit_transform(train_data)
    scale_test_data = scaler.transform(test_data)  # use 'transform' (not fit_transform) so the test data is scaled with the same mapping as the train data
# Print out the adjustment that the scaler applied to the total_earnings column of data
print("Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}".format(
scaler.scale_[8],
scaler.min_[8]))
# create new data frames for the scaled data and save it for use
train_df = pd.DataFrame(scale_train_data , columns=train_data.columns.values )
test_df = pd.DataFrame(scale_test_data , columns=test_data.columns.values)
TRAIN = train_p+'/scale_'+train_n
TEST = test_p+'/scale_'+test_n
train_df.to_csv(train_p+'/scale_'+train_n)
test_df.to_csv(test_p+'/scale_'+test_n)
print ("Scaled files are saved in : {} , {}".format('scale_'+train_n,
'scale_'+test_n ) )
return train_df , test_df
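# Illustrative sketch, not part of the original file: why the test set uses
# scaler.transform() rather than fit_transform(). Fitting on the training data
# fixes the min/max, and that same mapping must be reused on the test data so
# both end up on the same 0-1 scale. The numbers below are made up.
def _scaling_example():
    import numpy as np
    scaler = MinMaxScaler(feature_range=(0, 1))
    train = np.array([[0.0], [50.0], [100.0]])
    test = np.array([[25.0], [125.0]])
    scaled_train = scaler.fit_transform(train)  # -> [[0.0], [0.5], [1.0]]
    scaled_test = scaler.transform(test)        # -> [[0.25], [1.25]], same mapping
    return scaled_train, scaled_test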
def get_df():
try:
if (TRAIN != "" and TEST != ""):
train_df = pd.read_csv(TRAIN)
test_df = pd.read_csv(TEST)
return train_df , test_df
except FileNotFoundError as e:
print(e)
print ("you need to do scaling before ,try : Create_scale_data_files()")
return False
def export_to_GCP(name , inputs , outputs , sess):
if os.path.isdir('keras/export/'+name):
shutil.rmtree('keras/export/'+name)
model_builder = tf.saved_model.builder.SavedModelBuilder('keras/export/'+name)
inputs = {"inputs": tf.saved_model.utils.build_tensor_info(inputs)}
outputs = {'earnings': tf.saved_model.utils.build_tensor_info(outputs)}
signature = tf.saved_model.signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
model_builder.add_meta_graph_and_variables(
sess,
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:signature
})
model_builder.save()
# train, test = Create_scale_data_files('data/train.csv', 'data/test.csv',True)
# X = train.drop('total_earnings' , axis=1)
# y = train[['total_earnings']]
# print(X.head())
# print(y.head())
# #get_df()
|
{
"content_hash": "9c29c818b50b582f494f5458efc5a82f",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 120,
"avg_line_length": 35.44897959183673,
"alnum_prop": 0.5889464594127807,
"repo_name": "motkeg/Deep-learning",
"id": "ee7fc65dbb01f243dbac050361cfbc91a546cb04",
"size": "3474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CNN/simple/data/data_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "860"
},
{
"name": "Jupyter Notebook",
"bytes": "300425"
},
{
"name": "Python",
"bytes": "57655"
}
],
"symlink_target": ""
}
|
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
import waldur_core.core.fields
import waldur_mastermind.invoices.models
class Migration(migrations.Migration):
dependencies = [
('structure', '0010_customer_geolocation'),
('invoices', '0033_downtime_offering_and_resource'),
]
operations = [
migrations.CreateModel(
name='PaymentProfile',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
(
'payment_type',
waldur_mastermind.invoices.models.PaymentType(
choices=[
('invoices', 'Invoices'),
('pay_pal', 'PayPal'),
('pre_paid', 'Pre-paid agreements'),
('ita', 'ITA Payment gateway'),
],
max_length=30,
),
),
(
'attributes',
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, default=dict
),
),
(
'organization',
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to='structure.Customer',
),
),
],
options={'unique_together': {('organization', 'payment_type')},},
),
]
|
{
"content_hash": "a5fababab005d6d363f7562cf78fad5a",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 32.827586206896555,
"alnum_prop": 0.4096638655462185,
"repo_name": "opennode/waldur-mastermind",
"id": "59787dc6104c7b3b875382de16e646c3aa387465",
"size": "1954",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/invoices/migrations/0034_paymentprofile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # FIXME
import settings
from rain import Measurement, build_timestamp, get_prediction_data, AmbientDataFetcher
from utils import tweet_status
from weatherchecks import does_it_snow, does_it_rain
from datastorage import DataStorage
import json
# from json import encoder
import requests
from datetime import datetime, timedelta
import copy
# FIXME: move parts to own module/class
def schiffts():
# some initialization
old_data = {}
data_queue = []
current_data = None
next_hit = {}
last_update = ''
intensity = 0
temperature_data = {'status': 0}
storage = DataStorage(settings.COLLECTOR_DATA_FILE)
# get date
now = datetime.now()
    latest_radar = now - timedelta(0, 10*60) # radar has an 8-minute-ish delay, so go 10 minutes back in time
timestamp = build_timestamp(latest_radar)
if settings.DEBUG:
print "current timestamp: %s"%timestamp
old_rain, old_last_rain, old_last_dry, old_snow, old_data_queue, old_location_weather = storage.load_data()
# get data from srf.ch up to now
for minutes in range(0, settings.NO_SAMPLES+3):
timestamp = build_timestamp(latest_radar - timedelta(0, 60*5*minutes))
# try to retrieve a measurement for the timestamp from the old data queue
old_measurement = next((item for item in old_data_queue if item.timestamp == timestamp), None)
# get a new measurement from srf.ch if it wasn't found in the old data queue
if not old_measurement:
try:
measurement = Measurement((settings.X_LOCATION, settings.Y_LOCATION), timestamp, 3, 105)
measurement.analyze_image()
data_queue.append(measurement)
if settings.DEBUG:
print "add sample with timestamp %s"%timestamp
if minutes == 0:
current_data = measurement
last_update = timestamp
except Exception, e:
print "fail in queuefiller: %s" % e
# use old data
else:
if settings.DEBUG:
print "%s already in queue"%timestamp
if minutes == 0:
current_data = old_measurement
last_update = timestamp
data_queue.append(old_measurement)
if len(data_queue) == settings.NO_SAMPLES:
break
queue_to_save = copy.deepcopy(data_queue)
# only calculate next rain if it is currently not raining at the current location
if does_it_rain(current_data):
rain_now = True
last_rain = current_data.timestamp
last_dry = old_last_dry
intensity = current_data.location['intensity']
else:
rain_now = False
last_dry = current_data.timestamp
last_rain = old_last_rain
next_hit = get_prediction_data(current_data, data_queue, old_data, settings.TWEET_PREDICTION)
if settings.DEBUG:
print "raining now: %s, raining before: %s"%(rain_now, old_rain)
# get temperature info from SMN
if settings.GET_TEMPERATURE:
temperature_data['status'], temperature_data['temperature'] = AmbientDataFetcher.get_temperature(settings.SMN_CODE)
if settings.DEBUG:
print "temperature data: %s"%temperature_data
# get current weather from smn (only if the latest value is older than 30min)
if old_location_weather != {} and old_location_weather.has_key('timestamp'):
if now - datetime.strptime(str(old_location_weather['timestamp']), settings.DATE_FORMAT) > timedelta(0,60*30):
location_weather = AmbientDataFetcher.get_weather(settings.SMN_CODE)
else:
location_weather = old_location_weather
else:
location_weather = AmbientDataFetcher.get_weather(settings.SMN_CODE)
# check for snow
snow = does_it_snow(intensity, temperature_data)
# update twitter if state changed
if rain_now != old_rain and settings.TWEET_STATUS:
snow_update = snow or old_snow
tweet_status(rain_now, snow_update)
storage.save_data(last_update, queue_to_save, rain_now, last_dry, last_rain, next_hit, intensity, snow,
location_weather)
    # assemble the data to send to the server
data_to_send = {'prediction':next_hit, 'current_data':current_data.location, 'temperature':temperature_data,
'snow':snow, 'current_weather':location_weather}
# send data to server
# encoder.FLOAT_REPR = lambda o: format(o, '.2f')
payload = {'secret':settings.SECRET, 'data':json.dumps(data_to_send)}
if settings.DEBUG:
print "data for server: %s"%payload
try:
r = requests.post(settings.SERVER_URL, data=payload)
print r.text
except Exception, e:
print e
if __name__ == '__main__':
schiffts()
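# --- Illustrative sketch, not part of the original script ---
# schiffts() POSTs a form payload of the shape
#     {'secret': <shared secret>, 'data': <JSON string with 'prediction',
#      'current_data', 'temperature', 'snow', 'current_weather'>}
# to settings.SERVER_URL. The receiving side is not part of this file; a
# minimal, hypothetical receiver that checks the secret and decodes the data
# could look like this (Flask, route name and secret handling are assumptions):
import json
from flask import Flask, request, abort
app = Flask(__name__)
SHARED_SECRET = 'change-me'  # would normally come from the server's own settings
@app.route('/update', methods=['POST'])
def receive_update():
    if request.form.get('secret') != SHARED_SECRET:
        abort(403)
    payload = json.loads(request.form['data'])
    # payload now holds the keys assembled in schiffts(): 'prediction',
    # 'current_data', 'temperature', 'snow', 'current_weather'.
    return 'ok'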
|
{
"content_hash": "a8e5cf48eb1d28202d1984376075939e",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 123,
"avg_line_length": 35.280575539568346,
"alnum_prop": 0.6392740619902121,
"repo_name": "chrigu/schifftszbaern",
"id": "ba5c44f26f089054fe6935f5dd188711e8653f95",
"size": "4929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weather/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10029"
},
{
"name": "CSS",
"bytes": "15760"
},
{
"name": "HTML",
"bytes": "15473"
},
{
"name": "JavaScript",
"bytes": "3991"
},
{
"name": "OpenSCAD",
"bytes": "2605"
},
{
"name": "Python",
"bytes": "78379"
}
],
"symlink_target": ""
}
|
"""
Validation errors, and some surrounding helpers.
"""
from __future__ import annotations
from collections import defaultdict, deque
from pprint import pformat
from textwrap import dedent, indent
import heapq
import itertools
import attr
from jsonschema import _utils
WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"])
STRONG_MATCHES: frozenset[str] = frozenset()
_unset = _utils.Unset()
class _Error(Exception):
def __init__(
self,
message,
validator=_unset,
path=(),
cause=None,
context=(),
validator_value=_unset,
instance=_unset,
schema=_unset,
schema_path=(),
parent=None,
type_checker=_unset,
):
super(_Error, self).__init__(
message,
validator,
path,
cause,
context,
validator_value,
instance,
schema,
schema_path,
parent,
)
self.message = message
self.path = self.relative_path = deque(path)
self.schema_path = self.relative_schema_path = deque(schema_path)
self.context = list(context)
self.cause = self.__cause__ = cause
self.validator = validator
self.validator_value = validator_value
self.instance = instance
self.schema = schema
self.parent = parent
self._type_checker = type_checker
for error in context:
error.parent = self
def __repr__(self):
return f"<{self.__class__.__name__}: {self.message!r}>"
def __str__(self):
essential_for_verbose = (
self.validator, self.validator_value, self.instance, self.schema,
)
if any(m is _unset for m in essential_for_verbose):
return self.message
schema_path = _utils.format_as_index(
container=self._word_for_schema_in_error_message,
indices=list(self.relative_schema_path)[:-1],
)
instance_path = _utils.format_as_index(
container=self._word_for_instance_in_error_message,
indices=self.relative_path,
)
prefix = 16 * " "
return dedent(
f"""\
{self.message}
Failed validating {self.validator!r} in {schema_path}:
{indent(pformat(self.schema, width=72), prefix).lstrip()}
On {instance_path}:
{indent(pformat(self.instance, width=72), prefix).lstrip()}
""".rstrip(),
)
@classmethod
def create_from(cls, other):
return cls(**other._contents())
@property
def absolute_path(self):
parent = self.parent
if parent is None:
return self.relative_path
path = deque(self.relative_path)
path.extendleft(reversed(parent.absolute_path))
return path
@property
def absolute_schema_path(self):
parent = self.parent
if parent is None:
return self.relative_schema_path
path = deque(self.relative_schema_path)
path.extendleft(reversed(parent.absolute_schema_path))
return path
@property
def json_path(self):
path = "$"
for elem in self.absolute_path:
if isinstance(elem, int):
path += "[" + str(elem) + "]"
else:
path += "." + elem
return path
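    # json_path example: an absolute_path of deque(["foo", 0, "bar"])
    # renders as the JSON path "$.foo[0].bar".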
def _set(self, type_checker=None, **kwargs):
if type_checker is not None and self._type_checker is _unset:
self._type_checker = type_checker
for k, v in kwargs.items():
if getattr(self, k) is _unset:
setattr(self, k, v)
def _contents(self):
attrs = (
"message", "cause", "context", "validator", "validator_value",
"path", "schema_path", "instance", "schema", "parent",
)
return dict((attr, getattr(self, attr)) for attr in attrs)
def _matches_type(self):
try:
expected = self.schema["type"]
except (KeyError, TypeError):
return False
if isinstance(expected, str):
return self._type_checker.is_type(self.instance, expected)
return any(
self._type_checker.is_type(self.instance, expected_type)
for expected_type in expected
)
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
class SchemaError(_Error):
"""
A schema was invalid under its corresponding metaschema.
"""
_word_for_schema_in_error_message = "metaschema"
_word_for_instance_in_error_message = "schema"
@attr.s(hash=True)
class RefResolutionError(Exception):
"""
A ref could not be resolved.
"""
_cause = attr.ib()
def __str__(self):
return str(self._cause)
class UndefinedTypeCheck(Exception):
"""
A type checker was asked to check a type it did not have registered.
"""
def __init__(self, type):
self.type = type
def __str__(self):
return f"Type {self.type!r} is unknown to this type checker"
class UnknownType(Exception):
"""
A validator was asked to validate an instance against an unknown type.
"""
def __init__(self, type, instance, schema):
self.type = type
self.instance = instance
self.schema = schema
def __str__(self):
prefix = 16 * " "
return dedent(
f"""\
Unknown type {self.type!r} for validator with schema:
{indent(pformat(self.schema, width=72), prefix).lstrip()}
While checking instance:
{indent(pformat(self.instance, width=72), prefix).lstrip()}
""".rstrip(),
)
class FormatError(Exception):
"""
Validating a format failed.
"""
def __init__(self, message, cause=None):
super(FormatError, self).__init__(message, cause)
self.message = message
self.cause = self.__cause__ = cause
def __str__(self):
return self.message
class ErrorTree:
"""
ErrorTrees make it easier to check which validations failed.
"""
_instance = _unset
def __init__(self, errors=()):
self.errors = {}
self._contents = defaultdict(self.__class__)
for error in errors:
container = self
for element in error.path:
container = container[element]
container.errors[error.validator] = error
container._instance = error.instance
def __contains__(self, index):
"""
Check whether ``instance[index]`` has any errors.
"""
return index in self._contents
def __getitem__(self, index):
"""
Retrieve the child tree one level down at the given ``index``.
If the index is not in the instance that this tree corresponds
to and is not known by this tree, whatever error would be raised
by ``instance.__getitem__`` will be propagated (usually this is
        some subclass of `LookupError`).
"""
if self._instance is not _unset and index not in self:
self._instance[index]
return self._contents[index]
def __setitem__(self, index, value):
"""
Add an error to the tree at the given ``index``.
"""
self._contents[index] = value
def __iter__(self):
"""
Iterate (non-recursively) over the indices in the instance with errors.
"""
return iter(self._contents)
def __len__(self):
"""
Return the `total_errors`.
"""
return self.total_errors
def __repr__(self):
total = len(self)
errors = "error" if total == 1 else "errors"
return f"<{self.__class__.__name__} ({total} total {errors})>"
@property
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
"""
child_errors = sum(len(tree) for _, tree in self._contents.items())
return len(self.errors) + child_errors
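# Illustrative usage sketch (not part of the original module): ErrorTree
# indexes errors by the path of the failing part of the instance, so you can
# ask "did this property fail?" without scanning a flat list. Assuming a
# Draft7Validator and a toy schema:
#
#     from jsonschema import Draft7Validator
#     schema = {"properties": {"age": {"type": "integer"}}}
#     instance = {"age": "not a number"}
#     tree = ErrorTree(Draft7Validator(schema).iter_errors(instance))
#     "age" in tree                 # True - that part of the instance failed
#     tree["age"].errors["type"]    # the ValidationError raised by "type"
#     tree.total_errors             # 1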
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
"""
Create a key function that can be used to sort errors by relevance.
Arguments:
weak (set):
a collection of validation keywords to consider to be
"weak". If there are two errors at the same level of the
instance and one is in the set of weak validation keywords,
the other error will take priority. By default, :kw:`anyOf`
and :kw:`oneOf` are considered weak keywords and will be
superseded by other same-level validation errors.
strong (set):
a collection of validation keywords to consider to be
"strong"
"""
def relevance(error):
validator = error.validator
return (
-len(error.path),
validator not in weak,
validator in strong,
not error._matches_type(),
)
return relevance
relevance = by_relevance()
def best_match(errors, key=relevance):
"""
Try to find an error that appears to be the best match among given errors.
In general, errors that are higher up in the instance (i.e. for which
`ValidationError.path` is shorter) are considered better matches,
since they indicate "more" is wrong with the instance.
If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the
*opposite* assumption is made -- i.e. the deepest error is picked,
since these keywords only need to match once, and any other errors
may not be relevant.
Arguments:
errors (collections.abc.Iterable):
the errors to select from. Do not provide a mixture of
errors from different validation attempts (i.e. from
different instances or schemas), since it won't produce
sensical output.
key (collections.abc.Callable):
the key to use when sorting errors. See `relevance` and
transitively `by_relevance` for more details (the default is
to sort with the defaults of that function). Changing the
default is only useful if you want to change the function
that rates errors but still want the error context descent
done by this function.
Returns:
the best matching error, or ``None`` if the iterable was empty
.. note::
This function is a heuristic. Its return value may change for a given
set of inputs from version to version if better heuristics are added.
"""
errors = iter(errors)
best = next(errors, None)
if best is None:
return
best = max(itertools.chain([best], errors), key=key)
while best.context:
# Calculate the minimum via nsmallest, because we don't recurse if
# all nested errors have the same relevance (i.e. if min == max == all)
smallest = heapq.nsmallest(2, best.context, key=key)
if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]):
return best
best = smallest[0]
return best
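# Illustrative usage sketch (not part of the original module): best_match()
# reduces everything iter_errors() produced to a single "most useful" error,
# descending into anyOf/oneOf contexts as described above. Assuming a
# Draft7Validator:
#
#     from jsonschema import Draft7Validator
#     schema = {"anyOf": [{"type": "integer"}, {"type": "string"}]}
#     error = best_match(Draft7Validator(schema).iter_errors(12.5))
#     if error is not None:
#         print(error.message, "at", error.json_path)
#
# A custom ranking can be supplied via key=by_relevance(weak=..., strong=...).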
|
{
"content_hash": "d7327b57d34be1bf7aa772de64ab21c7",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 79,
"avg_line_length": 28.626262626262626,
"alnum_prop": 0.5821277346506705,
"repo_name": "python-poetry/poetry-core",
"id": "87db3df3a6dde1bbc0aae1128ca21f365e774666",
"size": "11336",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "src/poetry/core/_vendor/jsonschema/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2664"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2084191"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
import copy
import uuid
import ldap
from keystone import assignment
from keystone.common import cache
from keystone.common import ldap as common_ldap
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone import identity
from keystone.openstack.common.db.sqlalchemy import session
from keystone.openstack.common.fixture import moxstubout
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests import fakeldap
from keystone.tests import test_backend
CONF = config.CONF
class BaseLDAPIdentity(test_backend.IdentityTests):
def _get_domain_fixture(self):
"""Domains in LDAP are read-only, so just return the static one."""
return self.assignment_api.get_domain(CONF.identity.default_domain_id)
def clear_database(self):
for shelf in fakeldap.FakeShelves:
fakeldap.FakeShelves[shelf].clear()
def reload_backends(self, domain_id):
# Only one backend unless we are using separate domain backends
self.load_backends()
def get_config(self, domain_id):
# Only one conf structure unless we are using separate domain backends
return CONF
def _set_config(self):
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_ldap.conf')])
def test_build_tree(self):
"""Regression test for building the tree names
"""
user_api = identity.backends.ldap.UserApi(CONF)
self.assertTrue(user_api)
self.assertEqual(user_api.tree_dn, "ou=Users,%s" % CONF.ldap.suffix)
def test_configurable_allowed_user_actions(self):
user = {'id': 'fake1',
'name': 'fake1',
'password': 'fakepass1',
'domain_id': CONF.identity.default_domain_id,
'tenants': ['bar']}
self.identity_api.create_user('fake1', user)
user_ref = self.identity_api.get_user('fake1')
self.assertEqual(user_ref['id'], 'fake1')
user['password'] = 'fakepass2'
self.identity_api.update_user('fake1', user)
self.identity_api.delete_user('fake1')
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
'fake1')
def test_configurable_forbidden_user_actions(self):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_allow_create = False
conf.ldap.user_allow_update = False
conf.ldap.user_allow_delete = False
self.reload_backends(CONF.identity.default_domain_id)
user = {'id': 'fake1',
'name': 'fake1',
'password': 'fakepass1',
'domain_id': CONF.identity.default_domain_id,
'tenants': ['bar']}
self.assertRaises(exception.ForbiddenAction,
self.identity_api.create_user,
'fake1',
user)
self.user_foo['password'] = 'fakepass2'
self.assertRaises(exception.ForbiddenAction,
self.identity_api.update_user,
self.user_foo['id'],
self.user_foo)
self.assertRaises(exception.ForbiddenAction,
self.identity_api.delete_user,
self.user_foo['id'])
def test_user_filter(self):
user_ref = self.identity_api.get_user(self.user_foo['id'])
self.user_foo.pop('password')
self.assertDictEqual(user_ref, self.user_foo)
conf = self.get_config(user_ref['domain_id'])
conf.ldap.user_filter = '(CN=DOES_NOT_MATCH)'
self.reload_backends(user_ref['domain_id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
self.user_foo['id'])
def test_remove_role_grant_from_user_and_project(self):
self.assignment_api.create_grant(user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'])
self.assertDictEqual(roles_ref[0], self.role_member)
self.assignment_api.delete_grant(user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'])
self.assertEqual(len(roles_ref), 0)
self.assertRaises(exception.NotFound,
self.assignment_api.delete_grant,
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
role_id='member')
def test_get_and_remove_role_grant_by_group_and_project(self):
new_domain = self._get_domain_fixture()
new_group = {'id': uuid.uuid4().hex, 'domain_id': new_domain['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
new_user = {'id': uuid.uuid4().hex, 'name': 'new_user',
'enabled': True,
'domain_id': new_domain['id']}
self.identity_api.create_user(new_user['id'], new_user)
self.identity_api.add_user_to_group(new_user['id'],
new_group['id'])
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertEqual(roles_ref, [])
self.assertEqual(len(roles_ref), 0)
self.assignment_api.create_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertNotEmpty(roles_ref)
self.assertDictEqual(roles_ref[0], self.role_member)
self.assignment_api.delete_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertEqual(len(roles_ref), 0)
self.assertRaises(exception.NotFound,
self.assignment_api.delete_grant,
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
def test_delete_user_grant_no_user(self):
self.skipTest('Blocked by bug 1101287')
def test_delete_group_grant_no_group(self):
self.skipTest('Blocked by bug 1101287')
def test_get_and_remove_role_grant_by_group_and_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_and_remove_role_grant_by_user_and_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_and_remove_correct_role_grant_from_a_mix(self):
self.skipTest('Blocked by bug 1101287')
def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_role_grant_by_group_and_cross_domain_project(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_role_grant_by_user_and_cross_domain_project(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_multi_role_grant_by_user_group_on_project_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_delete_role_with_user_and_group_grants(self):
self.skipTest('Blocked by bug 1101287')
def test_delete_user_with_group_project_domain_links(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_delete_group_with_user_project_domain_links(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_list_projects_for_user(self):
domain = self._get_domain_fixture()
user1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex, 'domain_id': domain['id'],
'enabled': True}
self.identity_api.create_user(user1['id'], user1)
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertEqual(len(user_projects), 0)
self.assignment_api.create_grant(user_id=user1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(user_id=user1['id'],
project_id=self.tenant_baz['id'],
role_id=self.role_member['id'])
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertEqual(len(user_projects), 2)
def test_list_projects_for_user_with_grants(self):
domain = self._get_domain_fixture()
new_user = {'id': uuid.uuid4().hex, 'name': 'new_user',
'password': uuid.uuid4().hex, 'enabled': True,
'domain_id': domain['id']}
self.identity_api.create_user(new_user['id'], new_user)
group1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain['id']}
self.identity_api.create_group(group1['id'], group1)
group2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain['id']}
self.identity_api.create_group(group2['id'], group2)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain['id']}
self.assignment_api.create_project(project1['id'], project1)
project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain['id']}
self.assignment_api.create_project(project2['id'], project2)
self.identity_api.add_user_to_group(new_user['id'],
group1['id'])
self.identity_api.add_user_to_group(new_user['id'],
group2['id'])
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=project1['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(group_id=group2['id'],
project_id=project2['id'],
role_id=self.role_admin['id'])
user_projects = self.assignment_api.list_projects_for_user(
new_user['id'])
self.assertEqual(len(user_projects), 2)
def test_create_duplicate_user_name_in_different_domains(self):
self.skipTest('Blocked by bug 1101276')
def test_create_duplicate_project_name_in_different_domains(self):
self.skipTest('Blocked by bug 1101276')
def test_create_duplicate_group_name_in_different_domains(self):
self.skipTest(
'N/A: LDAP does not support multiple domains')
def test_move_user_between_domains(self):
self.skipTest('Blocked by bug 1101276')
def test_move_user_between_domains_with_clashing_names_fails(self):
self.skipTest('Blocked by bug 1101276')
def test_move_group_between_domains(self):
self.skipTest(
'N/A: LDAP does not support multiple domains')
def test_move_group_between_domains_with_clashing_names_fails(self):
self.skipTest('Blocked by bug 1101276')
def test_move_project_between_domains(self):
self.skipTest('Blocked by bug 1101276')
def test_move_project_between_domains_with_clashing_names_fails(self):
self.skipTest('Blocked by bug 1101276')
def test_get_roles_for_user_and_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_list_role_assignments_unfiltered(self):
new_domain = self._get_domain_fixture()
new_user = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex, 'enabled': True,
'domain_id': new_domain['id']}
self.identity_api.create_user(new_user['id'],
new_user)
new_group = {'id': uuid.uuid4().hex, 'domain_id': new_domain['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
new_project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': new_domain['id']}
self.assignment_api.create_project(new_project['id'], new_project)
        # First check how many role grants already exist
existing_assignments = len(self.assignment_api.list_role_assignments())
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=new_project['id'],
role_id='other')
self.assignment_api.create_grant(group_id=new_group['id'],
project_id=new_project['id'],
role_id='admin')
        # Read back the list of assignments - check it has gone up by 2
after_assignments = len(self.assignment_api.list_role_assignments())
self.assertEqual(after_assignments, existing_assignments + 2)
def test_list_role_assignments_bad_role(self):
self.skipTest('Blocked by bug 1221805')
def test_multi_group_grants_on_project_domain(self):
self.skipTest('Blocked by bug 1101287')
def test_list_group_members_missing_entry(self):
"""List group members with deleted user.
If a group has a deleted entry for a member, the non-deleted members
are returned.
"""
# Create a group
group_id = None
group = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
group_id = self.identity_api.create_group(group_id, group)['id']
# Create a couple of users and add them to the group.
user_id = None
user = dict(name=uuid.uuid4().hex, id=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
user_1_id = self.identity_api.create_user(user_id, user)['id']
self.identity_api.add_user_to_group(user_1_id, group_id)
user_id = None
user = dict(name=uuid.uuid4().hex, id=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
user_2_id = self.identity_api.create_user(user_id, user)['id']
self.identity_api.add_user_to_group(user_2_id, group_id)
# Delete user 2
# NOTE(blk-u): need to go directly to user interface to keep from
# updating the group.
driver = self.identity_api._select_identity_driver(
user['domain_id'])
driver.user.delete(user_2_id)
# List group users and verify only user 1.
res = self.identity_api.list_users_in_group(group_id)
self.assertEqual(len(res), 1, "Expected 1 entry (user_1)")
self.assertEqual(res[0]['id'], user_1_id, "Expected user 1 id")
def test_list_group_members_when_no_members(self):
        # List group members when there are no members in the group.
# No exception should be raised.
group = {
'id': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex}
self.identity_api.create_group(group['id'], group)
# If this doesn't raise, then the test is successful.
self.identity_api.list_users_in_group(group['id'])
def test_list_domains(self):
domains = self.assignment_api.list_domains()
self.assertEqual(
domains,
[assignment.calc_default_domain()])
def test_list_domains_non_default_domain_id(self):
        # If the default_domain_id is changed, the ID of the default domain
        # returned by list_domains changes to the new default_domain_id.
new_domain_id = uuid.uuid4().hex
self.opt_in_group('identity', default_domain_id=new_domain_id)
domains = self.assignment_api.list_domains()
self.assertEqual(domains[0]['id'], new_domain_id)
def test_authenticate_requires_simple_bind(self):
user = {
'id': 'no_meta',
'name': 'NO_META',
'domain_id': test_backend.DEFAULT_DOMAIN_ID,
'password': 'no_meta2',
'enabled': True,
}
self.identity_api.create_user(user['id'], user)
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
user['id'])
driver = self.identity_api._select_identity_driver(
user['domain_id'])
driver.user.LDAP_USER = None
driver.user.LDAP_PASSWORD = None
self.assertRaises(AssertionError,
self.identity_api.authenticate,
user_id=user['id'],
password=None,
domain_scope=user['domain_id'])
# (spzala)The group and domain crud tests below override the standard ones
# in test_backend.py so that we can exclude the update name test, since we
# do not yet support the update of either group or domain names with LDAP.
# In the tests below, the update is demonstrated by updating description.
# Refer to bug 1136403 for more detail.
def test_group_crud(self):
group = {
'id': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex}
self.identity_api.create_group(group['id'], group)
group_ref = self.identity_api.get_group(group['id'])
self.assertDictEqual(group_ref, group)
group['description'] = uuid.uuid4().hex
self.identity_api.update_group(group['id'], group)
group_ref = self.identity_api.get_group(group['id'])
self.assertDictEqual(group_ref, group)
self.identity_api.delete_group(group['id'])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group,
group['id'])
def test_create_user_none_mapping(self):
        # When creating a user where an attribute maps to None, the entry is
# created without that attribute and it doesn't fail with a TypeError.
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_attribute_ignore = ['enabled', 'email',
'tenants', 'tenantId']
self.reload_backends(CONF.identity.default_domain_id)
user = {'id': 'fake1',
'name': 'fake1',
'password': 'fakepass1',
'domain_id': CONF.identity.default_domain_id,
'default_project_id': 'maps_to_none',
}
# If this doesn't raise, then the test is successful.
self.identity_api.create_user('fake1', user)
def test_update_user_name(self):
"""A user's name cannot be changed through the LDAP driver."""
self.assertRaises(exception.Conflict,
super(BaseLDAPIdentity, self).test_update_user_name)
def test_arbitrary_attributes_are_returned_from_create_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
def test_arbitrary_attributes_are_returned_from_get_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
def test_new_arbitrary_attributes_are_returned_from_update_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
class LDAPIdentity(tests.TestCase, BaseLDAPIdentity):
def setUp(self):
super(LDAPIdentity, self).setUp()
self._set_config()
self.clear_database()
common_ldap.register_handler('fake://', fakeldap.FakeLdap)
self.load_backends()
self.load_fixtures(default_fixtures)
fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = fixture.mox
self.stubs = fixture.stubs
def test_configurable_allowed_project_actions(self):
tenant = {'id': 'fake1', 'name': 'fake1', 'enabled': True}
self.assignment_api.create_project('fake1', tenant)
tenant_ref = self.assignment_api.get_project('fake1')
self.assertEqual(tenant_ref['id'], 'fake1')
tenant['enabled'] = False
self.assignment_api.update_project('fake1', tenant)
self.assignment_api.delete_project('fake1')
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
'fake1')
def test_configurable_forbidden_project_actions(self):
CONF.ldap.tenant_allow_create = False
CONF.ldap.tenant_allow_update = False
CONF.ldap.tenant_allow_delete = False
self.load_backends()
tenant = {'id': 'fake1', 'name': 'fake1'}
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.create_project,
'fake1',
tenant)
self.tenant_bar['enabled'] = False
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.update_project,
self.tenant_bar['id'],
self.tenant_bar)
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.delete_project,
self.tenant_bar['id'])
def test_configurable_allowed_role_actions(self):
role = {'id': 'fake1', 'name': 'fake1'}
self.assignment_api.create_role('fake1', role)
role_ref = self.assignment_api.get_role('fake1')
self.assertEqual(role_ref['id'], 'fake1')
role['name'] = 'fake2'
self.assignment_api.update_role('fake1', role)
self.assignment_api.delete_role('fake1')
self.assertRaises(exception.RoleNotFound,
self.assignment_api.get_role,
'fake1')
def test_configurable_forbidden_role_actions(self):
CONF.ldap.role_allow_create = False
CONF.ldap.role_allow_update = False
CONF.ldap.role_allow_delete = False
self.load_backends()
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.create_role,
role['id'],
role)
self.role_member['name'] = uuid.uuid4().hex
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.update_role,
self.role_member['id'],
self.role_member)
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.delete_role,
self.role_member['id'])
def test_project_filter(self):
tenant_ref = self.assignment_api.get_project(self.tenant_bar['id'])
self.assertDictEqual(tenant_ref, self.tenant_bar)
CONF.ldap.tenant_filter = '(CN=DOES_NOT_MATCH)'
self.load_backends()
# NOTE(morganfainberg): CONF.ldap.tenant_filter will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_role.invalidate(self.assignment_api,
self.role_member['id'])
self.assignment_api.get_role(self.role_member['id'])
self.assignment_api.get_project.invalidate(self.assignment_api,
self.tenant_bar['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
self.tenant_bar['id'])
def test_role_filter(self):
role_ref = self.assignment_api.get_role(self.role_member['id'])
self.assertDictEqual(role_ref, self.role_member)
CONF.ldap.role_filter = '(CN=DOES_NOT_MATCH)'
self.load_backends()
# NOTE(morganfainberg): CONF.ldap.role_filter will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_role.invalidate(self.assignment_api,
self.role_member['id'])
self.assertRaises(exception.RoleNotFound,
self.assignment_api.get_role,
self.role_member['id'])
def test_dumb_member(self):
CONF.ldap.use_dumb_member = True
CONF.ldap.dumb_member = 'cn=dumb,cn=example,cn=com'
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
'dumb')
def test_project_attribute_mapping(self):
CONF.ldap.tenant_name_attribute = 'ou'
CONF.ldap.tenant_desc_attribute = 'description'
CONF.ldap.tenant_enabled_attribute = 'enabled'
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.tenant_name_attribute,
# CONF.ldap.tenant_desc_attribute, and
# CONF.ldap.tenant_enabled_attribute will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_project.invalidate(self.assignment_api,
self.tenant_baz['id'])
tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
self.assertEqual(tenant_ref['id'], self.tenant_baz['id'])
self.assertEqual(tenant_ref['name'], self.tenant_baz['name'])
self.assertEqual(
tenant_ref['description'],
self.tenant_baz['description'])
self.assertEqual(tenant_ref['enabled'], self.tenant_baz['enabled'])
CONF.ldap.tenant_name_attribute = 'description'
CONF.ldap.tenant_desc_attribute = 'ou'
self.load_backends()
# NOTE(morganfainberg): CONF.ldap.tenant_name_attribute,
# CONF.ldap.tenant_desc_attribute, and
# CONF.ldap.tenant_enabled_attribute will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_project.invalidate(self.assignment_api,
self.tenant_baz['id'])
tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
self.assertEqual(tenant_ref['id'], self.tenant_baz['id'])
self.assertEqual(tenant_ref['name'], self.tenant_baz['description'])
self.assertEqual(tenant_ref['description'], self.tenant_baz['name'])
self.assertEqual(tenant_ref['enabled'], self.tenant_baz['enabled'])
def test_project_attribute_ignore(self):
CONF.ldap.tenant_attribute_ignore = ['name',
'description',
'enabled']
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.tenant_attribute_ignore will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change configs values in tests
# that could affect what the drivers would return up to the manager.
# This solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_project.invalidate(self.assignment_api,
self.tenant_baz['id'])
tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
self.assertEqual(tenant_ref['id'], self.tenant_baz['id'])
self.assertNotIn('name', tenant_ref)
self.assertNotIn('description', tenant_ref)
self.assertNotIn('enabled', tenant_ref)
def test_role_attribute_mapping(self):
CONF.ldap.role_name_attribute = 'ou'
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_role.invalidate(self.assignment_api,
self.role_member['id'])
role_ref = self.assignment_api.get_role(self.role_member['id'])
self.assertEqual(role_ref['id'], self.role_member['id'])
self.assertEqual(role_ref['name'], self.role_member['name'])
CONF.ldap.role_name_attribute = 'sn'
self.load_backends()
# NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_role.invalidate(self.assignment_api,
self.role_member['id'])
role_ref = self.assignment_api.get_role(self.role_member['id'])
self.assertEqual(role_ref['id'], self.role_member['id'])
self.assertNotIn('name', role_ref)
def test_role_attribute_ignore(self):
CONF.ldap.role_attribute_ignore = ['name']
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.role_attribute_ignore will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.assignment_api.get_role.invalidate(self.assignment_api,
self.role_member['id'])
role_ref = self.assignment_api.get_role(self.role_member['id'])
self.assertEqual(role_ref['id'], self.role_member['id'])
self.assertNotIn('name', role_ref)
def test_user_enable_attribute_mask(self):
CONF.ldap.user_enabled_mask = 2
CONF.ldap.user_enabled_default = '512'
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
ldap_ = self.identity_api.driver.user.get_connection()
def get_enabled_vals():
user_dn = self.identity_api.driver.user._id_to_dn_string('fake1')
enabled_attr_name = CONF.ldap.user_enabled_attribute
res = ldap_.search_s(user_dn,
ldap.SCOPE_BASE,
query='(sn=fake1)')
return res[0][1][enabled_attr_name]
user = {'id': 'fake1', 'name': 'fake1', 'enabled': True,
'domain_id': CONF.identity.default_domain_id}
user_ref = self.identity_api.create_user('fake1', user)
# Use assertIs rather than assertTrue because assertIs will assert the
# value is a Boolean as expected.
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
enabled_vals = get_enabled_vals()
self.assertEqual(enabled_vals, [512])
user_ref = self.identity_api.get_user('fake1')
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
user['enabled'] = False
user_ref = self.identity_api.update_user('fake1', user)
self.assertIs(user_ref['enabled'], False)
self.assertNotIn('enabled_nomask', user_ref)
enabled_vals = get_enabled_vals()
self.assertEqual(enabled_vals, [514])
user_ref = self.identity_api.get_user('fake1')
self.assertIs(user_ref['enabled'], False)
self.assertNotIn('enabled_nomask', user_ref)
user['enabled'] = True
user_ref = self.identity_api.update_user('fake1', user)
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
enabled_vals = get_enabled_vals()
self.assertEqual(enabled_vals, [512])
user_ref = self.identity_api.get_user('fake1')
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
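    # Worked example of the mask arithmetic exercised by the test above
    # (spelled out as an illustration, mirroring the AD-style
    # userAccountControl convention): with user_enabled_mask = 2 and
    # user_enabled_default = '512', the stored attribute is 512 (mask bit
    # clear) for an enabled user and 512 | 2 = 514 (mask bit set) for a
    # disabled one, which is why the assertions expect [512] and [514].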
def test_user_api_get_connection_no_user_password(self):
"""Don't bind in case the user and password are blank."""
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf')])
CONF.ldap.url = "fake://memory"
user_api = identity.backends.ldap.UserApi(CONF)
self.stubs.Set(fakeldap, 'FakeLdap',
self.mox.CreateMock(fakeldap.FakeLdap))
common_ldap.register_handler('fake://', fakeldap.FakeLdap)
# we have to track all calls on 'conn' to make sure that
# conn.simple_bind_s is not called
conn = self.mox.CreateMockAnything()
conn = fakeldap.FakeLdap(CONF.ldap.url,
0,
alias_dereferencing=None,
tls_cacertdir=None,
tls_cacertfile=None,
tls_req_cert=2,
use_tls=False).AndReturn(conn)
self.mox.ReplayAll()
user_api.get_connection(user=None, password=None)
def test_wrong_ldap_scope(self):
CONF.ldap.query_scope = uuid.uuid4().hex
self.assertRaisesRegexp(
ValueError,
'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope,
identity.backends.ldap.Identity)
def test_wrong_alias_dereferencing(self):
CONF.ldap.alias_dereferencing = uuid.uuid4().hex
self.assertRaisesRegexp(
ValueError,
'Invalid LDAP deref option: %s\.' % CONF.ldap.alias_dereferencing,
identity.backends.ldap.Identity)
def test_user_extra_attribute_mapping(self):
CONF.ldap.user_additional_attribute_mapping = ['description:name']
self.load_backends()
user = {
'id': 'extra_attributes',
'name': 'EXTRA_ATTRIBUTES',
'password': 'extra',
'domain_id': CONF.identity.default_domain_id
}
self.identity_api.create_user(user['id'], user)
dn, attrs = self.identity_api.driver.user._ldap_get(user['id'])
self.assertTrue(user['name'] in attrs['description'])
def test_parse_extra_attribute_mapping(self):
option_list = ['description:name', 'gecos:password',
'fake:invalid', 'invalid1', 'invalid2:',
'description:name:something']
mapping = self.identity_api.driver.user._parse_extra_attrs(option_list)
expected_dict = {'description': 'name', 'gecos': 'password'}
self.assertDictEqual(expected_dict, mapping)
# TODO(henry-nash): These need to be removed when the full LDAP implementation
# is submitted - see Bugs 1092187, 1101287, 1101276, 1101289
def test_domain_crud(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'enabled': True, 'description': uuid.uuid4().hex}
self.assertRaises(exception.Forbidden,
self.assignment_api.create_domain,
domain['id'],
domain)
self.assertRaises(exception.Conflict,
self.assignment_api.create_domain,
CONF.identity.default_domain_id,
domain)
self.assertRaises(exception.DomainNotFound,
self.assignment_api.get_domain,
domain['id'])
domain['description'] = uuid.uuid4().hex
self.assertRaises(exception.DomainNotFound,
self.assignment_api.update_domain,
domain['id'],
domain)
self.assertRaises(exception.Forbidden,
self.assignment_api.update_domain,
CONF.identity.default_domain_id,
domain)
self.assertRaises(exception.DomainNotFound,
self.assignment_api.get_domain,
domain['id'])
self.assertRaises(exception.DomainNotFound,
self.assignment_api.delete_domain,
domain['id'])
self.assertRaises(exception.Forbidden,
self.assignment_api.delete_domain,
CONF.identity.default_domain_id)
self.assertRaises(exception.DomainNotFound,
self.assignment_api.get_domain,
domain['id'])
def test_create_domain_case_sensitivity(self):
# domains are read-only, so case sensitivity isn't an issue
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assertRaises(exception.Forbidden,
self.assignment_api.create_domain,
ref['id'],
ref)
def test_cache_layer_domain_crud(self):
# TODO(morganfainberg): This also needs to be removed when full LDAP
# implementation is submitted. No need to duplicate the above test,
# just skip this time.
self.skipTest('Domains are read-only against LDAP')
def test_project_crud(self):
# NOTE(topol): LDAP implementation does not currently support the
# updating of a project name so this method override
# provides a different update test
project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex, 'enabled': True
}
self.assignment_api.create_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
project['description'] = uuid.uuid4().hex
self.assignment_api.update_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.assignment_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project['id'])
@tests.skip_if_cache_disabled('assignment')
def test_cache_layer_project_crud(self):
# NOTE(morganfainberg): LDAP implementation does not currently support
# updating project names. This method override provides a different
# update test.
project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex}
project_id = project['id']
# Create a project
self.assignment_api.create_project(project_id, project)
self.assignment_api.get_project(project_id)
updated_project = copy.deepcopy(project)
updated_project['description'] = uuid.uuid4().hex
# Update project, bypassing assignment_api manager
self.assignment_api.driver.update_project(project_id,
updated_project)
# Verify get_project still returns the original project_ref
self.assertDictContainsSubset(
project, self.assignment_api.get_project(project_id))
# Invalidate cache
self.assignment_api.get_project.invalidate(self.assignment_api,
project_id)
# Verify get_project now returns the new project
self.assertDictContainsSubset(
updated_project,
self.assignment_api.get_project(project_id))
# Update project using the assignment_api manager back to original
self.assignment_api.update_project(project['id'], project)
# Verify get_project returns the original project_ref
self.assertDictContainsSubset(
project, self.assignment_api.get_project(project_id))
# Delete project bypassing assignment_api
self.assignment_api.driver.delete_project(project_id)
# Verify get_project still returns the project_ref
self.assertDictContainsSubset(
project, self.assignment_api.get_project(project_id))
# Invalidate cache
self.assignment_api.get_project.invalidate(self.assignment_api,
project_id)
# Verify ProjectNotFound now raised
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project_id)
# recreate project
self.assignment_api.create_project(project_id, project)
self.assignment_api.get_project(project_id)
# delete project
self.assignment_api.delete_project(project_id)
# Verify ProjectNotFound is raised
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project_id)
def test_multi_role_grant_by_user_group_on_project_domain(self):
# This is a partial implementation of the standard test that
# is defined in test_backend.py. It omits both domain and
        # group grants, since neither of these is yet supported by
# the ldap backend.
role_list = []
for _ in range(2):
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role['id'], role)
role_list.append(role)
user1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
self.identity_api.create_user(user1['id'], user1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id}
self.assignment_api.create_project(project1['id'], project1)
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role_list[0]['id'])
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role_list[1]['id'])
        # Although list_grants is not yet supported, we can test the
# alternate way of getting back lists of grants, where user
# and group roles are combined. Only directly assigned user
# roles are available, since group grants are not yet supported
combined_list = self.assignment_api.get_roles_for_user_and_project(
user1['id'],
project1['id'])
self.assertEqual(len(combined_list), 2)
self.assertIn(role_list[0]['id'], combined_list)
self.assertIn(role_list[1]['id'], combined_list)
# Finally, although domain roles are not implemented, check we can
        # issue the combined get roles call with benign results, since this is
# used in token generation
combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
user1['id'], CONF.identity.default_domain_id)
self.assertEqual(len(combined_role_list), 0)
def test_list_projects_for_alternate_domain(self):
self.skipTest(
'N/A: LDAP does not support multiple domains')
def test_create_grant_no_user(self):
self.skipTest('Blocked by bug 1101287')
def test_create_grant_no_group(self):
self.skipTest('Blocked by bug 1101287')
class LDAPIdentityEnabledEmulation(LDAPIdentity):
def setUp(self):
super(LDAPIdentityEnabledEmulation, self).setUp()
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_ldap.conf')])
self.opt_in_group('ldap',
user_enabled_emulation=True,
tenant_enabled_emulation=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
self.user_two, self.user_badguy]:
obj.setdefault('enabled', True)
def test_project_crud(self):
# NOTE(topol): LDAPIdentityEnabledEmulation will create an
# enabled key in the project dictionary so this
# method override handles this side-effect
project = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex}
self.assignment_api.create_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
# self.assignment_api.create_project adds an enabled
# key with a value of True when LDAPIdentityEnabledEmulation
# is used so we now add this expected key to the project dictionary
project['enabled'] = True
self.assertDictEqual(project_ref, project)
project['description'] = uuid.uuid4().hex
self.assignment_api.update_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.assignment_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project['id'])
def test_user_crud(self):
user = {
'id': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex}
self.identity_api.create_user(user['id'], user)
user['enabled'] = True
user_ref = self.identity_api.get_user(user['id'])
del user['password']
user_ref_dict = dict((x, user_ref[x]) for x in user_ref)
self.assertDictEqual(user_ref_dict, user)
user['password'] = uuid.uuid4().hex
self.identity_api.update_user(user['id'], user)
user_ref = self.identity_api.get_user(user['id'])
del user['password']
user_ref_dict = dict((x, user_ref[x]) for x in user_ref)
self.assertDictEqual(user_ref_dict, user)
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
def test_user_enable_attribute_mask(self):
self.skipTest(
"Enabled emulation conflicts with enabled mask")
class LdapIdentitySqlAssignment(sql.Base, tests.TestCase, BaseLDAPIdentity):
def _set_config(self):
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_ldap_sql.conf')])
def setUp(self):
super(LdapIdentitySqlAssignment, self).setUp()
self._set_config()
self.clear_database()
self.load_backends()
cache.configure_cache_region(cache.REGION)
self.engine = session.get_engine()
self.addCleanup(session.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.load_fixtures(default_fixtures)
        # defaulted by the data load
self.user_foo['enabled'] = True
def test_domain_crud(self):
pass
def test_list_domains(self):
domains = self.assignment_api.list_domains()
self.assertEqual(domains, [assignment.calc_default_domain()])
def test_list_domains_non_default_domain_id(self):
        # If the default_domain_id is changed, the ID of the default domain
        # returned by list_domains doesn't change because the SQL identity
        # backend reads it from the database, which doesn't get updated by
        # a config change.
orig_default_domain_id = CONF.identity.default_domain_id
new_domain_id = uuid.uuid4().hex
self.opt_in_group('identity', default_domain_id=new_domain_id)
domains = self.assignment_api.list_domains()
self.assertEqual(domains[0]['id'], orig_default_domain_id)
def test_project_filter(self):
self.skipTest(
'N/A: Not part of SQL backend')
def test_role_filter(self):
self.skipTest(
'N/A: Not part of SQL backend')
def test_add_role_grant_to_user_and_project_404(self):
self.skipTest('Blocked by bug 1101287')
def test_get_role_grants_for_user_and_project_404(self):
self.skipTest('Blocked by bug 1101287')
def test_list_projects_for_user_with_grants(self):
self.skipTest('Blocked by bug 1221805')
class MultiLDAPandSQLIdentity(sql.Base, tests.TestCase, BaseLDAPIdentity):
"""Class to test common SQL plus individual LDAP backends.
We define a set of domains and domain-specific backends:
- A separate LDAP backend for the default domain
- A separate LDAP backend for domain1
- domain2 shares the same LDAP as domain1, but uses a different
tree attach point
- An SQL backend for all other domains (which will include domain3
and domain4)
Normally one would expect that the default domain would be handled as
part of the "other domains" - however the above provides better
test coverage since most of the existing backend tests use the default
domain.
"""
def setUp(self):
super(MultiLDAPandSQLIdentity, self).setUp()
self._set_config()
self.load_backends()
self.engine = session.get_engine()
self.addCleanup(session.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self._setup_domain_test_data()
# All initial domain data setup complete, time to switch on support
# for separate backends per domain.
self.opt_in_group('identity',
domain_specific_drivers_enabled=True,
domain_config_dir=tests.TESTSDIR)
self._set_domain_configs()
self.clear_database()
self.load_fixtures(default_fixtures)
def _set_config(self):
self.config([tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_multi_ldap_sql.conf')])
def _setup_domain_test_data(self):
def create_domain(domain):
try:
ref = self.assignment_api.create_domain(
domain['id'], domain)
except exception.Conflict:
ref = (
self.assignment_api.get_domain_by_name(domain['name']))
return ref
self.domain_default = create_domain(assignment.calc_default_domain())
self.domain1 = create_domain(
{'id': uuid.uuid4().hex, 'name': 'domain1'})
self.domain2 = create_domain(
{'id': uuid.uuid4().hex, 'name': 'domain2'})
self.domain3 = create_domain(
{'id': uuid.uuid4().hex, 'name': 'domain3'})
self.domain4 = create_domain(
{'id': uuid.uuid4().hex, 'name': 'domain4'})
def _set_domain_configs(self):
# We need to load the domain configs explicitly to ensure the
# test overrides are included.
self.identity_api.domain_configs._load_config(
self.identity_api.assignment_api,
[tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_multi_ldap_sql.conf'),
tests.dirs.tests('keystone.Default.conf')],
'Default')
self.identity_api.domain_configs._load_config(
self.identity_api.assignment_api,
[tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_multi_ldap_sql.conf'),
tests.dirs.tests('keystone.domain1.conf')],
'domain1')
self.identity_api.domain_configs._load_config(
self.identity_api.assignment_api,
[tests.dirs.etc('keystone.conf.sample'),
tests.dirs.tests('test_overrides.conf'),
tests.dirs.tests('backend_multi_ldap_sql.conf'),
tests.dirs.tests('keystone.domain2.conf')],
'domain2')
def reload_backends(self, domain_id):
        # Just reload the driver for this domain - which will pick up
        # any updated config
self.identity_api.domain_configs.reload_domain_driver(
self.identity_api.assignment_api, domain_id)
def get_config(self, domain_id):
# Get the config for this domain, will return CONF
# if no specific config defined for this domain
return self.identity_api.domain_configs.get_domain_conf(domain_id)
def test_list_domains(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_domains_non_default_domain_id(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_domain_segregation(self):
"""Test that separate configs have segregated the domain.
Test Plan:
- Create a user in each of the domains
- Make sure that you can only find a given user in its
relevant domain
- Make sure that for a backend that supports multiple domains
you can get the users via any of the domain scopes
"""
def create_user(domain_id):
user = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
self.identity_api.create_user(user['id'], user)
return user
userd = create_user(CONF.identity.default_domain_id)
user1 = create_user(self.domain1['id'])
user2 = create_user(self.domain2['id'])
user3 = create_user(self.domain3['id'])
user4 = create_user(self.domain4['id'])
# Now check that I can read user1 with the appropriate domain
# scope, but won't find it if the wrong scope is used
ref = self.identity_api.get_user(
userd['id'], domain_scope=CONF.identity.default_domain_id)
del userd['password']
self.assertDictEqual(ref, userd)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
userd['id'],
domain_scope=self.domain1['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
userd['id'],
domain_scope=self.domain2['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
userd['id'],
domain_scope=self.domain3['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
userd['id'],
domain_scope=self.domain4['id'])
ref = self.identity_api.get_user(
user1['id'], domain_scope=self.domain1['id'])
del user1['password']
self.assertDictEqual(ref, user1)
ref = self.identity_api.get_user(
user2['id'], domain_scope=self.domain2['id'])
del user2['password']
self.assertDictEqual(ref, user2)
# Domains 3 and 4 share the same backend, so you should be
# able to see user3 and 4 from either
ref = self.identity_api.get_user(
user3['id'], domain_scope=self.domain3['id'])
del user3['password']
self.assertDictEqual(ref, user3)
ref = self.identity_api.get_user(
user4['id'], domain_scope=self.domain4['id'])
del user4['password']
self.assertDictEqual(ref, user4)
ref = self.identity_api.get_user(
user3['id'], domain_scope=self.domain4['id'])
self.assertDictEqual(ref, user3)
ref = self.identity_api.get_user(
user4['id'], domain_scope=self.domain3['id'])
self.assertDictEqual(ref, user4)
def test_scanning_of_config_dir(self):
"""Test the Manager class scans the config directory.
The setup for the main tests above load the domain configs directly
so that the test overrides can be included. This test just makes sure
that the standard config directory scanning does pick up the relevant
domain config files.
"""
# Confirm that config has drivers_enabled as True, which we will
# check has been set to False later in this test
self.assertTrue(config.CONF.identity.domain_specific_drivers_enabled)
self.load_backends()
# Execute any command to trigger the lazy loading of domain configs
self.identity_api.list_users(domain_scope=self.domain1['id'])
# ...and now check the domain configs have been set up
self.assertIn('default', self.identity_api.domain_configs)
self.assertIn(self.domain1['id'], self.identity_api.domain_configs)
self.assertIn(self.domain2['id'], self.identity_api.domain_configs)
self.assertNotIn(self.domain3['id'], self.identity_api.domain_configs)
self.assertNotIn(self.domain4['id'], self.identity_api.domain_configs)
# Finally check that a domain specific config contains items from both
# the primary config and the domain specific config
conf = self.identity_api.domain_configs.get_domain_conf(
self.domain1['id'])
# This should now be false, as is the default, since this is not
# set in the standard primary config file
self.assertFalse(conf.identity.domain_specific_drivers_enabled)
        # ...and make sure a domain-specific option is also set
self.assertEqual(conf.ldap.url, 'fake://memory1')
def test_add_role_grant_to_user_and_project_404(self):
self.skipTest('Blocked by bug 1101287')
def test_get_role_grants_for_user_and_project_404(self):
self.skipTest('Blocked by bug 1101287')
def test_list_projects_for_user_with_grants(self):
self.skipTest('Blocked by bug 1221805')
|
{
"content_hash": "ccda0fbe0a822a0ae837f6116a436b96",
"timestamp": "",
"source": "github",
"line_count": 1414,
"max_line_length": 79,
"avg_line_length": 44.15205091937765,
"alnum_prop": 0.5924941135013054,
"repo_name": "dsiddharth/access-keys",
"id": "0d8bc66528c170b792f8da7c7a60f9e4f1e674a1",
"size": "63089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/tests/test_backend_ldap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2619408"
},
{
"name": "Shell",
"bytes": "11206"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('..')
from deepgraph.graph import *
from deepgraph.solver import *
from deepgraph.nn.core import *
from deepgraph.nn.loss import *
from deepgraph.nn.conv import *
from deepgraph.nn.init import *
from deepgraph.utils.datasets import *
from theano.tensor.nnet import relu
data = load_data("../data/mnist.pkl.gz")
train_x, train_y = data[0]
val_x, val_y = data[1]
batch_size = 600
g = Graph("test")
# For now, the sequence of Data nodes matters to map inputs to compiled model inlets.
# In addition, we need to specify the input shape so that deepgraph
# can calculate the necessary memory.
data = Data(g, "data", T.matrix, shape=(-1, 1, 28, 28))
label = Data(g, "label", T.ivector, shape=(-1,), config={
"phase": PHASE_TRAIN
})
conv_0 = Conv2D(g, "conv_1", inputs=[data],config={
"channels": 20,
"kernel": (5, 5),
"activation": relu,
"weight_filler": xavier(gain="relu"),
"bias_filler": constant(0)
})
bn_0 = BN(g, "bn_0", inputs=[conv_0])
pool_0 = Pool(g, "pool_0", inputs=[bn_0], config={
"kernel": (2, 2)
})
lrn_0 = LRN(g, "lrn_0", inputs=[pool_0])
conv_1 = Conv2D(g, "conv_2", inputs=[lrn_0], config={
"channels": 50,
"kernel": (5, 5),
"activation": relu,
"weight_filler": xavier(gain="relu"),
"bias_filler": constant(0)
})
bn_1 = BN(g, "bn_1", inputs=[conv_1])
pool_1 = Pool(g, "pool_1", inputs=[conv_1], config={
"kernel": (2, 2)
})
lrn_1 = LRN(g, "lrn_1", inputs=[pool_1])
flatten = Flatten(g, "flatten", inputs=[lrn_1],config={
"dims": 2
})
hidden_0 = Dense(g, "tanh_0", inputs=[flatten], config={
"out": 500,
"weight_filler": xavier(),
"bias_filler": constant(0.0001),
"activation": T.tanh
})
soft = Softmax(g, "softmax", inputs=[hidden_0],config={
"out": 10,
"weight_filler": xavier(),
"bias_filler": constant(0.0001)
})
argm = Argmax(g, "argmax", inputs=[soft],config={
"is_output": True
})
# Error and loss terms
error = Error(g, "error", inputs=[argm, label])
loss = NegativeLogLikelyHoodLoss(g, "loss", inputs=[soft, label])
l2 = L2RegularizationLoss(g, "l2", inputs=[soft, hidden_0],config={"loss_weight": 0.0001})
g.compile(train_inputs=[train_x, train_y], val_inputs=[val_x, val_y], batch_size=batch_size)
log("Starting optimization phase", LOG_LEVEL_INFO)
solver = Solver(lr=0.1)
solver.load(g)
solver.optimize(10)
solver.learning_rate = 0.02
solver.optimize(10)
log("Testing inference", LOG_LEVEL_INFO)
for idx in range(40):
i_train_x = train_x.get_value()[idx]
print g.infer([i_train_x.reshape((1, 1, 28, 28))])
|
{
"content_hash": "22ec6a905ebe83962fbe26ec1b90c0cd",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 92,
"avg_line_length": 28.208791208791208,
"alnum_prop": 0.6408258667705493,
"repo_name": "sebastian-schlecht/deepgraph",
"id": "83e29d596c5add82b1d2b35081000f03e1fabd70",
"size": "2567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "legacy/mnist_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3911641"
},
{
"name": "Python",
"bytes": "122339"
}
],
"symlink_target": ""
}
|
"""
=======================
YAML-formatted data I/O
=======================
This module handles input from YAML-formatted data. Because this code is
badly in need of cleanup and because, in the long run, I (Stephen Edie) hope to
replace the input mechanism with something cleaner and faster (not involving
YAML), the documentation for this module is limited at this time.
---------
Reference
---------
.. autodoc puts stuff extracted from docstrings here.
"""
from .yaml_io import *
__all__ = yaml_io.__all__[:]
del yaml_io
|
{
"content_hash": "143635d9e66ee18e2e93a67bd32b6a83",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 76,
"avg_line_length": 22.083333333333332,
"alnum_prop": 0.6471698113207547,
"repo_name": "VlachosGroup/VlachosGroupAdditivity",
"id": "530f8a0e0eeb27009b925e1310d8d31023d27f04",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgradd/yaml_io/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "288237"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteWordList(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteWordList Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteWordList, self).__init__(temboo_session, '/Library/Wordnik/WordList/DeleteWordList')
def new_input_set(self):
return DeleteWordListInputSet()
def _make_result_set(self, result, path):
return DeleteWordListResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteWordListChoreographyExecution(session, exec_id, path)
class DeleteWordListInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteWordList
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from Wordnik.)
"""
super(DeleteWordListInputSet, self)._set_input('APIKey', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, string) The Password of the Wordnik account.)
"""
super(DeleteWordListInputSet, self)._set_input('Password', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) The Username of the Wordnik account.)
"""
super(DeleteWordListInputSet, self)._set_input('Username', value)
def set_WordList(self, value):
"""
        Set the value of the WordList input for this Choreo. ((required, string) The perma-link of the WordList to delete.)
"""
super(DeleteWordListInputSet, self)._set_input('WordList', value)
class DeleteWordListResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteWordList Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Wordnik.)
"""
return self._output.get('Response', None)
class DeleteWordListChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteWordListResultSet(response, path)
|
{
"content_hash": "b822a8262fed1dfb286d52643f74873b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 123,
"avg_line_length": 39.59154929577465,
"alnum_prop": 0.6929918178584134,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "bd3734b2057e56f6d92d5a38280d551ddc78bf74",
"size": "3657",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Wordnik/WordList/DeleteWordList.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
"""Split RR7 products, for some reason!"""
# stdlib
import sys
import re
# 3rd Party
from pyiem.util import get_dbconn, utc
def main():
"""Go"""
pgconn = get_dbconn("afos")
acursor = pgconn.cursor()
payload = getattr(sys.stdin, "buffer", sys.stdin).read()
payload = payload.decode("ascii", errors="ignore")
data = payload.replace("\r\r\n", "z")
tokens = re.findall(r"(\.A [A-Z0-9]{3} .*?=)", data)
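    # Each token is one SHEF ".A" record. An illustrative (made-up) record the
    # regex above would capture is ".A DSM 0209 C DH07/SFDCZ 2.0=", from which
    # token[3:6] below yields the station id ("DSM") used to build the RR7 PIL.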
utcnow = utc().replace(second=0, microsecond=0)
for token in tokens:
# print(tokens)
sql = "INSERT into products (pil, data, entered) values(%s,%s,%s)"
sqlargs = (f"RR7{token[3:6]}", token.replace("z", "\n"), utcnow)
acursor.execute(sql, sqlargs)
acursor.close()
pgconn.commit()
pgconn.close()
if __name__ == "__main__":
main()
|
{
"content_hash": "ffe11e8cd967daf2327e8aa9eec5bbe2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 22.805555555555557,
"alnum_prop": 0.584652862362972,
"repo_name": "akrherz/pyWWA",
"id": "69404658e18e5b3f720ed4e837ee4874abfcd1ae",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parsers/pywwa/workflows/rr7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "207659"
},
{
"name": "Shell",
"bytes": "4472"
}
],
"symlink_target": ""
}
|
class PossibleApp(object):
"""A factory class that can be used to create a running instance of app.
Call Create() to launch the app and begin manipulating it.
"""
def __init__(self, app_type, target_os):
self._app_type = app_type
self._target_os = target_os
self._platform = None
self._platform_backend = None
def __repr__(self):
return 'PossibleApp(app_type=%s)' % self.app_type
@property
def app_type(self):
return self._app_type
@property
def target_os(self):
"""Target OS, the app will run on."""
return self._target_os
@property
def platform(self):
self._InitPlatformIfNeeded()
return self._platform
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, browser_options):
"""Tests for extension support."""
raise NotImplementedError()
|
{
"content_hash": "664b4e2500f9021e9d8bd8636acd8760",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 74,
"avg_line_length": 24.42105263157895,
"alnum_prop": 0.6745689655172413,
"repo_name": "cricketclubucd/davisdragons",
"id": "ee53f5bc1d3c188bfbf2d5a14cd65d9c34663b2c",
"size": "1092",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "platform-tools/systrace/catapult/telemetry/telemetry/internal/app/possible_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6390"
},
{
"name": "CSS",
"bytes": "3440"
},
{
"name": "HTML",
"bytes": "5451585"
},
{
"name": "JavaScript",
"bytes": "53731"
},
{
"name": "Python",
"bytes": "3894020"
},
{
"name": "SCSS",
"bytes": "24096"
},
{
"name": "Shell",
"bytes": "2834"
},
{
"name": "TypeScript",
"bytes": "87435"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20141010_1432'),
]
operations = [
migrations.AddField(
model_name='category',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, unique=True, default='testcat'),
preserve_default=False,
),
]
|
{
"content_hash": "c1c9d99883a038e9f643988f831b8843",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 96,
"avg_line_length": 23.9,
"alnum_prop": 0.6192468619246861,
"repo_name": "sai-prasanna/simple-django-blog",
"id": "03a16f77d7739abd5dbeb87a17df171e6aff579e",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0003_category_slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10782"
},
{
"name": "JavaScript",
"bytes": "9868"
},
{
"name": "Python",
"bytes": "11403"
}
],
"symlink_target": ""
}
|
from cattle import concurrency # NOQA
import sys
import os
import logging
from logging.handlers import RotatingFileHandler
import argparse
_LOG_SIZE = 2097152
_LOG_COUNT = 2
if __name__ == '__main__':
dist = os.path.join(os.path.dirname(__file__), "dist")
if os.path.exists(dist):
sys.path.insert(0, dist)
from cattle import plugins, Config, process_manager
from cattle.agent.event import EventClient
from cattle.type_manager import types, get_type_list, LIFECYCLE
log = logging.getLogger("agent")
def _setup_logger():
format = '%(asctime)s %(levelname)s %(name)s [%(thread)s] ' \
'[%(filename)s:%(lineno)s] %(message)s '
level = logging.INFO
if Config.debug():
level = logging.DEBUG
logging.root.setLevel(level)
file_handler = RotatingFileHandler(Config.log(), maxBytes=_LOG_SIZE,
backupCount=_LOG_COUNT)
file_handler.setFormatter(logging.Formatter(format))
std_err_handler = logging.StreamHandler(sys.stderr)
std_err_handler.setFormatter(logging.Formatter(format))
std_err_handler.setLevel(logging.WARN)
logging.root.addHandler(file_handler)
logging.root.addHandler(std_err_handler)
def _gather_events():
events = []
for t in types():
if hasattr(t, "events"):
for e in t.events():
events.append(e)
return events
def _args():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("--access-key", default=Config.access_key(),
help='Default value from CATTLE_ACCESS_KEY')
parser.add_argument("--secret-key", default=Config.secret_key(),
help='Default value from CATTLE_SECRET_KEY')
parser.add_argument("--url", default=Config.api_url(),
help='Default value from CATTLE_URL')
parser.add_argument("--workers", default=Config.workers(),
help='Default value from CATTLE_WORKERS')
parser.add_argument("--agent-id")
return parser.parse_args()
def main():
if Config.setup_logger():
_setup_logger()
else:
logging.basicConfig(level=logging.INFO)
args = _args()
Config.set_access_key(args.access_key)
Config.set_secret_key(args.secret_key)
Config.set_api_url(args.url)
process_manager.init()
plugins.load()
log.info('API URL %s', Config.api_url())
client = EventClient(Config.api_url(), auth=Config.api_auth(),
workers=args.workers, agent_id=args.agent_id)
events = _gather_events()
log.info("Subscribing to %s", events)
for startup in get_type_list(LIFECYCLE):
startup.on_startup()
client.run(events)
sys.exit(0)
if __name__ == '__main__':
main()
|
{
"content_hash": "cd9a2f03ef27dab2d4c8eb5d47594938",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 72,
"avg_line_length": 27.067961165048544,
"alnum_prop": 0.6276901004304161,
"repo_name": "cloudnautique/cloud-cattle",
"id": "33356902ec1b09797a5a5fbafa407816f205e08c",
"size": "2811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/agent/src/agents/pyagent/main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "178006"
},
{
"name": "Java",
"bytes": "3161966"
},
{
"name": "JavaScript",
"bytes": "2329874"
},
{
"name": "Python",
"bytes": "330745"
},
{
"name": "Shell",
"bytes": "59604"
}
],
"symlink_target": ""
}
|
"""
A platform independent file lock that supports the with-statement.
.. autodata:: filelock.__version__
:no-value:
"""
from __future__ import annotations
import sys
import warnings
from ._api import AcquireReturnProxy, BaseFileLock
from ._error import Timeout
from ._soft import SoftFileLock
from ._unix import UnixFileLock, has_fcntl
from ._windows import WindowsFileLock
from .version import version
#: version of the project as a string
__version__: str = version
if sys.platform == "win32": # pragma: win32 cover
_FileLock: type[BaseFileLock] = WindowsFileLock
else: # pragma: win32 no cover
if has_fcntl:
_FileLock: type[BaseFileLock] = UnixFileLock
else:
_FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
#: Alias for the lock, which should be used for the current platform. On Windows, this is an alias for
# :class:`WindowsFileLock`, on Unix for :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
FileLock: type[BaseFileLock] = _FileLock
__all__ = [
"__version__",
"FileLock",
"SoftFileLock",
"Timeout",
"UnixFileLock",
"WindowsFileLock",
"BaseFileLock",
"AcquireReturnProxy",
]
|
{
"content_hash": "31c05a25afd607f724f531ecfc5f2668",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 102,
"avg_line_length": 25.958333333333332,
"alnum_prop": 0.6958266452648475,
"repo_name": "pybuilder/pybuilder",
"id": "afcdb706d7a5e1ff89a9271bc19938616d537909",
"size": "1246",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/main/python/pybuilder/_vendor/filelock/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1567"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "Perl",
"bytes": "4025"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "2699121"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
}
|
"""This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_builtin, init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import load_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
load_dynamic = None
# Directly exposed by this module
from importlib._bootstrap import new_module
from importlib._bootstrap import cache_from_source, source_from_cache
from importlib import _bootstrap
from importlib import machinery
import os
import sys
import tokenize
import warnings
# DEPRECATED
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def get_magic():
"""Return the magic number for .pyc or .pyo files."""
return _bootstrap._MAGIC_BYTES
def get_tag():
"""Return the magic tag for .pyc or .pyo files."""
return sys.implementation.cache_tag
def get_suffixes():
warnings.warn('imp.get_suffixes() is deprecated; use the constants '
'defined on importlib.machinery instead',
DeprecationWarning, 2)
extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
source = [(s, 'U', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
class NullImporter:
"""Null import object."""
def __init__(self, path):
if path == '':
raise ImportError('empty pathname', path='')
elif os.path.isdir(path):
raise ImportError('existing directory', path=path)
def find_module(self, fullname):
"""Always returns None."""
return None
class _HackedGetData:
"""Compatibiilty support for 'file' arguments of various load_*()
functions."""
def __init__(self, fullname, path, file=None):
super().__init__(fullname, path)
self.file = file
def get_data(self, path):
"""Gross hack to contort loader to deal w/ load_*()'s bad API."""
if self.file and path == self.path:
if not self.file.closed:
file = self.file
else:
self.file = file = open(self.path, 'r')
with file:
# Technically should be returning bytes, but
# SourceLoader.get_code() just passed what is returned to
# compile() which can handle str. And converting to bytes would
# require figuring out the encoding to decode to and
# tokenize.detect_encoding() only accepts bytes.
return file.read()
else:
return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, _bootstrap.SourceFileLoader):
"""Compatibility support for implementing load_source()."""
def load_source(name, pathname, file=None):
msg = ('imp.load_source() is deprecated; use '
'importlib.machinery.SourceFileLoader(name, pathname).load_module()'
' instead')
warnings.warn(msg, DeprecationWarning, 2)
_LoadSourceCompatibility(name, pathname, file).load_module(name)
module = sys.modules[name]
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = _bootstrap.SourceFileLoader(name, pathname)
return module
class _LoadCompiledCompatibility(_HackedGetData,
_bootstrap.SourcelessFileLoader):
"""Compatibility support for implementing load_compiled()."""
def load_compiled(name, pathname, file=None):
msg = ('imp.load_compiled() is deprecated; use '
'importlib.machinery.SourcelessFileLoader(name, pathname).'
'load_module() instead ')
warnings.warn(msg, DeprecationWarning, 2)
_LoadCompiledCompatibility(name, pathname, file).load_module(name)
module = sys.modules[name]
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = _bootstrap.SourcelessFileLoader(name, pathname)
return module
def load_package(name, path):
msg = ('imp.load_package() is deprecated; use either '
'importlib.machinery.SourceFileLoader() or '
'importlib.machinery.SourcelessFileLoader() instead')
warnings.warn(msg, DeprecationWarning, 2)
if os.path.isdir(path):
extensions = (machinery.SOURCE_SUFFIXES[:] +
machinery.BYTECODE_SUFFIXES[:])
for extension in extensions:
path = os.path.join(path, '__init__'+extension)
if os.path.exists(path):
break
else:
raise ValueError('{!r} is not a package'.format(path))
return _bootstrap.SourceFileLoader(name, path).load_module(name)
def load_module(name, file, filename, details):
"""**DEPRECATED**
Load a module, given information returned by find_module().
The module name must include the full package name, if any.
"""
suffix, mode, type_ = details
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
raise ValueError('invalid file open mode {!r}'.format(mode))
elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
msg = 'file object required for import (type code {})'.format(type_)
raise ValueError(msg)
elif type_ == PY_SOURCE:
return load_source(name, filename, file)
elif type_ == PY_COMPILED:
return load_compiled(name, filename, file)
elif type_ == C_EXTENSION and load_dynamic is not None:
if file is None:
with open(filename, 'rb') as opened_file:
return load_dynamic(name, filename, opened_file)
else:
return load_dynamic(name, filename, file)
elif type_ == PKG_DIRECTORY:
return load_package(name, filename)
elif type_ == C_BUILTIN:
return init_builtin(name)
elif type_ == PY_FROZEN:
return init_frozen(name)
else:
msg = "Don't know how to import {} (type code {})".format(name, type_)
raise ImportError(msg, name=name)
def find_module(name, path=None):
"""**DEPRECATED**
Search for a module.
If path is omitted or None, search for a built-in, frozen or special
module and continue search in sys.path. The module name cannot
contain '.'; to search for a submodule of a package, pass the
submodule name and the package's __path__.
"""
if not isinstance(name, str):
raise TypeError("'name' must be a str, not {}".format(type(name)))
elif not isinstance(path, (type(None), list)):
# Backwards-compatibility
raise RuntimeError("'list' must be None or a list, "
"not {}".format(type(name)))
if path is None:
if is_builtin(name):
return None, None, ('', '', C_BUILTIN)
elif is_frozen(name):
return None, None, ('', '', PY_FROZEN)
else:
path = sys.path
for entry in path:
package_directory = os.path.join(entry, name)
for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
package_file_name = '__init__' + suffix
file_path = os.path.join(package_directory, package_file_name)
if os.path.isfile(file_path):
return None, package_directory, ('', '', PKG_DIRECTORY)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for suffix, mode, type_ in get_suffixes():
file_name = name + suffix
file_path = os.path.join(entry, file_name)
if os.path.isfile(file_path):
break
else:
continue
break # Break out of outer loop when breaking out of inner loop.
else:
raise ImportError(_bootstrap._ERR_MSG.format(name), name=name)
encoding = None
if mode == 'U':
with open(file_path, 'rb') as file:
encoding = tokenize.detect_encoding(file.readline)[0]
file = open(file_path, mode, encoding=encoding)
return file, file_path, (suffix, mode, type_)
_RELOADING = {}
def reload(module):
"""Reload the module and return it.
The module must have been successfully imported before.
"""
if not module or type(module) != type(sys):
raise TypeError("reload() argument must be module")
name = module.__name__
if name not in sys.modules:
msg = "module {} not in sys.modules"
raise ImportError(msg.format(name), name=name)
if name in _RELOADING:
return _RELOADING[name]
_RELOADING[name] = module
try:
parent_name = name.rpartition('.')[0]
if parent_name and parent_name not in sys.modules:
msg = "parent {!r} not in sys.modules"
raise ImportError(msg.format(parent_name), name=parent_name)
module.__loader__.load_module(name)
# The module may have replaced itself in sys.modules!
return sys.modules[module.__name__]
finally:
try:
del _RELOADING[name]
except KeyError:
pass
|
{
"content_hash": "b4fa9caba5396f54b6247d02cbc9daa3",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 83,
"avg_line_length": 34.371024734982335,
"alnum_prop": 0.6166341112367636,
"repo_name": "timm/timmnix",
"id": "408838387befba18d0f5014418727b3b824231c9",
"size": "9727",
"binary": false,
"copies": "40",
"ref": "refs/heads/master",
"path": "pypy3-v5.5.0-linux64/lib-python/3/imp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1641"
},
{
"name": "Batchfile",
"bytes": "1234"
},
{
"name": "C",
"bytes": "436685"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Common Lisp",
"bytes": "4"
},
{
"name": "Emacs Lisp",
"bytes": "290698"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1540"
},
{
"name": "Prolog",
"bytes": "14301"
},
{
"name": "Python",
"bytes": "21267592"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "27687"
},
{
"name": "TeX",
"bytes": "3052861"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import collectd
# Return the data source name from a types.db DataType definition (used with map below)
def split_to_value(item):
return item.split(":")[0]
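# For example (illustrative entry in collectd's "name:type:min:max" types.db format):
#
#     split_to_value("value:GAUGE:0:U")  # -> "value"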
# Read types.db to memory
def read_types_from_file():
global data_sources
data_sources = {}
types_file = open('./collectd_plugin/types.db', 'r')
    for line in types_file:
        exploded_line = line.split('\t', 1)
        name = exploded_line[0]
        values_sequence = exploded_line[1]
        values = values_sequence.split(", ")
        values[0] = values[0].lstrip()
        # Use a list comprehension so the result is a list under both Python 2 and 3
        data_sources[name] = [split_to_value(value) for value in values]
    types_file.close()
def init(data=None):
global f
f = open('./collectd_plugin/file.txt', 'w') # open log file
read_types_from_file() # read to mem
def shutdown(data=None):
    f.close()
def write(vl, data=None):
f.write(str(vl.time) + ";type:" + "org.collectd." + str(vl.plugin).capitalize() + ";type:" + str(vl.type_instance) + ";values:" + str(vl.values) + ";meta:" + str(data_sources[vl.type]) + "\n")
collectd.register_init(init);
collectd.register_shutdown(shutdown);
collectd.register_write(write);
|
{
"content_hash": "f241938085bb024a288bcd922f2c4e7b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 193,
"avg_line_length": 26.38095238095238,
"alnum_prop": 0.644404332129964,
"repo_name": "ngmon/ngmon",
"id": "1d58aebb6b6f1dcc9c2854cd5a7acb8dfb699b6c",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_nonjava/collectd_plugin/src/momo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9985"
},
{
"name": "Java",
"bytes": "59210"
},
{
"name": "Python",
"bytes": "1221"
},
{
"name": "Shell",
"bytes": "1274"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
import os
import gui_menu
from multiprocessing import Process, Pipe
import util
import conf
import filters
import threading
import time
# OSX has an error when launching GUI subprocesses
# If use_config_gui is false, the program will just watch ~/.headmouse
use_config_gui = sys.platform != 'darwin'
config = conf.render()
output_driver = None
vision_driver = None
camera_driver = None
smoother = None
needs_camera_reinit = False
needs_vision_reinit = False
needs_shutdown = False
needs_restart = False
def update_component(c_name, c_value):
global smoother, camera_driver, vision_driver, output_driver, needs_camera_reinit, needs_vision_reinit
if c_name == 'camera':
camera_driver = __import__('cameras.' + c_value).__dict__[c_value]
needs_camera_reinit = True
elif c_name == 'algorithm':
vision_driver = __import__('vision.' + c_value).__dict__[c_value]
needs_vision_reinit = True
elif c_name == 'output':
output_driver = __import__('output_drivers.' + c_value).__dict__[c_value]
elif c_name == 'smoothing':
smoother = filters.ema_smoother(c_value)
elif c_name == 'camera_dimensions':
needs_camera_reinit = True
elif c_name == 'camera_device_id':
needs_camera_reinit = True
def handle_gui_process_messages(parent_conn, gui_child_process, polling_wait=.001):
global needs_restart, needs_shutdown
if parent_conn.poll(polling_wait):
pipe_data = parent_conn.recv()
if 'config' in pipe_data:
config.update_all(pipe_data['config'])
elif 'control' in pipe_data:
control_message = pipe_data['control']
if control_message == 'restart':
needs_restart = True
else:
print("Unhandled control message", control_message)
else:
if not gui_child_process.is_alive():
print("GUI component has terminated.")
needs_shutdown = True
def watch_gui_process(parent_conn, gui_child_process):
while not needs_shutdown:
time.sleep(.03)
handle_gui_process_messages(parent_conn, gui_child_process)
def watch_config():
    # Todo: remove the global declaration; since dicts are mutable, it should work without it.
global config
while not needs_shutdown:
time.sleep(1)
config.update_all(conf.render())
if __name__ == '__main__':
if use_config_gui:
# GUI process setup
parent_conn, child_conn = Pipe()
gui_child_process = Process(target=gui_menu.initialize, args=(child_conn,))
gui_child_process.start()
handle_gui_process_messages(parent_conn, gui_child_process, polling_wait=1)
gui_watcher_thread = threading.Thread(target=watch_gui_process, args=(parent_conn, gui_child_process))
gui_watcher_thread.start()
else:
print("Gui menu can't be launched directly on OS X, you can launch gui_menu.py in a separete process.")
config_file_watcher = threading.Thread(target=watch_config)
config_file_watcher.start()
# Application restart involves multiple processes and can be triggered from multiple places.
def restart():
if use_config_gui:
gui_child_process.terminate()
python = sys.executable
os.execl(python, python, * sys.argv)
xy_delta_gen = filters.relative_movement()
fps = util.simple_fps()
freq = 60
if use_config_gui:
send_fps = util.Every_n(freq, lambda: parent_conn.send(str( float("{0:.2f}".format(fps.next() * freq)))))
else:
send_fps = util.Every_n(freq, lambda: print(str( float("{0:.2f}".format(fps.next() * freq)))))
config.register_callback('output', update_component)
config.register_callback('algorithm', update_component)
config.register_callback('camera', update_component)
config.register_callback('smoothing', update_component)
config.register_callback('camera_dimensions', update_component)
config.register_callback('camera_device_id', update_component)
config.execute_all_callbacks()
    # Todo: See if there's a cleaner way to structure the nested whiles; approval of PEP 3136 would have been nice.
while not (needs_shutdown or needs_restart):
with camera_driver.Camera(config) as cam:
needs_camera_reinit = False
while not (needs_camera_reinit or needs_shutdown or needs_restart):
with vision_driver.Vision(cam, config) as viz:
needs_vision_reinit = False
display_frame = util.Every_n(4, viz.display_image)
while not (needs_vision_reinit or needs_camera_reinit or needs_shutdown or needs_restart):
try:
# Frame processing
viz.get_image()
coords = viz.process()
if coords is not None and None not in coords:
coords = filters.mirror(coords)
abs_pos_x, abs_pos_y, abs_pos_z = coords
xy = xy_delta_gen.send((abs_pos_x, abs_pos_y))
if not filters.detect_outliers(xy, config['max_input_distance']):
xy = smoother.send(xy)
xy = filters.accelerate(xy, config)
output_driver.send_xy(xy)
if config['display']:
display_frame.next()
send_fps.next()
except KeyboardInterrupt:
needs_restart = False
needs_shutdown = True
if needs_restart:
restart()
if use_config_gui:
gui_child_process.terminate()
sys.exit()
|
{
"content_hash": "49fdfc571a2b81fe1422ce05f6931cfb",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 113,
"avg_line_length": 36.239263803680984,
"alnum_prop": 0.5994582698493313,
"repo_name": "aranchelk/headmouse",
"id": "9854ce22ae4c3f37e386d074882768dbd428b3a9",
"size": "6666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "headmouse/hm2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "3681"
},
{
"name": "Python",
"bytes": "70454"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
"securityAlertPolicyName": _SERIALIZER.url("security_alert_policy_name", security_alert_policy_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
"securityAlertPolicyName": _SERIALIZER.url("security_alert_policy_name", security_alert_policy_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class WorkspaceManagedSqlServerSecurityAlertPolicyOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.synapse.SynapseManagementClient`'s
:attr:`workspace_managed_sql_server_security_alert_policy` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
**kwargs: Any
) -> _models.ServerSecurityAlertPolicy:
"""Get server's security alert policy.
Get a workspace managed sql server's security alert policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param security_alert_policy_name: The name of the security alert policy. "Default" Required.
:type security_alert_policy_name: str or
~azure.mgmt.synapse.models.SecurityAlertPolicyNameAutoGenerated
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerSecurityAlertPolicy or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.ServerSecurityAlertPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
cls: ClsType[_models.ServerSecurityAlertPolicy] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
security_alert_policy_name=security_alert_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ServerSecurityAlertPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}"
}
def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
parameters: Union[_models.ServerSecurityAlertPolicy, IO],
**kwargs: Any
) -> Optional[_models.ServerSecurityAlertPolicy]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ServerSecurityAlertPolicy]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ServerSecurityAlertPolicy")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
security_alert_policy_name=security_alert_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ServerSecurityAlertPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
parameters: _models.ServerSecurityAlertPolicy,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ServerSecurityAlertPolicy]:
"""Create or Update server's threat detection policy.
Create or Update a workspace managed sql server's threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param security_alert_policy_name: The name of the security alert policy. "Default" Required.
:type security_alert_policy_name: str or
~azure.mgmt.synapse.models.SecurityAlertPolicyNameAutoGenerated
:param parameters: The workspace managed sql server security alert policy. Required.
:type parameters: ~azure.mgmt.synapse.models.ServerSecurityAlertPolicy
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ServerSecurityAlertPolicy or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ServerSecurityAlertPolicy]:
"""Create or Update server's threat detection policy.
Create or Update a workspace managed sql server's threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param security_alert_policy_name: The name of the security alert policy. "Default" Required.
:type security_alert_policy_name: str or
~azure.mgmt.synapse.models.SecurityAlertPolicyNameAutoGenerated
:param parameters: The workspace managed sql server security alert policy. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ServerSecurityAlertPolicy or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
security_alert_policy_name: Union[str, _models.SecurityAlertPolicyNameAutoGenerated],
parameters: Union[_models.ServerSecurityAlertPolicy, IO],
**kwargs: Any
) -> LROPoller[_models.ServerSecurityAlertPolicy]:
"""Create or Update server's threat detection policy.
Create or Update a workspace managed sql server's threat detection policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param security_alert_policy_name: The name of the security alert policy. "Default" Required.
:type security_alert_policy_name: str or
~azure.mgmt.synapse.models.SecurityAlertPolicyNameAutoGenerated
:param parameters: The workspace managed sql server security alert policy. Is either a model
        type or an IO type. Required.
:type parameters: ~azure.mgmt.synapse.models.ServerSecurityAlertPolicy or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ServerSecurityAlertPolicy or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.synapse.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ServerSecurityAlertPolicy] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
security_alert_policy_name=security_alert_policy_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ServerSecurityAlertPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies/{securityAlertPolicyName}"
}
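# Illustrative usage only (not part of the generated class): a caller would normally
# reach this operations group through SynapseManagementClient and block on the
# returned poller. The credential setup, the operations attribute name, and the
# resource names/values below are assumptions for this sketch.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.synapse import SynapseManagementClient
#   from azure.mgmt.synapse.models import ServerSecurityAlertPolicy
#
#   client = SynapseManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.workspace_managed_sql_server_security_alert_policy.begin_create_or_update(
#       resource_group_name="my-rg",
#       workspace_name="my-workspace",
#       security_alert_policy_name="Default",
#       parameters=ServerSecurityAlertPolicy(state="Enabled"),
#   )
#   policy = poller.result()  # blocks until the long-running operation completes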
@distributed_trace
def list(
self, resource_group_name: str, workspace_name: str, **kwargs: Any
) -> Iterable["_models.ServerSecurityAlertPolicy"]:
"""Get server's threat detection policies.
Get workspace managed sql server's threat detection policies.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServerSecurityAlertPolicy or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.ServerSecurityAlertPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
cls: ClsType[_models.ServerSecurityAlertPolicyListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ServerSecurityAlertPolicyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/securityAlertPolicies"
}
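# Illustrative usage only: list() returns a lazy ItemPaged iterator, so callers can
# loop over policies directly while pages are fetched on demand through
# prepare_request/extract_data. The variable and resource names are assumptions.
#
#   for policy in client.workspace_managed_sql_server_security_alert_policy.list(
#           resource_group_name="my-rg", workspace_name="my-workspace"):
#       print(policy.name, policy.state)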
|
{
"content_hash": "d33c1976e97abaaa59a75f4bc6419089",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 186,
"avg_line_length": 46.31382978723404,
"alnum_prop": 0.6632211630488879,
"repo_name": "Azure/azure-sdk-for-python",
"id": "308a562b141564a5d5b6b29318c40d319a6f38c0",
"size": "26621",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_workspace_managed_sql_server_security_alert_policy_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
import sys
from django.utils.importlib import import_module
def setup_environ(dunder_file=None, project_path=None, relative_project_path=None, settings_path=None):
assert not (dunder_file and project_path), ("You must not specify both "
"__file__ and project_path")
if dunder_file is not None:
file_path = os.path.abspath(os.path.dirname(dunder_file))
if relative_project_path is not None:
project_path = os.path.abspath(os.path.join(file_path, *relative_project_path))
else:
project_path = file_path
# the basename must be the project name and importable.
project_name = os.path.basename(project_path)
# setup Django correctly (the hard-coding of settings is only temporary.
# carljm's proposal will remove that)
if settings_path is None:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ["DJANGO_SETTINGS_MODULE"] = "%s.settings" % project_name
else:
os.environ["DJANGO_SETTINGS_MODULE"] = settings_path
# ensure the importablity of project
sys.path.append(os.path.join(project_path, os.pardir))
import_module(project_name)
sys.path.pop()
# adds an app directory for users as a reliable location for
# Django apps
sys.path.insert(0, os.path.join(project_path, "apps"))
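# Illustrative usage only (not part of this module): a project-level manage.py or a
# standalone script could bootstrap Django through setup_environ before importing
# project code. The import path and project layout below are assumptions.
#
#   from environment import setup_environ
#
#   # derive the project package from this file's location, set
#   # DJANGO_SETTINGS_MODULE to "<project>.settings" if unset, and put
#   # <project>/apps on sys.path
#   setup_environ(dunder_file=__file__)
#
#   # a script one level above the project package could instead pass:
#   # setup_environ(dunder_file=__file__, relative_project_path=("myproject",))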
|
{
"content_hash": "4f0a26ec09b80048c59e1edd01c7374e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 103,
"avg_line_length": 37.80555555555556,
"alnum_prop": 0.6700955180014695,
"repo_name": "zhiwehu/IBookmark",
"id": "10c90429a6adb8484418adb0086125119b848f6b",
"size": "1361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "460708"
},
{
"name": "Python",
"bytes": "132225"
}
],
"symlink_target": ""
}
|
import artistools.makemodel.botyanski2017
|
{
"content_hash": "4bd2cadac8581ce2e5906ddcba1e21cc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 41,
"avg_line_length": 41,
"alnum_prop": 0.926829268292683,
"repo_name": "lukeshingles/artistools",
"id": "8eceb50b381a86e0da6e4cb1446f4846a108ade1",
"size": "65",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artistools/makemodel/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "437553"
},
{
"name": "Shell",
"bytes": "497"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('order', '0032_auto_20200427_0044'),
]
operations = [
migrations.AddField(
model_name='purchaseorderattachment',
name='user',
field=models.ForeignKey(blank=True, help_text='User', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='salesorderattachment',
name='user',
field=models.ForeignKey(blank=True, help_text='User', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
|
{
"content_hash": "73a0a96a45b030f73e8a37e7e6599edf",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 152,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.652122641509434,
"repo_name": "inventree/InvenTree",
"id": "2c3abbb0d0d36219b90fccb45131d8ccb71d65c0",
"size": "897",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "InvenTree/order/migrations/0033_auto_20200512_1033.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246444"
},
{
"name": "Dockerfile",
"bytes": "7169"
},
{
"name": "HTML",
"bytes": "586821"
},
{
"name": "JavaScript",
"bytes": "1970070"
},
{
"name": "Procfile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2606104"
},
{
"name": "Shell",
"bytes": "27115"
}
],
"symlink_target": ""
}
|
import rosunit
import unittest
import re
from flexbe_core.core.user_data import UserData
from .logger import Logger
from .test_interface import TestInterface
from .test_context import TestContext, LaunchContext
from .data_provider import DataProvider
class Tester(object):
def __init__(self):
self._tests = dict()
def run_test(self, name, config):
try:
self._verify_config(config)
except Exception as e:
Logger.print_title(name, 'Invalid', None)
Logger.print_error('invalid test specification!\n\t%s' % str(e))
Logger.print_result(name, False)
self._tests['test_%s_pass' % name] = self._test_config_invalid(str(e))
return 0
# allow specifying a behavior name instead of the generated module and class
if 'name' in config:
config['path'] += '.%s_sm' % re.sub(r'[^\w]', '_', config['name'].lower())
config['class'] = '%sSM' % re.sub(r'[^\w]', '', config['name'])
import_only = config.get('import_only', False)
Logger.print_title(name, config['class'], config['outcome'] if not import_only else None)
# import test subject
try:
test_interface = TestInterface(config['path'], config['class'])
except Exception as e:
Logger.print_failure('unable to import state %s (%s):\n\t%s' %
(config['class'], config['path'], str(e)))
self._tests['test_%s_pass' % name] = self._test_pass(False)
return 0
if not import_only:
# prepare test context
context = None
if 'launch' in config:
context = LaunchContext(config['launch'], config.get('wait_cond', 'True'))
else:
context = TestContext()
# load data source
try:
data = DataProvider(bagfile=config.get('data', None))
except Exception as e:
Logger.print_failure('unable to load data source %s:\n\t%s' %
(config['data'], str(e)))
self._tests['test_%s_pass' % name] = self._test_pass(False)
return 0
# run test context
with context:
if not context.verify():
Logger.print_error('failed to initialize test context:\n\t%s' % config['launch'])
self._tests['test_%s_pass' % name] = self._test_pass(False)
return 0
# instantiate test subject
params = {key: data.parse(value) for key, value in list(config.get('params', dict()).items())}
try:
test_interface.instantiate(params)
except Exception as e:
Logger.print_failure('unable to instantiate %s (%s) with params:\n\t%s\n\t%s' %
(config['class'], config['path'], str(params), str(e)))
self._tests['test_%s_pass' % name] = self._test_pass(False)
return 0
# prepare user data
userdata = UserData()
for input_key, input_value in list(config.get('input', dict()).items()):
userdata[input_key] = data.parse(input_value)
expected = {key: data.parse(value) for key, value in config.get('output', dict()).items()}
# run test subject
try:
outcome = test_interface.execute(userdata, spin_cb=context.spin_once)
except Exception as e:
Logger.print_failure('failed to execute %s (%s)\n\t%s' %
(config['class'], config['path'], str(e)))
self._tests['test_%s_pass' % name] = self._test_pass(False)
return 0
if config.get('require_launch_success', False):
context.wait_for_finishing()
# evaluate outcome
self._tests['test_%s_outcome' % name] = self._test_outcome(outcome, config['outcome'])
outcome_ok = outcome == config['outcome']
if outcome_ok:
Logger.print_positive('correctly returned outcome %s' % outcome)
else:
Logger.print_negative('wrong outcome: %s' % outcome)
# evaluate output
output_ok = True
for expected_key, expected_value in list(expected.items()):
if expected_key in userdata:
equals = userdata[expected_key] == expected_value
self._tests['test_%s_output_%s' % (name, expected_key)] = \
self._test_output(userdata[expected_key], expected_value)
if not equals:
Logger.print_negative('wrong result for %s: %s != %s' %
(expected_key, userdata[expected_key], expected_value))
output_ok = False
else:
Logger.print_negative('no result for %s' % expected_key)
output_ok = False
if not context.success and config.get('require_launch_success', False):
Logger.print_negative('Launch file did not exit cleanly')
output_ok = False
if len(expected) > 0 and output_ok:
Logger.print_positive('all result outputs match expected')
# report result
success = import_only or outcome_ok and output_ok
Logger.print_result(name, success)
self._tests['test_%s_pass' % name] = self._test_pass(success)
return 1 if success else 0
def _verify_config(self, config):
if not isinstance(config, dict):
raise AssertionError('config needs to be a dictionary but is:\n\t%s' % str(config))
assert 'path' in config
assert 'class' in config or 'name' in config
assert 'outcome' in config or config.get('import_only', False)
# ROSUNIT interface
def perform_rostest(self, test_pkg):
TestCase = type(test_pkg + '_test_class', (unittest.TestCase,), self._tests)
rosunit.unitrun(test_pkg, test_pkg + '_flexbe_tests', TestCase)
def _test_output(self, value, expected):
def _test_call(test_self):
test_self.assertEqual(value, expected, "Output value %s does not match expected %s" % (value, expected))
return _test_call
def _test_outcome(self, outcome, expected):
def _test_call(test_self):
test_self.assertEqual(outcome, expected, "Outcome %s does not match expected %s" % (outcome, expected))
return _test_call
def _test_pass(self, passed):
def _test_call(test_self):
test_self.assertTrue(passed, "Did not pass configured tests.")
return _test_call
def _test_config_invalid(self, config):
def _test_call(test_self):
test_self.fail("Test config is invalid: %s" % config)
return _test_call
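# Illustrative test specification only: run_test() consumes a config dict of roughly
# this shape (normally loaded from a test definition file). The keys mirror the
# lookups in run_test/_verify_config; the module, class, and values below are
# hypothetical.
#
#   example_config = {
#       'path': 'my_pkg.my_state',       # importable module (hypothetical)
#       'class': 'MyState',              # state class in that module, or use 'name'
#       'outcome': 'done',               # expected outcome unless 'import_only' is set
#       'params': {'threshold': 0.5},    # constructor parameters
#       'input': {'value': 2},           # userdata passed in
#       'output': {'result': 4},         # expected userdata after execution
#       # 'launch': 'my_pkg/launch/test_env.launch',  # optional LaunchContext
#       # 'data': 'recorded_input.bag',               # optional bagfile for DataProvider
#   }
#   Tester().run_test('my_state_check', example_config)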
|
{
"content_hash": "f017671f3eb59f931c71f09462108a9d",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 116,
"avg_line_length": 43.298780487804876,
"alnum_prop": 0.5414730319673285,
"repo_name": "team-vigir/flexbe_behavior_engine",
"id": "8a386328b3b988e6eb84c5c521757a75578df5e4",
"size": "7123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexbe_testing/src/flexbe_testing/tester.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "8992"
},
{
"name": "Python",
"bytes": "204730"
},
{
"name": "Shell",
"bytes": "2896"
}
],
"symlink_target": ""
}
|
"""Tests for the Zsh extended_history parser."""
import unittest
from plaso.lib import timelib
from plaso.parsers import zsh_extended_history
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class ZshExtendedHistoryTest(test_lib.ParserTestCase):
"""Tests for the Zsh extended_history parser."""
@shared_test_lib.skipUnlessHasTestFile([u'zsh_extended_history.txt'])
def testParse(self):
"""Tests for the Parse method."""
parser_object = zsh_extended_history.ZshExtendedHistoryParser()
storage_writer = self._ParseFile(
[u'zsh_extended_history.txt'], parser_object)
self.assertEqual(len(storage_writer.events), 4)
event = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2016-03-12 08:26:50')
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(event.elapsed_seconds, 0)
self.assertEqual(event.command, u'cd plaso')
event = storage_writer.events[2]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2016-03-26 11:54:53')
expected_command = u'echo dfgdfg \\\\\n& touch /tmp/afile'
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(event.command, expected_command)
event = storage_writer.events[3]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2016-03-26 11:54:57')
self.assertEqual(event.timestamp, expected_timestamp)
def testVerification(self):
"""Tests for the VerifyStructure method"""
parser_object = zsh_extended_history.ZshExtendedHistoryParser()
mediator = None
valid_lines = u': 1457771210:0;cd plaso'
self.assertTrue(parser_object.VerifyStructure(mediator, valid_lines))
invalid_lines = u': 2016-03-26 11:54:53;0;cd plaso'
self.assertFalse(parser_object.VerifyStructure(mediator, invalid_lines))
if __name__ == '__main__':
unittest.main()
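# Format note derived from the fixtures above: each Zsh extended_history entry is a
# single line of the form ": <start epoch>:<elapsed seconds>;<command>", e.g.
# ": 1457771210:0;cd plaso". VerifyStructure accepts that shape and rejects lines
# whose timestamp field is a human-readable date instead of an epoch value.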
|
{
"content_hash": "37b6928bad0422a3f6588c14f19be650",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 34.92727272727273,
"alnum_prop": 0.7225403435710568,
"repo_name": "dc3-plaso/plaso",
"id": "11c9a170bd53485d47507dfdb6ba191d20a39285",
"size": "1963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/zsh_extended_history.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
}
|
"""
Convert MBGA avatars from GIF -> PNG format so OpenCV can load them.
Requires ImageMagick to be installed (www.imagemagick.org)
"""
import os
import glob
def convert():
gifs = glob.glob(os.path.join("data/mbga/avatar/", "*.gif"))
for gif in gifs:
png = gif.replace(".gif", ".png")
# only save the first frame of the animated GIF
os.system("convert {0}[0] {1}".format(gif, png))
os.remove(gif)
if __name__=="__main__":
convert()
|
{
"content_hash": "ee8414e85dd9c876ae4b6c2f5b629b2c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 26.941176470588236,
"alnum_prop": 0.6441048034934498,
"repo_name": "6/jcrawler",
"id": "8fc70ec4a83783e049c7dd822dc2b02c0008f288",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbga_convert_avatar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "12634"
},
{
"name": "Python",
"bytes": "18650"
}
],
"symlink_target": ""
}
|
"""Tests the RelationshipUtil."""
from tests.unittest_utils import ForsetiTestCase
import unittest.mock as mock
import unittest
from google.cloud.forseti.common.util import relationship
class RelationshipUtilTest(ForsetiTestCase):
"""Test relationship_util."""
def test_get_resource_ancestors_from_full_name(self):
# resource is organization
mock_starting_resource = mock.MagicMock()
mock_starting_resource.type = 'organization'
mock_starting_resource.id = 'org1'
resource_ancestors = (
relationship.find_ancestors(
mock_starting_resource,
'organization/org1/'))
self.assertEqual(1, len(resource_ancestors))
# resource is project
mock_starting_resource.type = 'project'
mock_starting_resource.id = 'project3'
resource_ancestors = (
relationship.find_ancestors(
mock_starting_resource,
'organization/org1/folder/folder2/project/project3/'))
self.assertEqual(3, len(resource_ancestors))
self.assertEqual(mock_starting_resource, resource_ancestors[0])
self.assertEqual('folder2', resource_ancestors[1].id)
self.assertEqual('org1', resource_ancestors[2].id)
# resource has multiple folders, and subproject resources
mock_starting_resource.type = 'firewall'
mock_starting_resource.id = 'firewall5'
resource_ancestors = (
relationship.find_ancestors(
mock_starting_resource,
('organization/org1/folder/folder2/folder/folder3/'
'project/project4/firewall/firewall5/')))
self.assertEqual(5, len(resource_ancestors))
self.assertEqual(mock_starting_resource, resource_ancestors[0])
self.assertEqual('project4', resource_ancestors[1].id)
self.assertEqual('folder3', resource_ancestors[2].id)
self.assertEqual('folder2', resource_ancestors[3].id)
self.assertEqual('org1', resource_ancestors[4].id)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "276fd8d91ea7fcebbbf6d74aaf86f299",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 71,
"avg_line_length": 37.03508771929825,
"alnum_prop": 0.6475603979156798,
"repo_name": "forseti-security/forseti-security",
"id": "60a7c3c005da795f7aeac80bc936da0ac589a80d",
"size": "2725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/common/util/relationship_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3652"
},
{
"name": "HCL",
"bytes": "37409"
},
{
"name": "JavaScript",
"bytes": "1833"
},
{
"name": "Jinja",
"bytes": "6379"
},
{
"name": "Makefile",
"bytes": "5427"
},
{
"name": "Open Policy Agent",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "4140122"
},
{
"name": "Ruby",
"bytes": "37434"
},
{
"name": "Shell",
"bytes": "17062"
}
],
"symlink_target": ""
}
|
import ddt
from django.urls import reverse
from unittest import mock
from horizon import exceptions as horizon_exceptions
from manila_ui.api import manila as api_manila
from manila_ui.tests.dashboards.project import test_data
from manila_ui.tests import helpers as test
INDEX_URL = reverse('horizon:project:user_messages:index')
@ddt.ddt
class UserMessagesViewTests(test.TestCase):
@ddt.data(None, Exception('fake'))
def test_view(self, exc):
message_1 = test_data.fake_message_1
message_2 = test_data.fake_message_2
message_3 = test_data.fake_message_3
fake_message_list = [message_1, message_2, message_3]
url = reverse('horizon:project:user_messages:index')
self.mock_object(
api_manila, "messages_list",
mock.Mock(side_effect=exc, return_value=fake_message_list))
self.client.get(url)
self.assertNoMessages()
api_manila.messages_list.assert_called_once_with(mock.ANY)
@ddt.data(None, Exception('fake'))
def test_delete_message(self, exc):
message = test_data.fake_message_1
formData = {'action': 'user_messages__delete__%s' % message.id}
self.mock_object(api_manila, "messages_delete",
mock.Mock(side_effect=exc))
self.mock_object(
api_manila, "messages_list",
mock.Mock(return_value=[message]))
res = self.client.post(INDEX_URL, formData)
self.assertEqual(res.status_code, 302)
self.assertRedirectsNoFollow(res, INDEX_URL)
api_manila.messages_list.assert_called_once_with(mock.ANY)
api_manila.messages_delete.assert_called_once_with(
mock.ANY, test_data.fake_message_1.id)
@ddt.ddt
class UserMessagesDetailViewTests(test.TestCase):
def test_detail_view(self):
message = test_data.fake_message_1
url = reverse('horizon:project:user_messages:user_messages_detail',
args=[message.id])
self.mock_object(
api_manila, "messages_get", mock.Mock(return_value=message))
res = self.client.get(url)
self.assertContains(res, "<h1>User Message Details: %s</h1>"
% message.id,
1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.id, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.action_id, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.user_message, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.message_level, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.resource_type, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.resource_id, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % message.request_id, 1, 200)
self.assertNoMessages()
api_manila.messages_get.assert_called_once_with(
mock.ANY, message.id)
def test_detail_view_with_exception(self):
message = test_data.fake_message_1
url = reverse(
'horizon:project:user_messages:user_messages_detail',
args=[message.id])
self.mock_object(
api_manila, "messages_get",
mock.Mock(side_effect=horizon_exceptions.NotFound(404)))
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
api_manila.messages_get.assert_called_once_with(mock.ANY, message.id)
|
{
"content_hash": "052bfe1d45b9df34ec03275ec08f1fc3",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 38.355555555555554,
"alnum_prop": 0.6286210892236385,
"repo_name": "openstack/manila-ui",
"id": "5fbee91270d863f4946bd5c1a20b487ebd87266f",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila_ui/tests/dashboards/project/user_messages/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "72666"
},
{
"name": "Python",
"bytes": "756045"
},
{
"name": "Shell",
"bytes": "20977"
}
],
"symlink_target": ""
}
|
import eventlet
from eventlet import semaphore
import netaddr
from oslo.config import cfg
from quantum.agent.common import config
from quantum.agent.linux import external_process
from quantum.agent.linux import interface
from quantum.agent.linux import ip_lib
from quantum.agent.linux import iptables_manager
from quantum.agent.linux import utils
from quantum.agent import rpc as agent_rpc
from quantum.common import constants as l3_constants
from quantum.common import topics
from quantum.common import utils as common_utils
from quantum import context
from quantum import manager
from quantum.openstack.common import importutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import loopingcall
from quantum.openstack.common import periodic_task
from quantum.openstack.common.rpc import common as rpc_common
from quantum.openstack.common.rpc import proxy
from quantum.openstack.common import service
from quantum import service as quantum_service
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
class L3PluginApi(proxy.RpcProxy):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic, host):
super(L3PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, fullsync=True, router_id=None):
"""Make a remote process call to retrieve the sync data for routers."""
router_ids = [router_id] if router_id else None
return self.call(context,
self.make_msg('sync_routers', host=self.host,
fullsync=fullsync,
router_ids=router_ids),
topic=self.topic)
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
@raise common.RemoteError: with TooManyExternalNetworks
as exc_type if there are
more than one external network
"""
return self.call(context,
self.make_msg('get_external_network_id',
host=self.host),
topic=self.topic)
class RouterInfo(object):
def __init__(self, router_id, root_helper, use_namespaces, router):
self.router_id = router_id
self.ex_gw_port = None
self.internal_ports = []
self.floating_ips = []
self.root_helper = root_helper
self.use_namespaces = use_namespaces
self.router = router
self.iptables_manager = iptables_manager.IptablesManager(
root_helper=root_helper,
#FIXME(danwent): use_ipv6=True,
namespace=self.ns_name())
self.routes = []
def ns_name(self):
if self.use_namespaces:
return NS_PREFIX + self.router_id
class L3NATAgent(manager.Manager):
OPTS = [
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual "
"interface.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port used by Quantum metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
default=3,
help=_("Send this many gratuitous ARPs for HA setup, "
"set it below or equal to 0 to disable this "
"feature.")),
cfg.BoolOpt('use_namespaces', default=True,
help=_("Allow overlapping IP.")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" configure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
]
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.root_helper = config.get_root_helper(self.conf)
self.router_info = {}
if not self.conf.interface_driver:
raise SystemExit(_('An interface driver must be specified'))
try:
self.driver = importutils.import_object(self.conf.interface_driver,
self.conf)
except Exception:
msg = _("Error importing interface driver "
"'%s'") % self.conf.interface_driver
raise SystemExit(msg)
self.context = context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.PLUGIN, host)
self.fullsync = True
self.sync_sem = semaphore.Semaphore(1)
if self.conf.use_namespaces:
self._destroy_router_namespaces(self.conf.router_id)
super(L3NATAgent, self).__init__(host=self.conf.host)
def _destroy_router_namespaces(self, only_router_id=None):
"""Destroy router namespaces on the host to eliminate all stale
linux devices, iptables rules, and namespaces.
If only_router_id is passed, destroy only that single namespace, to allow
for multiple l3 agents on the same host without stepping on each
other's toes on init. This only makes sense if router_id is set.
"""
root_ip = ip_lib.IPWrapper(self.root_helper)
for ns in root_ip.get_namespaces(self.root_helper):
if ns.startswith(NS_PREFIX):
if only_router_id and not ns.endswith(only_router_id):
continue
try:
self._destroy_router_namespace(ns)
except Exception:
LOG.exception(_("Failed deleting namespace '%s'"), ns)
def _destroy_router_namespace(self, namespace):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=namespace,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(d.name,
bridge=self.conf.external_network_bridge,
namespace=namespace,
prefix=EXTERNAL_DEV_PREFIX)
#TODO(garyk) Address the failure for the deletion of the namespace
def _create_router_namespace(self, ri):
ip_wrapper_root = ip_lib.IPWrapper(self.root_helper)
ip_wrapper = ip_wrapper_root.ensure_namespace(ri.ns_name())
ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
def _fetch_external_net_id(self):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
try:
return self.plugin_rpc.get_external_network_id(self.context)
except rpc_common.RemoteError as e:
if e.exc_type == 'TooManyExternalNetworks':
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Quantum has more than "
"one external network.")
raise Exception(msg)
else:
raise
def _router_added(self, router_id, router):
ri = RouterInfo(router_id, self.root_helper,
self.conf.use_namespaces, router)
self.router_info[router_id] = ri
if self.conf.use_namespaces:
self._create_router_namespace(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
self._spawn_metadata_proxy(ri)
def _router_removed(self, router_id):
ri = self.router_info[router_id]
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
self.process_router(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
self._destroy_metadata_proxy(ri)
del self.router_info[router_id]
self._destroy_router_namespace(ri.ns_name())
def _spawn_metadata_proxy(self, router_info):
def callback(pid_file):
proxy_cmd = ['quantum-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--router_id=%s' % router_info.router_id,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%s' % self.conf.metadata_port]
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'quantum-ns-metadata-proxy-%s.log' %
router_info.router_id))
return proxy_cmd
pm = external_process.ProcessManager(
self.conf,
router_info.router_id,
self.root_helper,
router_info.ns_name())
pm.enable(callback)
def _destroy_metadata_proxy(self, router_info):
pm = external_process.ProcessManager(
self.conf,
router_info.router_id,
self.root_helper,
router_info.ns_name())
pm.disable()
def _set_subnet_info(self, port):
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"),
port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
def process_router(self, ri):
ex_gw_port = self._get_ex_gw_port(ri)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports if
p['id'] in current_port_ids and
p['id'] not in existing_port_ids]
old_ports = [p for p in ri.internal_ports if
p['id'] not in current_port_ids]
for p in new_ports:
self._set_subnet_info(p)
ri.internal_ports.append(p)
self.internal_network_added(ri, ex_gw_port,
p['network_id'], p['id'],
p['ip_cidr'], p['mac_address'])
for p in old_ports:
ri.internal_ports.remove(p)
self.internal_network_removed(ri, ex_gw_port, p['id'],
p['ip_cidr'])
internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports]
if ex_gw_port and not ri.ex_gw_port:
self._set_subnet_info(ex_gw_port)
self.external_gateway_added(ri, ex_gw_port, internal_cidrs)
elif not ex_gw_port and ri.ex_gw_port:
self.external_gateway_removed(ri, ri.ex_gw_port,
internal_cidrs)
if ri.ex_gw_port or ex_gw_port:
self.process_router_floating_ips(ri, ex_gw_port)
ri.ex_gw_port = ex_gw_port
self.routes_updated(ri)
def process_router_floating_ips(self, ri, ex_gw_port):
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
existing_floating_ip_ids = set([fip['id'] for fip in ri.floating_ips])
cur_floating_ip_ids = set([fip['id'] for fip in floating_ips])
id_to_fip_map = {}
for fip in floating_ips:
if fip['port_id']:
if fip['id'] not in existing_floating_ip_ids:
ri.floating_ips.append(fip)
self.floating_ip_added(ri, ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
# store to see if floatingip was remapped
id_to_fip_map[fip['id']] = fip
floating_ip_ids_to_remove = (existing_floating_ip_ids -
cur_floating_ip_ids)
for fip in ri.floating_ips:
if fip['id'] in floating_ip_ids_to_remove:
ri.floating_ips.remove(fip)
self.floating_ip_removed(ri, ri.ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
else:
# handle remapping of a floating IP
new_fip = id_to_fip_map[fip['id']]
new_fixed_ip = new_fip['fixed_ip_address']
existing_fixed_ip = fip['fixed_ip_address']
if (new_fixed_ip and existing_fixed_ip and
new_fixed_ip != existing_fixed_ip):
floating_ip = fip['floating_ip_address']
self.floating_ip_removed(ri, ri.ex_gw_port,
floating_ip, existing_fixed_ip)
self.floating_ip_added(ri, ri.ex_gw_port,
floating_ip, new_fixed_ip)
ri.floating_ips.remove(fip)
ri.floating_ips.append(new_fip)
def _get_ex_gw_port(self, ri):
return ri.router.get('gw_port')
def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
if self.conf.send_arp_for_ha > 0:
arping_cmd = ['arping', '-A', '-U',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
ip_address]
try:
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
else:
utils.execute(arping_cmd, check_exit_code=True,
root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Failed sending gratuitous ARP: %s"), str(e))
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def external_gateway_added(self, ri, ex_gw_port, internal_cidrs):
interface_name = self.get_external_device_name(ex_gw_port['id'])
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'], interface_name,
ex_gw_port['mac_address'],
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name(),
prefix=EXTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
namespace=ri.ns_name())
ip_address = ex_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
gw_ip = ex_gw_port['subnet']['gateway_ip']
if ex_gw_port['subnet']['gateway_ip']:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
utils.execute(cmd, check_exit_code=False,
root_helper=self.root_helper)
for (c, r) in self.external_gateway_nat_rules(ex_gw_ip,
internal_cidrs,
interface_name):
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
def external_gateway_removed(self, ri, ex_gw_port, internal_cidrs):
interface_name = self.get_external_device_name(ex_gw_port['id'])
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.unplug(interface_name,
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name(),
prefix=EXTERNAL_DEV_PREFIX)
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
for c, r in self.external_gateway_nat_rules(ex_gw_ip, internal_cidrs,
interface_name):
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
def metadata_filter_rules(self):
rules = []
rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' % self.conf.metadata_port))
return rules
def metadata_nat_rules(self):
rules = []
rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %s' % self.conf.metadata_port))
return rules
def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs,
interface_name):
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})]
for cidr in internal_cidrs:
rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
return rules
def internal_network_added(self, ri, ex_gw_port, network_id, port_id,
internal_cidr, mac_address):
interface_name = self.get_internal_device_name(port_id)
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ri.ns_name(),
prefix=INTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [internal_cidr],
namespace=ri.ns_name())
ip_address = internal_cidr.split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
if ex_gw_port:
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
for c, r in self.internal_network_nat_rules(ex_gw_ip,
internal_cidr):
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
def internal_network_removed(self, ri, ex_gw_port, port_id, internal_cidr):
interface_name = self.get_internal_device_name(port_id)
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.unplug(interface_name, namespace=ri.ns_name(),
prefix=INTERNAL_DEV_PREFIX)
if ex_gw_port:
ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
for c, r in self.internal_network_nat_rules(ex_gw_ip,
internal_cidr):
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
def internal_network_nat_rules(self, ex_gw_ip, internal_cidr):
rules = [('snat', '-s %s -j SNAT --to-source %s' %
(internal_cidr, ex_gw_ip))]
return rules
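# Illustrative output only: for a router whose external gateway device is qg-aaaa
# with gateway IP 172.24.4.2 and a single internal subnet 10.0.0.0/24, the two rule
# builders above would produce roughly these iptables entries (device name and
# addresses are assumptions for the sketch):
#
#   POSTROUTING: ! -i qg-aaaa ! -o qg-aaaa -m conntrack ! --ctstate DNAT -j ACCEPT
#   snat:        -s 10.0.0.0/24 -j SNAT --to-source 172.24.4.2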
def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
ip_cidr = str(floating_ip) + '/32'
interface_name = self.get_external_device_name(ex_gw_port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name())
if ip_cidr not in [addr['cidr'] for addr in device.addr.list()]:
net = netaddr.IPNetwork(ip_cidr)
device.addr.add(net.version, ip_cidr, str(net.broadcast))
self._send_gratuitous_arp_packet(ri, interface_name, floating_ip)
for chain, rule in self.floating_forward_rules(floating_ip, fixed_ip):
ri.iptables_manager.ipv4['nat'].add_rule(chain, rule)
ri.iptables_manager.apply()
def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
ip_cidr = str(floating_ip) + '/32'
net = netaddr.IPNetwork(ip_cidr)
interface_name = self.get_external_device_name(ex_gw_port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name())
device.addr.delete(net.version, ip_cidr)
for chain, rule in self.floating_forward_rules(floating_ip, fixed_ip):
ri.iptables_manager.ipv4['nat'].remove_rule(chain, rule)
ri.iptables_manager.apply()
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
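# Illustrative output only: for floating IP 172.24.4.10 mapped to fixed IP 10.0.0.5,
# floating_forward_rules() returns these 1:1 NAT rules (addresses are assumptions
# for the sketch):
#
#   PREROUTING: -d 172.24.4.10 -j DNAT --to 10.0.0.5
#   OUTPUT:     -d 172.24.4.10 -j DNAT --to 10.0.0.5
#   float-snat: -s 10.0.0.5 -j SNAT --to 172.24.4.10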
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
with self.sync_sem:
if router_id in self.router_info:
try:
self._router_removed(router_id)
except Exception:
msg = _("Failed dealing with router "
"'%s' deletion RPC message")
LOG.debug(msg, router_id)
self.fullsync = True
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
if not routers:
return
with self.sync_sem:
try:
self._process_routers(routers)
except Exception:
msg = _("Failed dealing with routers update RPC message")
LOG.debug(msg)
self.fullsync = True
def router_removed_from_agent(self, context, payload):
self.router_deleted(context, payload['router_id'])
def router_added_to_agent(self, context, payload):
self.routers_updated(context, payload)
def _process_routers(self, routers, all_routers=False):
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
target_ex_net_id = self._fetch_external_net_id()
# If 'routers' is the full set we know about (from the router sync at
# startup or after a runtime error), look for routers that should be
# removed among everything we currently track.
# If 'routers' comes from a server-side notification, only consider the
# subset of incoming routers that we already track.
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
if not r['admin_state_up']:
continue
# If namespaces are disabled, only process the router associated
# with the configured agent id.
if (not self.conf.use_namespaces and
r['id'] != self.conf.router_id):
continue
ex_net_id = (r['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
continue
if ex_net_id and ex_net_id != target_ex_net_id:
continue
cur_router_ids.add(r['id'])
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
self.process_router(ri)
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
self._router_removed(router_id)
@periodic_task.periodic_task
def _sync_routers_task(self, context):
# we need to sync with router deletion RPC message
with self.sync_sem:
if self.fullsync:
try:
if not self.conf.use_namespaces:
router_id = self.conf.router_id
else:
router_id = None
routers = self.plugin_rpc.get_routers(
context, router_id)
self._process_routers(routers, all_routers=True)
self.fullsync = False
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
def after_start(self):
LOG.info(_("L3 agent started"))
def _update_routing_table(self, ri, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
#TODO(nati) move this code to iplib
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.conf.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
utils.execute(cmd, check_exit_code=False,
root_helper=self.conf.root_helper)
def routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug(_("Added route entry is '%s'"), route)
# a route that is being replaced must not also be scheduled for removal
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
# 'replace' succeeds even if there is no existing route
self._update_routing_table(ri, 'replace', route)
for route in removes:
LOG.debug(_("Removed route entry is '%s'"), route)
self._update_routing_table(ri, 'delete', route)
ri.routes = new_routes
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'quantum-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = self._get_ex_gw_port(ri)
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Quantum server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload)
def main():
eventlet.monkey_patch()
conf = cfg.CONF
conf.register_opts(L3NATAgent.OPTS)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf.register_opts(interface.OPTS)
conf.register_opts(external_process.OPTS)
conf(project='quantum')
config.setup_logging(conf)
server = quantum_service.Service.create(
binary='quantum-l3-agent',
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='quantum.agent.l3_agent.L3NATAgentWithStateReport')
service.launch(server).wait()
|
{
"content_hash": "e6e33812ab350778d10234b1be322d83",
"timestamp": "",
"source": "github",
"line_count": 737,
"max_line_length": 79,
"avg_line_length": 43.43962008141113,
"alnum_prop": 0.5420896454786819,
"repo_name": "yamt/neutron",
"id": "368cfb5c6eef7a3619f29ada1f05fd13ad06e782",
"size": "32739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/agent/l3_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4078056"
},
{
"name": "Shell",
"bytes": "10023"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import re
from SCons.Script import *
def do_subst_in_file(targetfile, sourcefile, dict):
"""Replace all instances of the keys of dict with their values.
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
try:
f = open(sourcefile, 'rb')
contents = f.read()
f.close()
except:
raise SCons.Errors.UserError, "Can't read source file %s"%sourcefile
for (k,v) in dict.items():
contents = re.sub(k, v, contents)
try:
f = open(targetfile, 'wb')
f.write(contents)
f.close()
except:
raise SCons.Errors.UserError, "Can't write target file %s"%targetfile
return 0 # success
def subst_in_file(target, source, env):
if not env.has_key('SUBST_DICT'):
raise SCons.Errors.UserError, "SubstFile requires SUBST_DICT to be set."
d = dict(env['SUBST_DICT']) # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v())
elif SCons.Util.is_String(v):
d[k]=env.subst(v)
else:
d[k] = SCons.Util.to_String(v)
for (t,s) in zip(target, source):
return do_subst_in_file(str(t), str(s), d)
def subst_in_file_string(target, source, env):
"""This is what gets printed on the console."""
return '\n'.join(['Substituting vars from %s into %s'%(str(s), str(t))
for (t,s) in zip(target, source)])
def subst_emitter(target, source, env):
"""Add dependency from substituted SUBST_DICT to target.
Returns original target, source tuple unchanged.
"""
d = env['SUBST_DICT'].copy() # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v())
elif SCons.Util.is_String(v):
d[k] = env.subst(v)
Depends(target, SCons.Node.Python.Value(d))
return target, source
def generate(env):
subst_action=SCons.Action.Action(subst_in_file, subst_in_file_string)
env['BUILDERS']['SubstFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
return 1
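# Illustrative SConstruct usage only (not part of the tool): once loaded, SubstFile
# rewrites a template by applying every SUBST_DICT key as a regular expression.
# The tool path, file names, and keys below are assumptions for the sketch.
#
#   env = Environment(tools=['default', 'subst'],
#                     toolpath=['site_scons/site_tools'])
#   env.SubstFile('config.h', 'config.h.in',
#                 SUBST_DICT={'%VERSION%': '1.2345', '%BASE%': 'MyProg'})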
|
{
"content_hash": "a2629a3615918e0be0d5cb37103a1d9d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 84,
"avg_line_length": 34.725806451612904,
"alnum_prop": 0.6000928936367859,
"repo_name": "pquerna/ckl",
"id": "042035a8d8d37c84a9c6d286a185b3416298c1f9",
"size": "2277",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "site_scons/site_tools/subst.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "149279"
},
{
"name": "Python",
"bytes": "10527"
},
{
"name": "Shell",
"bytes": "788000"
}
],
"symlink_target": ""
}
|
import csv
import argparse
import sys
from agoTools.admin import Admin
from agoTools.admin import AGOLItems
def _raw_input(prompt=None, stream=None, input=None):
# A raw_input() replacement that doesn't save the string in the
# GNU readline history.
if not stream:
stream = sys.stderr
if not input:
input = sys.stdin
prompt = str(prompt)
if prompt:
stream.write(prompt)
stream.flush()
# NOTE: The Python C API calls flockfile() (and unlock) during readline.
line = input.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-p', '--password')
parser.add_argument('-file', '--file')
parser.add_argument('-portal', '--portal')
args = parser.parse_args()
inputFile = ''
if args.file == None:
args.file = _raw_input("CSV path: ")
if args.user == None:
args.user = _raw_input("Username:")
if args.portal == None:
args.portal = _raw_input("Portal: ")
args.portal = str(args.portal).replace("http://","https://")
agoAdmin = Admin(args.user,args.portal,args.password)
if args.file != None:
inputFile=args.file
with open(inputFile) as input:
dataReader = csv.DictReader(input)
items=AGOLItems(dataReader)
agoAdmin.deleteItems(items.AGOLItems_list)
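# Illustrative invocation only: the script prompts for any argument it is not given,
# so a non-interactive run could look like the command below. The credentials, portal
# URL, and CSV name are assumptions; the CSV is expected to carry the columns that
# AGOLItems reads via csv.DictReader.
#
#   python deleteItems.py -u admin_user -p secret \
#       -portal https://myorg.maps.arcgis.com -file items_to_delete.csv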
|
{
"content_hash": "de937cb2d2266890b5aea3bfdd69a836",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 76,
"avg_line_length": 24.910714285714285,
"alnum_prop": 0.657347670250896,
"repo_name": "ecaldwell/ago-tools",
"id": "a53da158c7b5c9961656b0c0e0910b3548ecda2e",
"size": "1663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/deleteItems.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69082"
}
],
"symlink_target": ""
}
|
"""Telegram platform for notify component."""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.components.telegram_bot import (
ATTR_DISABLE_NOTIF,
ATTR_MESSAGE_TAG,
ATTR_PARSER,
)
from homeassistant.const import ATTR_LOCATION
from homeassistant.helpers.reload import setup_reload_service
from . import DOMAIN as TELEGRAM_DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
DOMAIN = "telegram_bot"
ATTR_KEYBOARD = "keyboard"
ATTR_INLINE_KEYBOARD = "inline_keyboard"
ATTR_PHOTO = "photo"
ATTR_VIDEO = "video"
ATTR_VOICE = "voice"
ATTR_DOCUMENT = "document"
CONF_CHAT_ID = "chat_id"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_CHAT_ID): vol.Coerce(int)})
def get_service(hass, config, discovery_info=None):
"""Get the Telegram notification service."""
setup_reload_service(hass, TELEGRAM_DOMAIN, PLATFORMS)
chat_id = config.get(CONF_CHAT_ID)
return TelegramNotificationService(hass, chat_id)
class TelegramNotificationService(BaseNotificationService):
"""Implement the notification service for Telegram."""
def __init__(self, hass, chat_id):
"""Initialize the service."""
self._chat_id = chat_id
self.hass = hass
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
service_data = {ATTR_TARGET: kwargs.get(ATTR_TARGET, self._chat_id)}
if ATTR_TITLE in kwargs:
service_data.update({ATTR_TITLE: kwargs.get(ATTR_TITLE)})
if message:
service_data.update({ATTR_MESSAGE: message})
data = kwargs.get(ATTR_DATA)
# Set message tag
if data is not None and ATTR_MESSAGE_TAG in data:
message_tag = data.get(ATTR_MESSAGE_TAG)
service_data.update({ATTR_MESSAGE_TAG: message_tag})
# Set disable_notification
if data is not None and ATTR_DISABLE_NOTIF in data:
disable_notification = data.get(ATTR_DISABLE_NOTIF)
service_data.update({ATTR_DISABLE_NOTIF: disable_notification})
# Set parse_mode
if data is not None and ATTR_PARSER in data:
parse_mode = data.get(ATTR_PARSER)
service_data.update({ATTR_PARSER: parse_mode})
# Get keyboard info
if data is not None and ATTR_KEYBOARD in data:
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
service_data.update(keyboard=keys)
elif data is not None and ATTR_INLINE_KEYBOARD in data:
keys = data.get(ATTR_INLINE_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
service_data.update(inline_keyboard=keys)
# Send a photo, video, document, voice, or location
if data is not None and ATTR_PHOTO in data:
photos = data.get(ATTR_PHOTO)
photos = photos if isinstance(photos, list) else [photos]
for photo_data in photos:
service_data.update(photo_data)
self.hass.services.call(DOMAIN, "send_photo", service_data=service_data)
return
if data is not None and ATTR_VIDEO in data:
videos = data.get(ATTR_VIDEO)
videos = videos if isinstance(videos, list) else [videos]
for video_data in videos:
service_data.update(video_data)
self.hass.services.call(DOMAIN, "send_video", service_data=service_data)
return
if data is not None and ATTR_VOICE in data:
voices = data.get(ATTR_VOICE)
voices = voices if isinstance(voices, list) else [voices]
for voice_data in voices:
service_data.update(voice_data)
self.hass.services.call(DOMAIN, "send_voice", service_data=service_data)
return
if data is not None and ATTR_LOCATION in data:
service_data.update(data.get(ATTR_LOCATION))
return self.hass.services.call(
DOMAIN, "send_location", service_data=service_data
)
if data is not None and ATTR_DOCUMENT in data:
service_data.update(data.get(ATTR_DOCUMENT))
return self.hass.services.call(
DOMAIN, "send_document", service_data=service_data
)
# Send message
_LOGGER.debug(
"TELEGRAM NOTIFIER calling %s.send_message with %s", DOMAIN, service_data
)
return self.hass.services.call(
DOMAIN, "send_message", service_data=service_data
)
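# A minimal usage sketch (hypothetical values, not part of the component): the
# call below illustrates how the optional ``data`` keys handled above decide
# which telegram_bot service send_message() ultimately invokes.
#
#   service.send_message(
#       message="Front door opened",
#       title="Alarm",
#       data={
#           "inline_keyboard": ["Acknowledge:/ack"],          # forwarded as inline_keyboard
#           "photo": {"url": "http://example.com/door.jpg"},  # routes to send_photo
#       },
#   )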
|
{
"content_hash": "2069eb24e960161e276e08e8646819b2",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 88,
"avg_line_length": 36.8515625,
"alnum_prop": 0.6302734789060844,
"repo_name": "toddeye/home-assistant",
"id": "b87ddc670c359dc37606f765fe08819f9cfffa29",
"size": "4717",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/telegram/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_blacksun_medium_s02_tier3.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "90128635338efed90671775513f30889",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 21.307692307692307,
"alnum_prop": 0.6714801444043321,
"repo_name": "anhstudios/swganh",
"id": "ae2b9a456ac107e190fd8f6df7d6f2147d9f8c86",
"size": "422",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/ship/shared_blacksun_medium_s02_tier3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
matches_payouts_probabilities_file = open('matching_bonus.csv', 'r')
line = matches_payouts_probabilities_file.readline()
expected_value = 0.0
while line:
tokens = line.split(',')
if len(tokens) != 3 or len(tokens[1]) < 1:
break
payout = float(tokens[1])
probability = float(tokens[2])
expected_value += payout * probability
line = matches_payouts_probabilities_file.readline()
matches_payouts_probabilities_file.close()
print "expected value: {0}".format(expected_value)
|
{
"content_hash": "9849214214ea759dfaec1c47c93a92db",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 36.07142857142857,
"alnum_prop": 0.697029702970297,
"repo_name": "jhurt/slotmath",
"id": "056bdd817bfc6dfddc1e41ceca4a20f7ebb54c29",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matchingbonusmath.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "8596"
},
{
"name": "C++",
"bytes": "16532"
},
{
"name": "Python",
"bytes": "20417"
},
{
"name": "Shell",
"bytes": "174"
}
],
"symlink_target": ""
}
|
import sys
from PyQt4 import QtGui, QtCore
class Button(QtGui.QPushButton):
def mouseMoveEvent(self, e):
if e.buttons() != QtCore.Qt.RightButton:
return
# write the relative cursor position to mime data
mimeData = QtCore.QMimeData()
# simple string with 'x,y'
mimeData.setText('%d,%d' % (e.x(), e.y()))
# let's make it fancy. we'll show a "ghost" of the button as we drag
# grab the button to a pixmap
pixmap = QtGui.QPixmap.grabWidget(self)
# below makes the pixmap half transparent
painter = QtGui.QPainter(pixmap)
painter.setCompositionMode(painter.CompositionMode_DestinationIn)
painter.fillRect(pixmap.rect(), QtGui.QColor(0, 0, 0, 127))
painter.end()
# make a QDrag
drag = QtGui.QDrag(self)
# put our MimeData
drag.setMimeData(mimeData)
# set its Pixmap
drag.setPixmap(pixmap)
# shift the Pixmap so that it coincides with the cursor position
drag.setHotSpot(e.pos())
# start the drag operation
# exec_ will return the accepted action from dropEvent
if drag.exec_(QtCore.Qt.CopyAction | QtCore.Qt.MoveAction) == QtCore.Qt.MoveAction:
print ('moved')
else:
print ('copied')
def mousePressEvent(self, e):
QtGui.QPushButton.mousePressEvent(self, e)
if e.button() == QtCore.Qt.LeftButton:
print ('press')
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
self.setAcceptDrops(True)
button = Button('Button', self)
button.move(100, 65)
self.buttons = [button]
self.setWindowTitle('Copy or Move')
self.setGeometry(300, 300, 280, 150)
def dragEnterEvent(self, e):
e.accept()
def dropEvent(self, e):
# get the relative position from the mime data
mime = e.mimeData().text()
x, y = map(int, mime.split(','))
if e.keyboardModifiers() & QtCore.Qt.ShiftModifier:
# copy
# so create a new button
button = Button('Button', self)
# move it to the position adjusted with the cursor position at drag
button.move(e.pos()-QtCore.QPoint(x, y))
# show it
button.show()
# store it
self.buttons.append(button)
# set the drop action as Copy
e.setDropAction(QtCore.Qt.CopyAction)
else:
# move
# so move the dragged button (i.e. event.source())
e.source().move(e.pos()-QtCore.QPoint(x, y))
# set the drop action as Move
e.setDropAction(QtCore.Qt.MoveAction)
# tell the QDrag we accepted it
e.accept()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
ex = Example()
ex.show()
app.exec_()
|
{
"content_hash": "69bafab9abd73ac7d393ef251e471603",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 91,
"avg_line_length": 29.058252427184467,
"alnum_prop": 0.5746742398930839,
"repo_name": "go2net/PythonBlocks",
"id": "7c3c04e0c134626f0c2fff253329d4c099a2915b",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "t.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "560957"
}
],
"symlink_target": ""
}
|
import json
import unittest
from stix import Entity, EntityList
from stix.utils import NamespaceInfo
import stix.bindings.stix_core as core_binding
from stix.core import STIXPackage
import stix.utils
import cybox.utils
def round_trip_dict(cls, dict_):
obj = cls.object_from_dict(dict_)
dict2 = cls.dict_from_object(obj)
return dict2
def round_trip(o, output=False, list_=False):
""" Performs all eight conversions to verify import/export functionality.
1. cybox.Entity -> dict/list
2. dict/list -> JSON string
3. JSON string -> dict/list
4. dict/list -> cybox.Entity
5. cybox.Entity -> Bindings Object
6. Bindings Object -> XML String
7. XML String -> Bindings Object
8. Bindings object -> cybox.Entity
It returns the final object, so tests which call this function can check to
ensure it was not modified during any of the transforms.
"""
klass = o.__class__
if output:
print "Class: ", klass
print "-" * 40
# 1. cybox.Entity -> dict/list
if list_:
d = o.to_list()
else:
d = o.to_dict()
# 2. dict/list -> JSON string
json_string = json.dumps(d)
if output:
print(json_string)
print "-" * 40
# Before parsing the JSON, make sure the cache is clear
cybox.utils.cache_clear()
# 3. JSON string -> dict/list
d2 = json.loads(json_string)
# 4. dict/list -> cybox.Entity
if list_:
o2 = klass.from_list(d2)
else:
o2 = klass.from_dict(d2)
# 5. Entity -> Bindings Object
ns_info = NamespaceInfo()
xobj = o2.to_obj(ns_info=ns_info)
try:
# 6. Bindings Object -> XML String
xml_string = o2.to_xml()
except KeyError as ex:
print str(ex)
ns_info.finalize()
print ns_info.finalized_namespaces
raise ex
if output:
print(xml_string)
print "-" * 40
# Before parsing the XML, make sure the cache is clear
cybox.utils.cache_clear()
#7. XML String -> Bindings Object
xobj2 = klass._binding.parseString(xml_string)
# 8. Bindings object -> cybox.Entity
o3 = klass.from_obj(xobj2)
return o3
class EntityTestCase(object):
"""A base class for testing STIX Entities"""
def setUp(self):
self.assertNotEqual(self.klass, None)
self.assertNotEqual(self._full_dict, None)
def test_round_trip_dict(self):
# Don't run this test on the base class
        if type(self) is EntityTestCase:
return
dict2 = round_trip_dict(self.klass, self._full_dict)
self.maxDiff = None
self.assertEqual(self._full_dict, dict2)
def test_round_trip(self):
# Don't run this test on the base class
        if type(self) is EntityTestCase:
return
ent = self.klass.from_dict(self._full_dict)
ent2 = round_trip(ent, output=True)
#TODO: eventually we want to test the objects are the same, but for
# now, just make sure there aren't any errors.
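# A hypothetical concrete subclass sketch (illustrative only, not part of the
# suite): real tests provide the ``klass`` and ``_full_dict`` attributes that
# EntityTestCase relies on above. The dictionary shown is an assumption, not a
# verified fixture.
#
#   class ExamplePackageTest(EntityTestCase, unittest.TestCase):
#       klass = STIXPackage
#       _full_dict = {'id': 'example:Package-1', 'version': '1.1.1'}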
|
{
"content_hash": "43350f2f8829355dbd33588bf2788cf2",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 26.05982905982906,
"alnum_prop": 0.6208592981305346,
"repo_name": "benjamin9999/python-stix",
"id": "f50a7226c4f23786f63628b38e27a3b5c7b658dc",
"size": "3154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stix/test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "269"
},
{
"name": "Python",
"bytes": "1840423"
},
{
"name": "Shell",
"bytes": "6711"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_trust import Parameters
from library.modules.bigip_device_trust import ModuleManager
from library.modules.bigip_device_trust import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_trust import Parameters
from ansible.modules.network.f5.bigip_device_trust import ModuleManager
from ansible.modules.network.f5.bigip_device_trust import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret'
)
p = Parameters(params=args)
assert p.peer_server == '10.10.10.10'
assert p.peer_hostname == 'foo.bar.baz'
assert p.peer_user == 'admin'
assert p.peer_password == 'secret'
def test_module_parameters_with_peer_type(self):
args = dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
type='peer'
)
p = Parameters(params=args)
assert p.peer_server == '10.10.10.10'
assert p.peer_hostname == 'foo.bar.baz'
assert p.peer_user == 'admin'
assert p.peer_password == 'secret'
assert p.type is True
def test_module_parameters_with_subordinate_type(self):
args = dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
type='subordinate'
)
p = Parameters(params=args)
assert p.peer_server == '10.10.10.10'
assert p.peer_hostname == 'foo.bar.baz'
assert p.peer_user == 'admin'
assert p.peer_password == 'secret'
assert p.type is False
def test_hyphenated_peer_hostname(self):
args = dict(
peer_hostname='hn---hyphen____underscore.hmatsuda.local',
)
p = Parameters(params=args)
assert p.peer_hostname == 'hn---hyphen____underscore.hmatsuda.local'
def test_numbered_peer_hostname(self):
args = dict(
peer_hostname='BIG-IP_12x_ans2.example.local',
)
p = Parameters(params=args)
assert p.peer_hostname == 'BIG-IP_12x_ans2.example.local'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_device_trust(self, *args):
set_module_args(dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_create_device_trust_idempotent(self, *args):
set_module_args(dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is False
|
{
"content_hash": "7c36dca575bbc78c28bf39894eafba0b",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 77,
"avg_line_length": 28.95505617977528,
"alnum_prop": 0.5983701979045402,
"repo_name": "thaim/ansible",
"id": "6c7f325b0121359517f304acbadb8d193a5cbd3e",
"size": "5311",
"binary": false,
"copies": "21",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/f5/test_bigip_device_trust.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
import logging
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.vpn import utils as vpn_utils
from neutronclient.openstack.common.gettextutils import _
class ListIPsecPolicy(neutronv20.ListCommand):
"""List ipsecpolicies that belongs to a given tenant connection."""
resource = 'ipsecpolicy'
log = logging.getLogger(__name__ + '.ListIPsecPolicy')
list_columns = ['id', 'name', 'auth_algorithm',
'encryption_algorithm', 'pfs']
_formatters = {}
pagination_support = True
sorting_support = True
class ShowIPsecPolicy(neutronv20.ShowCommand):
"""Show information of a given ipsecpolicy."""
resource = 'ipsecpolicy'
log = logging.getLogger(__name__ + '.ShowIPsecPolicy')
class CreateIPsecPolicy(neutronv20.CreateCommand):
"""Create an ipsecpolicy."""
resource = 'ipsecpolicy'
log = logging.getLogger(__name__ + '.CreateIPsecPolicy')
def add_known_arguments(self, parser):
parser.add_argument(
'--description',
help=_('Description of the IPsecPolicy'))
parser.add_argument(
'--transform-protocol',
default='esp', choices=['esp', 'ah', 'ah-esp'],
help=_('Transform Protocol in lowercase, default:esp'))
parser.add_argument(
'--auth-algorithm',
default='sha1', choices=['sha1'],
help=_('Authentication algorithm in lowercase, default:sha1'))
parser.add_argument(
'--encryption-algorithm',
default='aes-128', choices=['3des',
'aes-128',
'aes-192',
'aes-256'],
help=_('Encryption Algorithm in lowercase, default:aes-128'))
parser.add_argument(
'--encapsulation-mode',
default='tunnel', choices=['tunnel', 'transport'],
help=_('Encapsulation Mode in lowercase, default:tunnel'))
parser.add_argument(
'--pfs',
default='group5', choices=['group2', 'group5', 'group14'],
help=_('Perfect Forward Secrecy in lowercase, default:group5'))
parser.add_argument(
'--lifetime',
metavar="units=UNITS,value=VALUE",
type=utils.str2dict,
help=vpn_utils.lifetime_help("IPsec"))
parser.add_argument(
'name', metavar='NAME',
help=_('Name of the IPsecPolicy'))
def args2body(self, parsed_args):
body = {'ipsecpolicy': {
'auth_algorithm': parsed_args.auth_algorithm,
'encryption_algorithm': parsed_args.encryption_algorithm,
'encapsulation_mode': parsed_args.encapsulation_mode,
'transform_protocol': parsed_args.transform_protocol,
'pfs': parsed_args.pfs,
}, }
if parsed_args.name:
body['ipsecpolicy'].update({'name': parsed_args.name})
if parsed_args.description:
body['ipsecpolicy'].update(
{'description': parsed_args.description}
)
if parsed_args.tenant_id:
body['ipsecpolicy'].update({'tenant_id': parsed_args.tenant_id})
if parsed_args.lifetime:
vpn_utils.validate_lifetime_dict(parsed_args.lifetime)
body['ipsecpolicy'].update({'lifetime': parsed_args.lifetime})
return body
class UpdateIPsecPolicy(neutronv20.UpdateCommand):
"""Update a given ipsec policy."""
resource = 'ipsecpolicy'
log = logging.getLogger(__name__ + '.UpdateIPsecPolicy')
def add_known_arguments(self, parser):
parser.add_argument(
'--lifetime',
metavar="units=UNITS,value=VALUE",
type=utils.str2dict,
help=vpn_utils.lifetime_help("IPsec"))
def args2body(self, parsed_args):
body = {'ipsecpolicy': {
}, }
if parsed_args.lifetime:
vpn_utils.validate_lifetime_dict(parsed_args.lifetime)
body['ipsecpolicy'].update({'lifetime': parsed_args.lifetime})
return body
class DeleteIPsecPolicy(neutronv20.DeleteCommand):
"""Delete a given ipsecpolicy."""
resource = 'ipsecpolicy'
log = logging.getLogger(__name__ + '.DeleteIPsecPolicy')
|
{
"content_hash": "2238f54020b4d99302493419819d9ef5",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 76,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.5938073394495413,
"repo_name": "vijayendrabvs/ssl-python-neutronclient",
"id": "a8a43238cc6484c61c749d90a460e23fe80a359b",
"size": "5080",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutronclient/neutron/v2_0/vpn/ipsecpolicy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "661899"
},
{
"name": "Shell",
"bytes": "5278"
}
],
"symlink_target": ""
}
|
"""Platform for shared base classes for sensors."""
from __future__ import annotations
from homeassistant.helpers.entity import DeviceInfo, EntityDescription
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
class FlumeEntity(CoordinatorEntity[DataUpdateCoordinator[None]]):
"""Base entity class."""
_attr_attribution = "Data provided by Flume API"
_attr_has_entity_name = True
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: EntityDescription,
device_id: str,
location_name: str,
is_bridge: bool = False,
) -> None:
"""Class initializer."""
super().__init__(coordinator)
self.entity_description = description
self.device_id = device_id
if is_bridge:
name = "Flume Bridge"
else:
name = "Flume Sensor"
self._attr_unique_id = f"{description.key}_{device_id}"
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, device_id)},
manufacturer="Flume, Inc.",
model="Flume Smart Water Monitor",
name=f"{name} {location_name}",
configuration_url="https://portal.flumewater.com",
)
async def async_added_to_hass(self):
"""Request an update when added."""
await super().async_added_to_hass()
# We do not ask for an update with async_add_entities()
# because it will update disabled entities
await self.coordinator.async_request_refresh()
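# A hypothetical platform-side sketch (illustrative only): concrete entities
# derive from FlumeEntity and pass in the coordinator, an EntityDescription,
# the Flume device id and the location name handled in __init__ above.
#
#   class ExampleFlumeSensor(FlumeEntity, SensorEntity):
#       @property
#       def native_value(self):
#           return self.coordinator.data  # the shape of the data is an assumption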
|
{
"content_hash": "735d365cc23e302caf7d1e7933db0989",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 70,
"avg_line_length": 31.03846153846154,
"alnum_prop": 0.6245353159851301,
"repo_name": "w1ll1am23/home-assistant",
"id": "7cd84127c647e7083584e5621876fa8faa4773c5",
"size": "1614",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/flume/entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import pandas as pd
import tensorflow as tf
# Step 1 - Load and parse the data
# Importing of data
titanic_train = pd.read_csv('/Users/path/to/train.csv')
titanic_test = pd.read_csv('/Users/path/to/test.csv')
titanic_test_result = pd.read_csv('/Users/path/to/genderclassmodel.csv')
# drop the columns we will not use as features
titanic_train = titanic_train.drop(['PassengerId','Name','Age','Ticket','Cabin','Fare'], axis = 1)
titanic_test = titanic_test.drop(['PassengerId','Name','Age','Ticket','Cabin','Fare'], axis = 1)
# we need to convert gender to 1s and -1s
# building a plain list first and then assigning it as a new column proved more reliable than in-place replacement
sex_array = list()
for i in range(len(titanic_train)):
if titanic_train['Sex'][i] == 'male':
sex_array.append(1.0)
else:
sex_array.append(-1.0)
titanic_train.loc[:, ('sex_usable')] = sex_array
# Step 2 - make label
# Since we already have the label in the training set
titanic_survive = pd.DataFrame()
titanic_survive.loc[:, ('Survived')] = titanic_train['Survived'].astype(float)
# Step 3 - prepare the data for tensorflow
# convert features into tensor
'''
inputX = titanic_train.loc[:, ['sex_usable', 'Pclass']].as_matrix()
This feature set did not give good results, so we do not use it
'''
inputX = titanic_train.loc[:, ['Pclass', 'Parch', 'sex_usable']].as_matrix()
# convert labels into output tensor
inputY = titanic_survive.loc[:, ['Survived']].as_matrix()
# creating the testing data
inputTest = titanic_test.loc[:, ['Pclass', 'Parch', 'sex_usable']].as_matrix()
outputTest = titanic_test_result.loc[:, ['Survived']].as_matrix()
# Step 4 - write out our hyperparameters
learning_rate = 0.1
training_epochs = 10000
display_step = 2000
n_samples = inputY.size
# Step 5 - Create our computation graph neural network
n_hidden_1 = 5
n_hidden_2 = 3
x = tf.placeholder(tf.float32, [None, 3])
W1 = tf.Variable(tf.truncated_normal([3,n_hidden_1]))
b1 = tf.Variable(tf.truncated_normal([n_hidden_1]))
W2 = tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2]))
b2 = tf.Variable(tf.truncated_normal([n_hidden_2]))
W3 = tf.Variable(tf.truncated_normal([n_hidden_2, 1]))
b3 = tf.Variable(tf.truncated_normal([1]))
y1 = tf.add(tf.matmul(x, W1), b1)
# y1 = tf.nn.sigmoid(y1)
y2 = tf.add(tf.matmul(y1, W2), b2)
y2 = tf.nn.sigmoid(y2)
y3 = tf.add(tf.matmul(y2, W3), b3)
y3 = tf.nn.sigmoid(y3)
y_ = tf.placeholder(tf.float32, [None, 1])
print("Step five done")
# Step 6 - perform training
cost = tf.reduce_sum(tf.pow(y_ - y3, 2))/(2 * n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# training loop
for i in range(training_epochs):
sess.run([optimizer], feed_dict = {x: inputX, y_: inputY})
if i % display_step == 0:
cc = sess.run(cost, feed_dict = {x: inputX, y_: inputY})
print("Training Step:", '%04d' % (i), "cost =", "{:.9f}".format(cc))
print("Optimisation Finished")
y = sess.run(y3, feed_dict = {x: inputTest})
def hardLimit(a):
temp = []
for i in range(len(a)):
if a[i] >= 0.5:
temp.append(1.0)
else:
temp.append(0.0)
return temp
y = hardLimit(y)
corr = 0
total = len(y)
for i in range(total):
if y[i] == outputTest[i]:
corr += 1
percentage_correct = (corr / total) * 100
print(percentage_correct, corr, total)
sess.close()
|
{
"content_hash": "59591cc908dc7ca66c7231cfaddbfbcf",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 98,
"avg_line_length": 29.236842105263158,
"alnum_prop": 0.6801680168016802,
"repo_name": "bondenitrr2015/Machine-Learning",
"id": "2660bc5a2916efda8b79cd918033fb334ff59bd2",
"size": "3407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "titanic_quick_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8545"
}
],
"symlink_target": ""
}
|
import sys
import os
import importlib
import glob
# Import and instantiate each Cmd object.
_this_dir = os.path.dirname(__file__)
_this_mod = os.path.basename(_this_dir)
def build_cmds(sub_parser):
cmd_objs = {}
imlist = glob.glob(os.path.join(_this_dir, "*.py"))
imlist.remove(os.path.join(_this_dir, "__init__.py"))
imlist.remove(os.path.join(_this_dir, "base.py"))
imlist = [os.path.basename(x) for x in imlist]
imlist = [os.path.splitext(x)[0] for x in imlist]
for im in imlist:
# print(im)
mod = importlib.import_module("pcm." + _this_mod + '.' + im)
if hasattr(mod, 'Cmd'):
# print("Found Command: ", mod.Cmd.name)
cmd_objs[mod.Cmd.name] = mod.Cmd(sub_parser)
cmd_objs[mod.Cmd.name].build()
# end for im in imlist
# print(cmd_objs)
return cmd_objs
#build_cmds()
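# A hypothetical command module sketch (illustrative, not shipped with pcm):
# build_cmds() above only requires that each sibling module expose a class
# named ``Cmd`` with a ``name`` attribute, a constructor taking the argparse
# sub-parser, and a ``build()`` method. In a real module the class below would
# be named ``Cmd``.
class ExampleCmd(object):
    name = "example"
    def __init__(self, sub_parser):
        # register this command with the shared sub-parser
        self.parser = sub_parser.add_parser(self.name, help="hypothetical example command")
    def build(self):
        # declare this command's own arguments
        self.parser.add_argument("--verbose", action="store_true")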
|
{
"content_hash": "655718753459d0b1f1426ec548b81a63",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 26.545454545454547,
"alnum_prop": 0.6015981735159818,
"repo_name": "jeffbuttars/pcm",
"id": "77f55f97bd24393cb3cd5e2146c9c988c2abb890",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcmpy/cmds/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11973"
},
{
"name": "Shell",
"bytes": "3456"
}
],
"symlink_target": ""
}
|
from PyQt4 import Qt, QtCore
class OkAddCase(Qt.QWidget):
def __init__(self, parent=None):
Qt.QWidget.__init__(self, parent)
self.nameEdit = Qt.QLineEdit(self)
self.nameEdit.setPlaceholderText("名称")
self.cateEdit = Qt.QLineEdit(self)
self.cateEdit.setPlaceholderText("分类")
self.descEdit = OkTextEdit(self)
layout = Qt.QVBoxLayout()
layout.setMargin(0)
layout.addWidget(self.nameEdit)
layout.addWidget(self.cateEdit)
layout.addWidget(self.descEdit)
self.setLayout(layout)
def getNameAndDesc(self):
if not self.descEdit.state:
return (self.nameEdit.text(), self.cateEdit.text(), self.descEdit.toPlainText())
return (self.nameEdit.text(), self.cateEdit.text(), '')
def hideEvent(self, event):
self.nameEdit.setText('')
self.cateEdit.setText('')
if not self.descEdit.state:
self.descEdit.state = True
self.descEdit.setTextColor(Qt.QColor.fromRgb(128, 128, 128))
self.descEdit.setText("描述")
event.accept()
class OkTextEdit(Qt.QTextEdit):
def __init__(self, parent=None):
Qt.QTextEdit.__init__(self, parent)
self.state = True
self.setTextColor(Qt.QColor.fromRgb(128, 128, 128))
self.setText("描述")
def focusInEvent(self, event):
if self.state:
self.clear()
self.state = False
self.setTextColor(Qt.QColor.fromRgb(0, 0, 0))
event.accept()
Qt.QTextEdit.focusInEvent(self, event)
event.accept()
def focusOutEvent(self, event):
if not self.state and len(self.toPlainText()) == 0:
self.state = True
self.setTextColor(Qt.QColor.fromRgb(128, 128, 128))
self.setText("描述")
event.accept()
Qt.QTextEdit.focusOutEvent(self, event)
event.accept()
|
{
"content_hash": "35fa67b3ccf8dd2b39e7805a483e2440",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 92,
"avg_line_length": 33.86440677966102,
"alnum_prop": 0.58008008008008,
"repo_name": "ghold/OneKeySql",
"id": "294f7be2fb3a1afd79475636b9654c7fc1f294df",
"size": "2018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onekey/OkAddCase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228760"
}
],
"symlink_target": ""
}
|
import requests as req
from requests.exceptions import ConnectionError as ce
from urllib3.exceptions import MaxRetryError,NewConnectionError
import asyncio
import concurrent.futures
import multiprocessing
from multiprocessing import Process
import os
import sys
import re
import bs4
from apikeys import apikey
import urllib
import urllib.request
from urllib.parse import urlencode
#import time
#t = time.process_time()
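# The apikeys module imported above is assumed to live next to this script; a
# minimal sketch (hypothetical value) would be:
#   # apikeys.py
#   apikey = "your-newsapi-key"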
def printinfo(string):
with open('DailyNews.log','a+') as f1:
f1.write(string)
def Worker(arg):
# elif(KeyboardInterrupt):
# sys.exit()
with open(os.path.join(mypath,str(arg['source']))+".txt","w+") as f:
f.write('~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
f.write(str(arg['source']).title()+'\n')
f.write('~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
for k in range(len(arg['articles'])):
f.write('-----------------------------------------------------\n')
f.write(str(arg['articles'][k]['title'])+'\n')
f.write('-----------------------------------------------------\n')
f.write(str(arg['articles'][k]['description'])+'\n')
f.write('-----------------------------------------------------\n')
f.write('Author:-'+str(arg['articles'][k]['author'])+'\n')
f.write('-----------------------------------------------------\n')
if arg['articles'][k]['urlToImage'] is not None:
try:
os.chdir(imgpath)
except (OSError,WindowsError):
os.chdir(mypath)
try:
c='Downloading images embedded in articles from {}'.format(arg['source'])
printinfo(c)
urllib.request.urlretrieve(arg['articles'][k]['urlToImage'],'{}-{}'.format(arg['source'],k+1))
except:
d='No images found for the {}th {} article.'.format(k+1,arg['source'])
printinfo(d)
cwd = os.getcwd()
mypath = os.path.join(cwd,'DailyNews')
imgpath = os.path.join(mypath,'Images')
if not os.path.exists(mypath):
os.makedirs(mypath)
if not os.path.exists(imgpath):
os.makedirs(imgpath)
KEY = apikey
sites=['ars-technica','al-jazeera-english','bloomberg','bbc-news','bbc-sport','buzzfeed','cnn','engadget','espn','espn-cric-info','fortune','google-news','hacker-news','ign','mashable','mtv-news','national-geographic','new-york-magazine','new-scientist','reddit-r-all','reuters','techcrunch','techradar','the-hindu','the-economist','the-huffington-post','the-new-york-times','the-next-web','the-telegraph','the-times-of-india','the-wall-street-journal','the-washington-post','the-verge','time']
times = ['top','latest','popular']
url_list=[]
for i in range(len(sites)):
for j in range(len(times)):
payload={'source':sites[i],
'sortBy':times[j],
'apiKey':KEY
};
qstring=urlencode(payload)
url='https://newsapi.org/v1/articles?'+qstring
url_list.append(url)
r = []
async def webreqsend():
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as Executor:
loop = asyncio.get_event_loop()
try:
futures = [loop.run_in_executor(Executor,req.get,u) for u in url_list]
except (ce,MaxRetryError,TimeoutError,NewConnectionError) as e:
            printinfo(str(e))
for responses in await asyncio.gather(*futures):
r.append(responses)
loop = asyncio.get_event_loop()
loop.run_until_complete(webreqsend())
rsonlist=[]
for i in range(len(r)):
    if r is None:
        x='Whole Object is None'
        printinfo(x)
    elif r[i] is None:
        y='Particular object is None'
        printinfo(y)
    else:
        rso=r[i].json()
        if rso['status']=='error':
            z='Response Error'
            printinfo(z)
        else:
            rsonlist.append(rso)
k=len(rsonlist)
a='Requests completed'
printinfo(a)
p=[Process(target=Worker,args=(rsonlist[i],)) for i in range(k)]
for process in p:
process.start()
for process in p:
process.join()
#elapsed_time = time.process_time()-t
#print("\nTime Taken: %ds\n"%(elapsed_time))
|
{
"content_hash": "f51c84ee00e86e3642bb516169777633",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 494,
"avg_line_length": 40.84615384615385,
"alnum_prop": 0.5637947269303202,
"repo_name": "Bharat123rox/DailyNews",
"id": "ca8438f3c292a4feb9cf22652b953d996452d7e6",
"size": "4248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyNews.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4248"
}
],
"symlink_target": ""
}
|
"""This module contains a Google Cloud Text to Speech Hook."""
from typing import Dict, Optional, Sequence, Union
from google.api_core.retry import Retry
from google.cloud.texttospeech_v1 import TextToSpeechClient
from google.cloud.texttospeech_v1.types import (
AudioConfig,
SynthesisInput,
SynthesizeSpeechResponse,
VoiceSelectionParams,
)
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTextToSpeechHook(GoogleBaseHook):
"""
Hook for Google Cloud Text to Speech API.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client = None # type: Optional[TextToSpeechClient]
def get_conn(self) -> TextToSpeechClient:
"""
Retrieves connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
:rtype: google.cloud.texttospeech_v1.TextToSpeechClient
"""
if not self._client:
self._client = TextToSpeechClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def synthesize_speech(
self,
input_data: Union[Dict, SynthesisInput],
voice: Union[Dict, VoiceSelectionParams],
audio_config: Union[Dict, AudioConfig],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
) -> SynthesizeSpeechResponse:
"""
        Synthesizes speech from the given text input.
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
:param voice: configuration of voice to be used in synthesis. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
:param audio_config: configuration of the synthesized audio. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:return: SynthesizeSpeechResponse See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse
:rtype: object
"""
client = self.get_conn()
self.log.info("Synthesizing input: %s", input_data)
return client.synthesize_speech(
input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout
)
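# A minimal usage sketch (illustrative only): it assumes a working
# "google_cloud_default" connection, uses a hypothetical output path, and only
# shows how the dict-style arguments documented above are passed in.
def _example_synthesize_speech():
    hook = CloudTextToSpeechHook(gcp_conn_id="google_cloud_default")
    response = hook.synthesize_speech(
        input_data={"text": "Hello from Airflow"},
        voice={"language_code": "en-US"},
        audio_config={"audio_encoding": "LINEAR16"},
    )
    # SynthesizeSpeechResponse exposes the synthesized bytes as audio_content
    with open("/tmp/example_output.wav", "wb") as out:  # hypothetical path
        out.write(response.audio_content)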
|
{
"content_hash": "9f38de62f8cb3fd0dda3383084d271c7",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 160,
"avg_line_length": 46.410526315789475,
"alnum_prop": 0.6965298253572239,
"repo_name": "bolkedebruin/airflow",
"id": "eae6702bdef83531ce5b3775cb12fc3aba62c134",
"size": "5196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/hooks/text_to_speech.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
import sys
from tuple_normaliser import normalise_tuple
class DupeReader:
def __init__(self, truth_files):
self.dupes = set([])
for truth_file in truth_files:
with open(truth_file) as truth:
self.dupes.update(normalise_tuple(tuple(line.strip().split(' '))) for line in truth.readlines())
def main():
truth_files = [
'/afs/inf.ed.ac.uk/user/s11/s1157979/Public/truth/t1.dup',
'/afs/inf.ed.ac.uk/user/s11/s1157979/Public/truth/t2.dup',
'/afs/inf.ed.ac.uk/user/s11/s1157979/Public/truth/t3.dup'
]
truth = DupeReader(truth_files).dupes
detected = DupeReader([sys.argv[1]]).dupes
real_dupes = len(truth)
reported_dupes = len(detected)
true_positives = len(truth & detected)
false_positives = len(detected - truth)
false_negatives = len(truth - detected)
print "True_P:" + str(true_positives) + " False_P: " + str(false_positives) + " False_N: " + str(false_negatives)
recall = true_positives / float(real_dupes)
precision = true_positives / float(true_positives + false_positives)
f_1 = (2 * recall * precision) / (recall + precision)
print "Precision: " + str(precision) + " Recall: " + str(recall) + " F1: " + str(f_1)
if __name__ == '__main__':
main()
|
{
"content_hash": "cf78c7d280d4a889b49c7ebb3e78494d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 115,
"avg_line_length": 32.26315789473684,
"alnum_prop": 0.6574225122349103,
"repo_name": "cronin101/TTS3",
"id": "399cea9422137ce4b8962a34d8d73c4c4138c5da",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "13777"
}
],
"symlink_target": ""
}
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Preferences(object):
def setupUi(self, Preferences):
Preferences.setObjectName("Preferences")
Preferences.resize(465, 374)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Preferences.sizePolicy().hasHeightForWidth())
Preferences.setSizePolicy(sizePolicy)
self.verticalLayout = QtWidgets.QVBoxLayout(Preferences)
self.verticalLayout.setObjectName("verticalLayout")
self.form_layout = QtWidgets.QFormLayout()
self.form_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.FieldsStayAtSizeHint)
self.form_layout.setObjectName("form_layout")
self.enabled_sliceview_label = QtWidgets.QLabel(Preferences)
self.enabled_sliceview_label.setObjectName("enabled_sliceview_label")
self.form_layout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.enabled_sliceview_label)
self.enabled_orthoview_combo_box = QtWidgets.QComboBox(Preferences)
self.enabled_orthoview_combo_box.setObjectName("enabled_orthoview_combo_box")
self.enabled_orthoview_combo_box.addItem("")
self.enabled_orthoview_combo_box.addItem("")
self.form_layout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.enabled_orthoview_combo_box)
self.Grid = QtWidgets.QLabel(Preferences)
self.Grid.setObjectName("Grid")
self.form_layout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.Grid)
self.gridview_style_combo_box = QtWidgets.QComboBox(Preferences)
self.gridview_style_combo_box.setObjectName("gridview_style_combo_box")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/part/grid_points"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.gridview_style_combo_box.addItem(icon, "")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/part/grid_lines"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.gridview_style_combo_box.addItem(icon1, "")
self.form_layout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.gridview_style_combo_box)
self.zoom_speed_label = QtWidgets.QLabel(Preferences)
self.zoom_speed_label.setObjectName("zoom_speed_label")
self.form_layout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.zoom_speed_label)
self.zoom_speed_slider = QtWidgets.QSlider(Preferences)
self.zoom_speed_slider.setMinimumSize(QtCore.QSize(140, 0))
self.zoom_speed_slider.setMinimum(1)
self.zoom_speed_slider.setMaximum(100)
self.zoom_speed_slider.setSingleStep(1)
self.zoom_speed_slider.setProperty("value", 50)
self.zoom_speed_slider.setOrientation(QtCore.Qt.Horizontal)
self.zoom_speed_slider.setInvertedControls(False)
self.zoom_speed_slider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.zoom_speed_slider.setTickInterval(0)
self.zoom_speed_slider.setObjectName("zoom_speed_slider")
self.form_layout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.zoom_speed_slider)
self.show_icon_label_text = QtWidgets.QLabel(Preferences)
self.show_icon_label_text.setObjectName("show_icon_label_text")
self.form_layout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.show_icon_label_text)
self.show_icon_labels = QtWidgets.QCheckBox(Preferences)
self.show_icon_labels.setChecked(True)
self.show_icon_labels.setObjectName("show_icon_labels")
self.form_layout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.show_icon_labels)
self.verticalLayout.addLayout(self.form_layout)
self.button_box = QtWidgets.QDialogButtonBox(Preferences)
self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.RestoreDefaults)
self.button_box.setObjectName("button_box")
self.verticalLayout.addWidget(self.button_box)
self.actionClose = QtWidgets.QAction(Preferences)
self.actionClose.setObjectName("actionClose")
self.retranslateUi(Preferences)
QtCore.QMetaObject.connectSlotsByName(Preferences)
def retranslateUi(self, Preferences):
_translate = QtCore.QCoreApplication.translate
Preferences.setWindowTitle(_translate("Preferences", "Preferences"))
self.enabled_sliceview_label.setText(_translate("Preferences", "Slice View"))
self.enabled_orthoview_combo_box.setItemText(0, _translate("Preferences", "Legacy Slice View"))
self.enabled_orthoview_combo_box.setItemText(1, _translate("Preferences", "Grid View"))
self.Grid.setText(_translate("Preferences", "Grid Appearance"))
self.gridview_style_combo_box.setItemText(0, _translate("Preferences", "Points"))
self.gridview_style_combo_box.setItemText(1, _translate("Preferences", "Lines"))
self.zoom_speed_label.setText(_translate("Preferences", "Mousewheel zoom speed:"))
self.show_icon_label_text.setText(_translate("Preferences", "Show Icon Labels:"))
self.show_icon_labels.setText(_translate("Preferences", "(needs restart)"))
self.actionClose.setText(_translate("Preferences", "Close"))
self.actionClose.setShortcut(_translate("Preferences", "Ctrl+W"))
import cadnano.gui.dialogs.dialogicons_rc
|
{
"content_hash": "613113eb0d3b117de8e98e1ce001004e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 108,
"avg_line_length": 63.65882352941176,
"alnum_prop": 0.7229717242653854,
"repo_name": "scholer/cadnano2.5",
"id": "3767fa09086666b694392a2eda0c550b1b6df976",
"size": "5618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cadnano/gui/dialogs/ui_preferences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2617"
},
{
"name": "Python",
"bytes": "1624263"
},
{
"name": "QMake",
"bytes": "3719"
}
],
"symlink_target": ""
}
|
import os
import signal
from unittest import mock
from django.db.backends.postgresql.client import DatabaseClient
from django.test import SimpleTestCase
class PostgreSqlDbshellCommandTestCase(SimpleTestCase):
def _run_it(self, dbinfo):
"""
        This function invokes the runshell command while mocking
        subprocess.call. It returns a 2-tuple with:
        - The command line list
        - The content of the file pointed to by the PGPASSFILE environment
          variable, or None.
"""
def _mock_subprocess_call(*args):
self.subprocess_args = list(*args)
if 'PGPASSFILE' in os.environ:
with open(os.environ['PGPASSFILE'], 'r') as f:
self.pgpass = f.read().strip() # ignore line endings
else:
self.pgpass = None
return 0
self.subprocess_args = None
self.pgpass = None
with mock.patch('subprocess.call', new=_mock_subprocess_call):
DatabaseClient.runshell_db(dbinfo)
return self.subprocess_args, self.pgpass
def test_basic(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'password': 'somepassword',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
'somehost:444:dbname:someuser:somepassword',
)
)
def test_nopass(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'someuser',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
None,
)
)
def test_column(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'some:user',
'password': 'some:password',
'host': '::1',
'port': '444',
}), (
['psql', '-U', 'some:user', '-h', '::1', '-p', '444', 'dbname'],
'\\:\\:1:444:dbname:some\\:user:some\\:password',
)
)
def test_escape_characters(self):
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': 'some\\user',
'password': 'some\\password',
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', 'some\\user', '-h', 'somehost', '-p', '444', 'dbname'],
'somehost:444:dbname:some\\\\user:some\\\\password',
)
)
def test_accent(self):
username = 'rôle'
password = 'sésame'
pgpass_string = 'somehost:444:dbname:%s:%s' % (username, password)
self.assertEqual(
self._run_it({
'database': 'dbname',
'user': username,
'password': password,
'host': 'somehost',
'port': '444',
}), (
['psql', '-U', username, '-h', 'somehost', '-p', '444', 'dbname'],
pgpass_string,
)
)
def test_sigint_handler(self):
"""SIGINT is ignored in Python and passed to psql to abort quries."""
def _mock_subprocess_call(*args):
handler = signal.getsignal(signal.SIGINT)
self.assertEqual(handler, signal.SIG_IGN)
sigint_handler = signal.getsignal(signal.SIGINT)
# The default handler isn't SIG_IGN.
self.assertNotEqual(sigint_handler, signal.SIG_IGN)
with mock.patch('subprocess.check_call', new=_mock_subprocess_call):
DatabaseClient.runshell_db({})
# dbshell restores the original handler.
self.assertEqual(sigint_handler, signal.getsignal(signal.SIGINT))
|
{
"content_hash": "21fb1bc0a2e0b83b1192b88244900cf0",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 86,
"avg_line_length": 34.80172413793103,
"alnum_prop": 0.4877384196185286,
"repo_name": "frankvdp/django",
"id": "8e5af5f1f352fde876276d923ad52a0549d0330c",
"size": "4039",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tests/dbshell/test_postgresql_psycopg2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174031"
},
{
"name": "JavaScript",
"bytes": "249623"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11310936"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
__title__ = 'asana'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Asana, Inc.'
from asana.version import __version__
from .client import Client
|
{
"content_hash": "9cdffa4700b746efa9d65d52e973b2b4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 44,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6688741721854304,
"repo_name": "Asana/python-asana",
"id": "863b9d3120671c67cb8c0aabe30b9277caddce8d",
"size": "151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asana/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "509"
},
{
"name": "Mustache",
"bytes": "2128"
},
{
"name": "Python",
"bytes": "449126"
}
],
"symlink_target": ""
}
|
from pyvisdk.thirdparty import Enum
VAppAutoStartAction = Enum(
'guestShutdown',
'none',
'powerOff',
'powerOn',
'suspend',
)
|
{
"content_hash": "8080855f9e1081fb08bcca427ae47e84",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 35,
"avg_line_length": 14.7,
"alnum_prop": 0.6326530612244898,
"repo_name": "xuru/pyvisdk",
"id": "4f31a9146a8cef3237f1238093ae5e14b3691b5e",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/enums/v_app_auto_start_action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
from builtins import object
import json
import sys
from nose.tools import assert_equal, assert_true
from desktop.lib.django_test_util import make_logged_in_client
from desktop.settings import BASE_DIR
from useradmin.models import User
from azure.conf import ABFS_CLUSTERS
from beeswax.server import dbms
from indexer.indexers.sql import SQLIndexer
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock, MagicMock
else:
from mock import patch, Mock, MagicMock
class TestSQLIndexer(object):
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="empty", recreate=True, is_superuser=False)
self.user = User.objects.get(username="test")
def test_create_table_from_a_file_to_csv(self):
fs = Mock(
stats=Mock(return_value={'mode': 0o0777})
)
def source_dict(key):
return {
'path': 'hdfs:///path/data.csv',
'format': {'quoteChar': '"', 'fieldSeparator': ','},
'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id'}],
'sourceType': 'hive'
}.get(key, Mock())
source = MagicMock()
source.__getitem__.side_effect = source_dict
def destination_dict(key):
return {
'name': 'default.export_table',
'tableFormat': 'csv',
'importData': True,
'nonDefaultLocation': '/user/hue/customer_stats.csv',
'columns': [{'name': 'id', 'type': 'int'}],
'partitionColumns': [{'name': 'day', 'type': 'date', 'partitionValue': '20200101'}],
'description': 'No comment!',
'sourceType': 'hive-1'
}.get(key, Mock())
destination = MagicMock()
destination.__getitem__.side_effect = destination_dict
with patch('notebook.models.get_interpreter') as get_interpreter:
notebook = SQLIndexer(user=self.user, fs=fs).create_table_from_a_file(source, destination)
assert_equal(
[statement.strip() for statement in u'''DROP TABLE IF EXISTS `default`.`hue__tmp_export_table`;
CREATE TABLE `default`.`hue__tmp_export_table`
(
`id` int ) COMMENT "No comment!"
PARTITIONED BY (
`day` date )
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
WITH SERDEPROPERTIES ("separatorChar" = ",",
"quoteChar" = """,
"escapeChar" = "\\\\"
)
STORED AS TextFile TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;
LOAD DATA INPATH 'hdfs:///path/data.csv' INTO TABLE `default`.`hue__tmp_export_table` PARTITION (day='20200101');
CREATE TABLE `default`.`export_table` COMMENT "No comment!"
STORED AS csv
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_export_table`;
DROP TABLE IF EXISTS `default`.`hue__tmp_export_table`;'''.split(';')],
[statement.strip() for statement in notebook.get_data()['snippets'][0]['statement_raw'].split(';')]
)
class MockRequest(object):
def __init__(self, fs=None, user=None):
self.fs = fs if fs is not None else MockFs()
if user is None:
self.c = make_logged_in_client(username='test_importer', is_superuser=False)
self.user = User.objects.get(username='test_importer')
else:
self.user = user
class MockFs(object):
def __init__(self, path=None):
self.path = {'isDir': False, 'listdir': ['/A'], 'parent_path': '/A'} if path is None else path
def isdir(self, path):
return self.path['isDir']
def split(self, path):
return self.path['split']
def listdir(self, path):
return self.path['listdir']
def parent_path(self, path):
return self.path['parent_path']
def stats(self, path):
return {"mode": 0o0777}
def test_generate_create_text_table_with_data_partition():
source = {
u'sourceType': 'hive', u'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False,
u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type':
u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses',
u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100,
u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False,
u'type': u'string', u'showProperties': False, u'keep': True}], u'name': u'', u'inputFormat': u'file',
u'format': {u'status': 0, u'fieldSeparator': u',', u'hasHeader': True, u'quoteChar': u'"',
u'recordSeparator': u'\\n', u'type': u'csv'}, u'defaultName': u'default.customer_stats', u'show': True,
u'tableName': u'', u'sample': [], u'apiHelperType': u'hive', u'inputFormatsAll': [{u'name': u'File', u'value': u'file'},
{u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'},
{u'name': u'Table', u'value': u'table'}], u'query': u'', u'databaseName': u'default', u'table': u'',
u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'},
{u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}],
u'path': u'/user/romain/customer_stats.csv', u'draggedQuery': u'',
u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'isObjectStore': False
}
destination = {
u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'hive',
u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [],
u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'isTargetChecking': False, u'tableName': u'customer_stats',
u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'},
{u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'',
u'isTargetExisting': False, u'partitionColumns': [{u'operations': [], u'comment': u'', u'name': u'new_field_1',
u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': True, u'length': 100,
u'partitionValue': u'AAA', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}],
u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'kuduPartitionColumns': [],
u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}],
u'customMapDelimiter': u'\\003', u'showProperties': False, u'useDefaultLocation': True, u'description': u'',
u'primaryKeyObjects': [], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True,
u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1,
u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [],
u'outputFormat': u'table', u'nonDefaultLocation': u'/user/romain/customer_stats.csv', u'name': u'default.customer_stats',
u'tableFormat': u'text', 'ouputFormat': u'table',
u'bulkColumnNames': u'customers.id,customers.name,customers.email_preferences,customers.addresses,customers.orders',
u'columns': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string',
u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string',
u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string',
u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True,
u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'},
{u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'},
{u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'ORC', u'value': u'orc'}],
u'customCollectionDelimiter': u'\\002'
}
request = MockRequest(fs=MockFs())
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE TABLE `default`.`customer_stats`
(
`customers.id` bigint ,
`customers.name` string ,
`customers.email_preferences` string ,
`customers.addresses` string ,
`customers.orders` string ) PARTITIONED BY (
`new_field_1` string )
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
assert_true(
'''LOAD DATA INPATH '/user/romain/customer_stats.csv' '''
'''INTO TABLE `default`.`customer_stats` PARTITION (new_field_1='AAA');''' in sql,
sql
)
def test_generate_create_kudu_table_with_data():
source = {
u'sourceType': 'impala', u'apiHelperType': 'hive', u'sampleCols': [], u'name': u'', u'inputFormat': u'file',
u'format': {u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv', u'hasHeader': True, u'fieldSeparator': u','},
u'show': True, u'tableName': u'', u'sample': [], u'defaultName': u'index_data', u'query': u'', u'databaseName': u'default',
u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}],
u'path': u'/user/admin/index_data.csv', u'draggedQuery': u'', u'isObjectStore': False
}
destination = {
u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'impala',
u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [],
u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'tableName': u'index_data',
u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'},
{u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'',
u'isTargetExisting': False, u'partitionColumns': [], u'useCustomDelimiters': True,
u'kuduPartitionColumns': [{u'int_val': 16, u'name': u'HASH', u'columns': [u'id'],
u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}], u'outputFormats': [{u'name': u'Table', u'value': u'table'},
{u'name': u'Solr+index', u'value': u'index'}], u'customMapDelimiter': None, u'showProperties': False, u'useDefaultLocation': True,
u'description': u'Big Data', u'primaryKeyObjects': [{u'operations': [], u'comment': u'', u'name': u'id', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'customFieldDelimiter': u',',
u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default',
u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES',
u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [u'id'],
u'outputFormat': u'table', u'nonDefaultLocation': u'/user/admin/index_data.csv', u'name': u'index_data',
u'tableFormat': u'kudu',
u'bulkColumnNames': u'business_id,cool,date,funny,id,stars,text,type,useful,user_id,name,full_address,latitude,'
'longitude,neighborhoods,open,review_count,state', u'columns': [{u'operations': [], u'comment': u'', u'name': u'business_id',
u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100,
u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'cool', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint',
u'showProperties': False, u'keep': False}, {u'operations': [], u'comment': u'', u'name': u'date', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'funny', u'level': 0, u'scale': 4, u'precision': 10, u'keyType': u'string', u'required': False, u'nested': [],
u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'decimal', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'stars', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'text', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100,
u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'type', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [],
u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'useful', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'user_id', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'full_address', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'latitude', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'longitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'neighborhoods', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'open', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'review_count', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'state', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'},
{u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Kudu', u'value': u'kudu'},
{u'name': u'Avro', u'value': u'avro'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'RCFile', u'value': u'rcfile'},
{u'name': u'ORC', u'value': u'orc'}, {u'name': u'SequenceFile', u'value': u'sequencefile'}], u'customCollectionDelimiter': None
}
request = MockRequest(fs=MockFs())
with patch('hadoop.fs.hadoopfs.Hdfs.split') as split:
split.return_value = ('/A', 'a')
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_index_data`;''' in sql, sql)
statement = '''CREATE EXTERNAL TABLE `default`.`hue__tmp_index_data`
(
`business_id` string ,
`cool` bigint ,
`date` string ,
`funny` decimal(10, 4) ,
`id` string ,
`stars` bigint ,
`text` string ,
`type` string ,
`useful` bigint ,
`user_id` string ,
`name` string ,
`full_address` string ,
`latitude` double ,
`longitude` double ,
`neighborhoods` string ,
`open` string ,
`review_count` bigint ,
`state` string ) COMMENT "Big Data"
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TextFile LOCATION '/A'
TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")'''
assert_true(statement in sql, sql)
assert_true('''CREATE TABLE `default`.`index_data` COMMENT "Big Data"
PRIMARY KEY (id)
PARTITION BY HASH PARTITIONS 16
STORED AS kudu
TBLPROPERTIES(
'kudu.num_tablet_replicas' = '1'
)
AS SELECT `id`, `business_id`, `date`, `funny`, `stars`, `text`, `type`, `useful`, `user_id`, `name`, '''
'''`full_address`, `latitude`, `longitude`, `neighborhoods`, `open`, `review_count`, `state`
FROM `default`.`hue__tmp_index_data`''' in sql,
sql
)
def test_generate_create_parquet_table():
source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694",'''
'''"-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US",'''
'''"Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746",'''
'''"-78.6252975464"],'''
'''["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"",'''
'''"nested":[],'''
'''"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double",'''
'''"showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":'''
'''"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,'''
'''"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"",'''
'''"multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],'''
'''"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,'''
'''"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,'''
'''"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},'''
'''{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":'''
'''"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":'''
'''"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"",'''
'''"tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv",'''
'''"fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":'''
'''"default.query-hive-360"}'''
)
destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "hive", "name":"default.parquet_table"'''
''',"apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},'''
'''{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],'''
'''"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],'''
'''"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":'''
'''"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,'''
'''"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"",'''
'''"multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":'''
'''[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":'''
'''100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":'''
'''false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,'''
'''vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":'''
'''"","tableName":"parquet_table","databaseName":"default","tableFormat":"parquet","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":'''
'''{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},'''
'''"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,'''
'''"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":'''
'''"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},'''
'''{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc",'''
'''"name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,'''
'''"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":'''
'''false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}'''
)
path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
assert_true('''CREATE TABLE `default`.`parquet_table`
STORED AS parquet
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;''' in sql, sql)
def test_generate_create_orc_table_transactional():
source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694",'''
'''"-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US",'''
'''"Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746",'''
'''"-78.6252975464"],'''
'''["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"",'''
'''"nested":[],'''
'''"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":'''
'''false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,'''
'''"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"",'''
'''"multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"",'''
'''"nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":'''
'''false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},'''
'''{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],'''
'''"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual",'''
'''"name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],'''
'''"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default",'''
'''"apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n",'''
'''"quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}'''
)
destination = json.loads('''{"isTransactional": true, "isInsertOnly": true, "sourceType": "hive", "name":'''
'''"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":'''
'''[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},'''
'''{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],'''
'''"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},'''
'''{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},'''
'''{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":'''
'''"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,'''
'''tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,'''
'''"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"orc",'''
'''"KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=",'''
'''"upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":'''
'''[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH",'''
'''"int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},'''
'''{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},'''
'''{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],'''
'''"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,'''
'''"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003",'''
'''"customRegexp":""}'''
)
path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
assert_true('''CREATE TABLE `default`.`parquet_table`
STORED AS orc
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_empty_kudu_table():
source = json.loads('''{"sourceType": "impala", "apiHelperType": "impala", "path": "", "inputFormat": "manual"}''')
destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "impala", '''
'''"name":"default.manual_empty_kudu","apiHelperType":"impala","description":"","outputFormat":"table",'''
'''"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":'''
'''"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"",'''
'''"nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},'''
'''{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,'''
'''vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":'''
'''"","tableName":"manual_kudu_table","databaseName":"default","tableFormat":"kudu","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":'''
'''{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},'''
'''"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,'''
'''"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text",'''
'''"name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},'''
'''{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],'''
'''"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys": ["acct_client"],"primaryKeyObjects":[],"importData":false,'''
'''"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":false,"useCustomDelimiters":'''
'''false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}'''
)
path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''CREATE TABLE `default`.`manual_empty_kudu`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double , PRIMARY KEY (acct_client)
) STORED AS kudu TBLPROPERTIES("transactional" = "false")
;''' in sql, sql)
def test_create_ddl_with_nonascii():
source = {u'kafkaFieldType': u'delimited', u'rdbmsUsername': u'', u'kafkaFieldTypes': u'',
u'selectedTableIndex': 0, u'rdbmsJdbcDriverNames': [], u'tableName': u'',
u'sample': [[u'Weihaiwei', u'\u5a01\u6d77\u536b\u5e02', u'Weihai', u'\u5a01\u6d77\u5e02', u'1949-11-01'],
[u'Xingshan', u'\u5174\u5c71\u5e02', u'Hegang', u'\u9e64\u5c97\u5e02', u'1950-03-23'],
[u"Xi'an", u'\u897f\u5b89\u5e02', u'Liaoyuan', u'\u8fbd\u6e90\u5e02', u'1952-04-03'],
[u'Nanzheng', u'\u5357\u90d1\u5e02', u'Hanzhong', u'\u6c49\u4e2d\u5e02', u'1953-10-24'],
[u'Dihua', u'\u8fea\u5316\u5e02', u'?r\xfcmqi', u'\u4e4c\u9c81\u6728\u9f50\u5e02', u'1953-11-20']],
u'rdbmsTypes': [], u'isFetchingDatabaseNames': False, u'rdbmsDbIsValid': False, u'query': u'',
u'channelSourceSelectedHosts': [], u'table': u'', u'rdbmsAllTablesSelected': False,
u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'rdbmsPassword': u'',
u'isObjectStore': False, u'tables': [{u'name': u''}], u'streamUsername': u'',
u'kafkaSchemaManual': u'detect', u'connectorSelection': u'sfdc', u'namespace':
{u'status': u'CREATED', u'computes':
[{u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'}],
u'id': u'default', u'name': u'default'}, u'rdbmsIsAllTables': False, u'rdbmsDatabaseNames': [],
u'hasStreamSelected': False, u'channelSourcePath': u'/var/log/hue-httpd/access_log',
u'channelSourceHosts': [], u'show': True, u'streamObjects': [], u'streamPassword': u'',
u'tablesNames': [], u'sampleCols': [{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'Before', u'level': 0, u'keyType': u'string',
u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'old_Chinese_name', u'level': 0, u'keyType':
u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'After', u'level': 0, u'keyType': u'string',
u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'new_Chinese_name', u'level': 0, u'keyType':
u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'Renamed_date', u'level': 0, u'keyType': u'string',
u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0}], u'rdbmsDatabaseName': u'',
u'sourceType': u'hive', u'inputFormat': u'file', u'format': {u'status': 0, u'fieldSeparator': u',',
u'hasHeader': True, u'quoteChar': u'"',
u'recordSeparator': u'\\n', u'type': u'csv'},
u'connectorList': [{u'name': u'Salesforce', u'value': u'sfdc'}], u'kafkaFieldDelimiter': u',',
u'rdbmsPort': u'', u'rdbmsTablesExclude': [], u'isFetchingDriverNames': False, u'publicStreams':
[{u'name': u'Kafka Topics', u'value': u'kafka'}, {u'name': u'Flume Agent', u'value': u'flume'}],
u'channelSourceTypes': [{u'name': u'Directory or File', u'value': u'directory'},
{u'name': u'Program', u'value': u'exec'},
{u'name': u'Syslogs', u'value': u'syslogs'},
{u'name': u'HTTP', u'value': u'http'}],
u'databaseName': u'default', u'inputFormats': [{u'name': u'File', u'value': u'file'},
{u'name': u'External Database', u'value': u'rdbms'},
{u'name': u'Manually', u'value': u'manual'}],
u'path': u'/user/admin/renamed_chinese_cities_gb2312.csv', u'streamToken': u'', u'kafkaFieldNames': u'',
u'streamSelection': u'kafka', u'compute': {u'credentials': {}, u'type': u'direct',
u'id': u'default', u'name': u'default'},
u'name': u'', u'kafkaFieldSchemaPath': u'', u'kafkaTopics': [], u'rdbmsJdbcDriver': u'',
u'rdbmsHostname': u'', u'isFetchingTableNames': False, u'rdbmsType': None, u'inputFormatsAll':
[{u'name': u'File', u'value': u'file'}, {u'name': u'External Database', u'value': u'rdbms'},
{u'name': u'Manually', u'value': u'manual'}], u'rdbmsTableNames': [],
u'streamEndpointUrl': u'https://login.salesforce.com/services/Soap/u/42.0', u'kafkaSelectedTopics': u''}
destination = {u'isTransactionalVisible': True, u'KUDU_DEFAULT_PARTITION_COLUMN':
{u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions':
[{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'namespaces':
[{u'status': u'CREATED', u'computes': [{u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'}],
u'id': u'default', u'name': u'default'}], u'isTargetChecking': False, 'ouputFormat': u'table',
u'tableName': u'renamed_chinese_cities_gb2312', u'outputFormatsList':
[{u'name': u'Table', u'value': u'table'}, {u'name': u'Search index', u'value': u'index'},
{u'name': u'Database', u'value': u'database'}, {u'name': u'Folder', u'value': u'file'},
{u'name': u'HBase Table', u'value': u'hbase'}],
u'fieldEditorPlaceHolder': u'Example: SELECT * FROM [object Promise]', u'indexerDefaultField': [],
u'fieldEditorValue':
u'SELECT Before,\n old_Chinese_name,\n After,\n new_Chinese_name,\n Renamed_date\n FROM [object Promise];',
u'customRegexp': u'', u'customLineDelimiter': u'\\n', u'isTargetExisting': False,
u'customEnclosedByDelimiter': u"'", u'indexerConfigSets': [], u'sourceType': u'hive',
u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'numMappers': 1,
u'fieldEditorDatabase': u'default', u'namespace': {u'status': u'CREATED', u'computes':
[{u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'}], u'id': u'default', u'name': u'default'},
u'indexerPrimaryKeyObject': [], u'kuduPartitionColumns': [], u'rdbmsFileOutputFormats':
[{u'name': u'text', u'value': u'text'}, {u'name': u'sequence', u'value': u'sequence'},
{u'name': u'avro', u'value': u'avro'}], u'outputFormats': [{u'name': u'Table', u'value': u'table'},
{u'name': u'Search index', u'value': u'index'}],
u'fieldEditorEnabled': False, u'indexerDefaultFieldObject': [],
u'customMapDelimiter': u'', u'partitionColumns': [], u'rdbmsFileOutputFormat': u'text',
u'showProperties': False, u'isTransactional': True, u'useDefaultLocation': True, u'description': u'',
u'customFieldsDelimiter': u',', u'primaryKeyObjects': [], u'customFieldDelimiter': u',',
u'rdbmsSplitByColumn': [], u'existingTargetUrl': u'', u'channelSinkTypes':
[{u'name': u'This topic', u'value': u'kafka'}, {u'name': u'Solr', u'value': u'solr'},
{u'name': u'HDFS', u'value': u'hdfs'}], u'defaultName': u'default.renamed_chinese_cities_gb2312',
u'isTransactionalUpdateEnabled': False, u'importData': True, u'databaseName': u'default',
u'indexerRunJob': False, u'indexerReplicationFactor': 1, u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN':
{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [], u'indexerConfigSet': u'',
u'sqoopJobLibPaths': [{u'path': u''}], u'outputFormat': u'table',
u'nonDefaultLocation': u'/user/admin/renamed_chinese_cities_gb2312.csv',
u'compute': {u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'},
u'name': u'default.renamed_chinese_cities_gb2312', u'tableFormat': u'text', u'isInsertOnly': True,
u'targetNamespaceId': u'default', u'bulkColumnNames': u'Before,old_Chinese_name,After,new_Chinese_name,Renamed_date',
u'columns': [{u'operations': [], u'comment': u'', u'unique': False, u'name': u'Before', u'level': 0,
u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'old_Chinese_name',
u'level': 0, u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'After', u'level': 0,
u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'new_Chinese_name',
u'level': 0, u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'Renamed_date',
u'level': 0, u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0}],
u'hasHeader': True, u'indexerPrimaryKey': [], u'tableFormats':
[{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'},
{u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'},
{u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'},
{u'name': u'ORC', u'value': u'orc'}], u'customCollectionDelimiter': u'', u'indexerNumShards': 1,
u'useFieldEditor': False, u'indexerJobLibPath': u'/tmp/smart_indexer_lib'}
file_encoding = u'gb2312'
path = {
'isDir': False,
'split': ('/user/admin', 'renamed_chinese_cities_gb2312.csv'),
'listdir': ['/user/admin/data'],
'parent_path': '/user/admin/.scratchdir/03d184ad-dd11-4ae1-aace-378daaa094e5/renamed_chinese_cities_gb2312.csv/..'
}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination, start_time=-1,
file_encoding=file_encoding).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE TABLE `default`.`hue__tmp_renamed_chinese_cities_gb2312`
(
`Before` string ,
`old_Chinese_name` string ,
`After` string ,
`new_Chinese_name` string ,
`Renamed_date` string ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
statement = "LOAD DATA INPATH '/user/admin/renamed_chinese_cities_gb2312.csv' " + \
"INTO TABLE `default`.`hue__tmp_renamed_chinese_cities_gb2312`;"
assert_true(statement in sql, sql)
statement = '''CREATE TABLE `default`.`renamed_chinese_cities_gb2312`
STORED AS TextFile
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_renamed_chinese_cities_gb2312`;'''
assert_true(statement in sql, sql)
statement = '''DROP TABLE IF EXISTS `default`.`hue__tmp_renamed_chinese_cities_gb2312`;'''
assert_true(statement in sql, sql)
statement = '''ALTER TABLE `default`.`renamed_chinese_cities_gb2312` ''' + \
'''SET serdeproperties ("serialization.encoding"="gb2312");'''
assert_true(statement in sql, sql)
def test_create_ddl_with_abfs():
finish = ABFS_CLUSTERS.set_for_testing(
{
'default': {
'fs_defaultfs': 'abfs://my-data@yingstorage.dfs.core.windows.net',
'webhdfs_url': 'https://yingstorage.dfs.core.windows.net'
}
}
)
form_data = {'path': u'abfs://my-data/test_data/cars.csv', 'partition_columns': [], 'overwrite': False}
sql = ''
request = MockRequest(fs=MockFs())
query_server_config = dbms.get_query_server_config(name='impala')
db = dbms.get(request.user, query_server=query_server_config)
try:
sql = "\n\n%s;" % db.load_data('default', 'cars', form_data, None, generate_ddl_only=True)
finally:
finish()
assert_true(u"\'abfs://my-data@yingstorage.dfs.core.windows.net/test_data/cars.csv\'" in sql)
def test_create_table_from_local():
with patch('indexer.indexers.sql.get_interpreter') as get_interpreter:
get_interpreter.return_value = {'Name': 'Hive', 'dialect': 'hive'}
source = {
'path': '',
'sourceType': 'hive'
}
destination = {
'name': 'default.test1',
'columns': [
{'name': 'date', 'type': 'timestamp'},
{'name': 'hour', 'type': 'bigint'},
{'name': 'minute', 'type': 'bigint'},
{'name': 'dep', 'type': 'bigint'},
{'name': 'arr', 'type': 'bigint'},
{'name': 'dep_delay', 'type': 'bigint'},
{'name': 'arr_delay', 'type': 'bigint'},
{'name': 'carrier', 'type': 'string'},
{'name': 'flight', 'type': 'bigint'},
{'name': 'dest', 'type': 'string'},
{'name': 'plane', 'type': 'string'},
{'name': 'cancelled', 'type': 'boolean'},
{'name': 'time', 'type': 'bigint'},
{'name': 'dist', 'type': 'bigint'},
],
'indexerPrimaryKey': [],
'sourceType': 'hive'
}
sql = SQLIndexer(user=Mock(), fs=Mock()).create_table_from_local_file(source, destination).get_str()
statement = '''USE default;
CREATE TABLE IF NOT EXISTS default.test1 (
`date` timestamp,
`hour` bigint,
`minute` bigint,
`dep` bigint,
`arr` bigint,
`dep_delay` bigint,
`arr_delay` bigint,
`carrier` string,
`flight` bigint,
`dest` string,
`plane` string,
`cancelled` boolean,
`time` bigint,
`dist` bigint);'''
assert_equal(statement, sql)
def test_create_table_from_local_mysql():
with patch('indexer.indexers.sql.get_interpreter') as get_interpreter:
get_interpreter.return_value = {'Name': 'MySQL', 'dialect': 'mysql'}
source = {
'path': BASE_DIR + '/apps/beeswax/data/tables/us_population.csv',
'sourceType': 'mysql',
'format': {'hasHeader': False}
}
destination = {
'name': 'default.test1',
'columns': [
{'name': 'field_1', 'type': 'string'},
{'name': 'field_2', 'type': 'string'},
{'name': 'field_3', 'type': 'bigint'},
],
'sourceType': 'mysql'
}
sql = SQLIndexer(user=Mock(), fs=Mock()).create_table_from_local_file(source, destination).get_str()
statement = '''USE default;
CREATE TABLE IF NOT EXISTS default.test1 (
`field_1` VARCHAR(255),
`field_2` VARCHAR(255),
`field_3` bigint);
INSERT INTO default.test1 VALUES ('NY', 'New York', '8143197'), ('CA', 'Los Angeles', '3844829'), \
('IL', 'Chicago', '2842518'), ('TX', 'Houston', '2016582'), ('PA', 'Philadelphia', '1463281'), \
('AZ', 'Phoenix', '1461575'), ('TX', 'San Antonio', '1256509'), ('CA', 'San Diego', '1255540'), \
('TX', 'Dallas', '1213825'), ('CA', 'San Jose', '912332');'''
assert_equal(statement, sql)
def test_create_table_from_local_phoenix():
with patch('indexer.indexers.sql.get_interpreter') as get_interpreter:
get_interpreter.return_value = {'Name': 'Phoenix', 'dialect': 'phoenix'}
source = {
'path': BASE_DIR + '/apps/beeswax/data/tables/us_population.csv',
'sourceType': 'phoenix',
'format': {'hasHeader': False}
}
destination = {
'name': 'default.test1',
'columns': [
{'name': 'field_1', 'type': 'string'},
{'name': 'field_2', 'type': 'string'},
{'name': 'field_3', 'type': 'bigint'},
],
'sourceType': 'phoenix',
'primaryKeys': ['field_3']
}
sql = SQLIndexer(user=Mock(), fs=Mock()).create_table_from_local_file(source, destination).get_str()
statement = '''USE default;
CREATE TABLE IF NOT EXISTS default.test1 (
field_1 CHAR(255),
field_2 CHAR(255),
field_3 bigint
CONSTRAINT my_pk PRIMARY KEY (field_3));
UPSERT INTO default.test1 VALUES ('NY', 'New York', 8143197);
UPSERT INTO default.test1 VALUES ('CA', 'Los Angeles', 3844829);
UPSERT INTO default.test1 VALUES ('IL', 'Chicago', 2842518);
UPSERT INTO default.test1 VALUES ('TX', 'Houston', 2016582);
UPSERT INTO default.test1 VALUES ('PA', 'Philadelphia', 1463281);
UPSERT INTO default.test1 VALUES ('AZ', 'Phoenix', 1461575);
UPSERT INTO default.test1 VALUES ('TX', 'San Antonio', 1256509);
UPSERT INTO default.test1 VALUES ('CA', 'San Diego', 1255540);
UPSERT INTO default.test1 VALUES ('TX', 'Dallas', 1213825);
UPSERT INTO default.test1 VALUES ('CA', 'San Jose', 912332);'''
assert_equal(statement, sql)
def test_create_table_from_local_impala():
with patch('indexer.indexers.sql.get_interpreter') as get_interpreter:
get_interpreter.return_value = {'Name': 'Impala', 'dialect': 'impala'}
source = {
'path': BASE_DIR + '/apps/beeswax/data/tables/flights.csv',
'sourceType': 'impala',
'format': {'hasHeader': True}
}
destination = {
'name': 'default.test1',
'columns': [
{'name': 'date', 'type': 'timestamp'},
{'name': 'hour', 'type': 'bigint'},
{'name': 'minute', 'type': 'bigint'},
{'name': 'dep', 'type': 'bigint'},
{'name': 'arr', 'type': 'bigint'},
{'name': 'dep_delay', 'type': 'bigint'},
{'name': 'arr_delay', 'type': 'bigint'},
{'name': 'carrier', 'type': 'string'},
{'name': 'flight', 'type': 'bigint'},
{'name': 'dest', 'type': 'string'},
{'name': 'plane', 'type': 'string'},
{'name': 'cancelled', 'type': 'boolean'},
{'name': 'time', 'type': 'bigint'},
{'name': 'dist', 'type': 'bigint'},
],
'sourceType': 'impala'
}
request = MockRequest(fs=MockFs())
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_local_file(source, destination).get_str()
statement = '''USE default;
CREATE TABLE IF NOT EXISTS default.test1_tmp (
`date` string,
`hour` string,
`minute` string,
`dep` string,
`arr` string,
`dep_delay` string,
`arr_delay` string,
`carrier` string,
`flight` string,
`dest` string,
`plane` string,
`cancelled` string,
`time` string,
`dist` string);
INSERT INTO default.test1_tmp VALUES \
('2011-12-14 12:00:00', '13', '4', '1304', '1704', '24', '14', 'WN', '3085', 'PHL', 'N524SW', '1', '159', '1336'), \
('2011-12-14 12:00:00', '17', '52', '1752', '1943', '12', '8', 'WN', '39', 'PHX', 'N503SW', '1', '155', '1020'), \
('2011-12-14 12:00:00', '7', '9', '709', '853', '-1', '-12', 'WN', '424', 'PHX', 'N761RR', '1', '152', '1020'), \
('2011-12-14 12:00:00', '13', '32', '1332', '1514', '17', '4', 'WN', '1098', 'PHX', 'N941WN', '1', '151', '1020'), \
('2011-12-14 12:00:00', '9', '55', '955', '1141', '5', '-4', 'WN', '1403', 'PHX', 'N472WN', '1', '155', '1020'), \
('2011-12-14 12:00:00', '16', '13', '1613', '1731', '8', '-4', 'WN', '33', 'SAN', 'N707SA', '1', '185', '1313'), \
('2011-12-14 12:00:00', '11', '45', '1145', '1257', '5', '-13', 'WN', '1212', 'SAN', 'N279WN', '0', '183', '1313'), \
('2011-12-14 12:00:00', '20', '16', '2016', '2112', '36', '32', 'WN', '207', 'SAT', 'N929WN', '0', '44', '192');
CREATE TABLE IF NOT EXISTS default.test1
AS SELECT
CAST ( `date` AS timestamp ) `date`,
CAST ( `hour` AS bigint ) `hour`,
CAST ( `minute` AS bigint ) `minute`,
CAST ( `dep` AS bigint ) `dep`,
CAST ( `arr` AS bigint ) `arr`,
CAST ( `dep_delay` AS bigint ) `dep_delay`,
CAST ( `arr_delay` AS bigint ) `arr_delay`,
CAST ( `carrier` AS string ) `carrier`,
CAST ( `flight` AS bigint ) `flight`,
CAST ( `dest` AS string ) `dest`,
CAST ( `plane` AS string ) `plane`,
CAST ( CAST ( `cancelled` AS TINYINT ) AS boolean ) `cancelled`,
CAST ( `time` AS bigint ) `time`,
CAST ( `dist` AS bigint ) `dist`
FROM default.test1_tmp;
DROP TABLE IF EXISTS default.test1_tmp;'''
assert_equal(statement, sql)
|
{
"content_hash": "8ca623766ca5058218e4ebac37027fe6",
"timestamp": "",
"source": "github",
"line_count": 987,
"max_line_length": 138,
"avg_line_length": 66.3920972644377,
"alnum_prop": 0.5897694150681377,
"repo_name": "kawamon/hue",
"id": "a905ffe516febdd86e88e2c8dc8932ec34739b90",
"size": "66345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/libs/indexer/src/indexer/indexers/sql_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
import re
import json
import os.path
import logging
from datetime import datetime
from requests_oauthlib import OAuth1Session
from models import AirCondition, AirAverage
from leda import settings
# From https://twitter.com/Guangzhou_Air/following
CITYS = {
'Guangzhou_Air': 'Guangzhou',
'BeijingAir': 'Beijing',
'Shenyang_Air': 'Shenyang',
'CGChengduAir': 'Chengdu',
'CGShanghaiAir': 'Shanghai',
}
# From http://aqicn.org/faq/2013-09-09/revised-pm25-aqi-breakpoints/
level_map = {
'No data': -1,
'Good': 1,
'Moderate': 2,
'Unhealthy for Sensitive Groups': 3,
'Unhealthy': 4, # 100 < value < 150
'Very Unhealthy': 5,
'Hazardous': 6,
'Beyond Index': 7, # for Beijing value > 500
'Unknown': 8,
}
logger = logging.getLogger('crawler')
class SinceId(object):
"""Creating a file to save since id"""
def __init__(self, fname='since_id.json'):
self._module_path = os.path.dirname(__file__)
self._file_path = os.path.join(self._module_path, fname)
self._ids = self._recover()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._save()
def __getitem__(self, item):
if not self._ids:
return None
return self._ids.get(item)
def __setitem__(self, key, value):
self._ids[key] = value
def _save(self):
if not self._ids:
return
with open(self._file_path, mode='w') as f:
json.dump(self._ids, f)
def _recover(self):
"""Recovery `since_id` from file"""
if os.path.isfile(self._file_path):
with open(self._file_path) as f:
json_data = json.load(f)
return json_data
return dict()
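# Illustrative usage sketch for SinceId (not part of the original file; it simply
# mirrors how run() below drives the class): the context manager flushes any ids
# collected during a crawl back to since_id.json on exit.
#   with SinceId() as since_id:
#       last_seen = since_id['BeijingAir']    # None until an id has been saved
#       since_id['BeijingAir'] = 123456789    # hypothetical id, persisted on exit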
def get_datetime(s):
return datetime.strptime(s, "%m-%d-%Y %H:%M")
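# Note (added for clarity): the embassy feeds use a US-style "MM-DD-YYYY HH:MM"
# timestamp, so an illustrative call would be
#   get_datetime('12-14-2011 08:00')  # -> datetime(2011, 12, 14, 8, 0)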
def get_timeline(username, since_id=None, count=0):
"""Get the specified twitter user's timeline.
:param username: The screen name of the user for whom to return results for.
:type username: str
:param since_id: Returns results with an ID greater than the specified ID.
:type since_id: str or None
:param count: Specifies the number of tweets to try and retrieve.
:type count: int
    :rtype: list
"""
twitter = OAuth1Session(client_key=settings.CLIENT_KEY, client_secret=settings.CLIENT_SECRET,
resource_owner_key=settings.ACCESS_TOKEN_KEY,
resource_owner_secret=settings.ACCESS_TOKEN_SECRET)
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
params = {
'screen_name': username,
}
if since_id:
params.update(since_id=since_id)
if count:
params.update(count=count)
r = twitter.get(url, params=params)
return r.json()
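# Illustrative call (hypothetical arguments; the OAuth credentials are read from
# leda.settings): fetch up to 200 tweets posted after a previously saved id.
#   tweets = get_timeline('BeijingAir', since_id='1234567890', count=200)
#   # -> list of tweet dicts from statuses/user_timeline.json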
def match(tweet=None):
"""Get the information we need in tweet and return the match group dict.
:type tweet dict
:rtype : dict
"""
text = tweet['text']
if 'avg' in text:
pattern = re.compile(r'^(?P<from_time>\d{2}-\d{2}-\d{4}\s\d{2}:\d{2})\sto\s'
r'(?P<to_time>\d{2}-\d{2}-\d{4}\s\d{2}:\d{2});[^;]+;\s'
r'(?P<pm2_5>\d+\.\d);\s(?P<aqi>\d+);\s(?P<level>[^(]+)$', flags=re.UNICODE)
else:
pattern = re.compile(r'^(?P<time>\d{2}-\d{2}-\d{4}\s\d{2}:\d{2});\sPM2.5;\s(?P<pm2_5>\d+\.\d);\s'
r'(?P<aqi>\d+);\s(?P<level>[^(]+)', flags=re.UNICODE)
data = re.match(pattern, text)
if data:
return data.groupdict()
    # try to find out whether this tweet is a "no data" report or not
nodata = re.match(r'(?P<time>\d{2}-\d{2}-\d{4}\s\d{2}:\d{2});\sPM2.5;\s(?P<info>No\sData)', text, flags=re.UNICODE)
if nodata:
logger.debug("No data")
return nodata.groupdict()
logger.warning("Fail to match tweet")
logger.warning(text)
return None
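# The patterns above expect tweet texts shaped roughly like the following
# (illustrative strings reconstructed from the regexes, not real tweets):
#   hourly:  "03-15-2015 14:00; PM2.5; 12.0; 50; Good"
#   12h avg: "03-15-2015 00:00 to 03-15-2015 12:00; PM2.5 12hr avg; 11.5; 48; Good"
#   no data: "03-15-2015 14:00; PM2.5; No Data"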
def crawl(since_id, city, count=200):
logger.debug("Start crawling {}".format(CITYS[city]))
tweets = get_timeline(city, since_id=since_id[city], count=count)
for tweet in tweets:
logger.debug(tweet['text'])
msg = match(tweet)
        # if the tweet matched, there are three possible cases: avg, hourly or no data
if msg:
# hourly
if 'aqi' in msg and 'time' in msg:
air = AirCondition(pm2_5=float(msg.get('pm2_5')), aqi=int(msg.get('aqi')),
time=get_datetime(msg.get('time')), city=CITYS[city],
level=level_map[msg.get('level').strip()])
# 12h avg
elif 'from_time' in msg:
air = AirAverage(pm2_5=float(msg.get('pm2_5')), aqi=int(msg.get('aqi')),
from_time=get_datetime(msg.get('from_time')), to_time=get_datetime(msg.get('to_time')),
city=CITYS[city], level=level_map[msg.get('level').strip()])
# no data
elif 'info' in msg:
air = AirCondition(pm2_5=float(-1.0), aqi=int(-1), time=get_datetime(msg.get('time')),
city=CITYS[city], level=-1)
# unknown
else:
break
air.save()
logger.debug('New data saved.')
# save since_id after success
if tweets:
# since_id.save(city, tweets[0]['id'])
since_id[city] = tweets[0]['id']
logger.debug("{}: {}".format(city, tweets[0]['id']))
logger.debug('done')
def run():
with SinceId() as since_id:
for city in CITYS.keys():
crawl(since_id, city)
|
{
"content_hash": "02cd60048d40a66a56413cf6a7f063a9",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 120,
"avg_line_length": 33.583333333333336,
"alnum_prop": 0.554058844381425,
"repo_name": "banbanchs/leda",
"id": "d4330f1f698805e7b33ce8779710fabef67af313",
"size": "5658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9067"
},
{
"name": "HTML",
"bytes": "5602"
},
{
"name": "JavaScript",
"bytes": "12230"
},
{
"name": "Python",
"bytes": "18502"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_hvac_air_loop_terminal_units import AirTerminalSingleDuctVavHeatAndCoolReheat
log = logging.getLogger(__name__)
class TestAirTerminalSingleDuctVavHeatAndCoolReheat(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airterminalsingleductvavheatandcoolreheat(self):
pyidf.validation_level = ValidationLevel.error
obj = AirTerminalSingleDuctVavHeatAndCoolReheat()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# node
var_damper_air_outlet_node_name = "node|Damper Air Outlet Node Name"
obj.damper_air_outlet_node_name = var_damper_air_outlet_node_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# real
var_maximum_air_flow_rate = 0.0001
obj.maximum_air_flow_rate = var_maximum_air_flow_rate
# real
var_zone_minimum_air_flow_fraction = 0.5
obj.zone_minimum_air_flow_fraction = var_zone_minimum_air_flow_fraction
# node
var_hot_water_or_steam_inlet_node_name = "node|Hot Water or Steam Inlet Node Name"
obj.hot_water_or_steam_inlet_node_name = var_hot_water_or_steam_inlet_node_name
# alpha
var_reheat_coil_object_type = "Coil:Heating:Water"
obj.reheat_coil_object_type = var_reheat_coil_object_type
# object-list
var_reheat_coil_name = "object-list|Reheat Coil Name"
obj.reheat_coil_name = var_reheat_coil_name
# real
var_maximum_hot_water_or_steam_flow_rate = 0.0
obj.maximum_hot_water_or_steam_flow_rate = var_maximum_hot_water_or_steam_flow_rate
# real
var_minimum_hot_water_or_steam_flow_rate = 0.0
obj.minimum_hot_water_or_steam_flow_rate = var_minimum_hot_water_or_steam_flow_rate
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# real
var_convergence_tolerance = 0.0001
obj.convergence_tolerance = var_convergence_tolerance
# real
var_maximum_reheat_air_temperature = 0.0001
obj.maximum_reheat_air_temperature = var_maximum_reheat_air_temperature
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].name, var_name)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].damper_air_outlet_node_name, var_damper_air_outlet_node_name)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertAlmostEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].maximum_air_flow_rate, var_maximum_air_flow_rate)
self.assertAlmostEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].zone_minimum_air_flow_fraction, var_zone_minimum_air_flow_fraction)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].hot_water_or_steam_inlet_node_name, var_hot_water_or_steam_inlet_node_name)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].reheat_coil_object_type, var_reheat_coil_object_type)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].reheat_coil_name, var_reheat_coil_name)
self.assertAlmostEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].maximum_hot_water_or_steam_flow_rate, var_maximum_hot_water_or_steam_flow_rate)
self.assertAlmostEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].minimum_hot_water_or_steam_flow_rate, var_minimum_hot_water_or_steam_flow_rate)
self.assertEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertAlmostEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].convergence_tolerance, var_convergence_tolerance)
self.assertAlmostEqual(idf2.airterminalsingleductvavheatandcoolreheats[0].maximum_reheat_air_temperature, var_maximum_reheat_air_temperature)
|
{
"content_hash": "a0f96777d1f84d436a93d12e955f173f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 161,
"avg_line_length": 53.05555555555556,
"alnum_prop": 0.7183246073298429,
"repo_name": "rbuffat/pyidf",
"id": "b9c98e91cad1ecb7e712b9efa4a10f200a0849e5",
"size": "4775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_airterminalsingleductvavheatandcoolreheat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
}
|
from eventlet import greenthread
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
cfg.CONF.register_opt(
cfg.IntOpt('agent_down_time', default=9,
help=_("Seconds to regard the agent is down; should be at "
"least twice report_interval, to be sure the "
"agent is down for good.")))
class Agent(model_base.BASEV2, models_v2.HasId):
"""Represents agents running in neutron deployments."""
__table_args__ = (
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'),
)
# L3 agent, DHCP agent, OVS agent, LinuxBridge
agent_type = sa.Column(sa.String(255), nullable=False)
binary = sa.Column(sa.String(255), nullable=False)
# TOPIC is a fanout exchange topic
topic = sa.Column(sa.String(255), nullable=False)
# TOPIC.host is a target topic
host = sa.Column(sa.String(255), nullable=False)
admin_state_up = sa.Column(sa.Boolean, default=True,
nullable=False)
# the time when first report came from agents
created_at = sa.Column(sa.DateTime, nullable=False)
# the time when first report came after agents start
started_at = sa.Column(sa.DateTime, nullable=False)
# updated when agents report
heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
# description is note for admin user
description = sa.Column(sa.String(255))
# configurations: a json dict string, I think 4095 is enough
configurations = sa.Column(sa.String(4095), nullable=False)
class AgentDbMixin(ext_agent.AgentPluginBase):
"""Mixin class to add agent extension to db_plugin_base_v2."""
def _get_agent(self, context, id):
try:
agent = self._get_by_id(context, Agent, id)
except exc.NoResultFound:
raise ext_agent.AgentNotFound(id=id)
return agent
@classmethod
def is_agent_down(cls, heart_beat_time):
return timeutils.is_older_than(heart_beat_time,
cfg.CONF.agent_down_time)
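    # Illustrative: with the default agent_down_time of 9 seconds, an agent whose
    # last heartbeat is more than 9 seconds old is reported with alive=False.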
def get_configuration_dict(self, agent_db):
try:
conf = jsonutils.loads(agent_db.configurations)
except Exception:
msg = _('Configuration for agent %(agent_type)s on host %(host)s'
' is invalid.')
LOG.warn(msg, {'agent_type': agent_db.agent_type,
'host': agent_db.host})
conf = {}
return conf
def _make_agent_dict(self, agent, fields=None):
attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
ext_agent.RESOURCE_NAME + 's')
res = dict((k, agent[k]) for k in attr
if k not in ['alive', 'configurations'])
res['alive'] = not AgentDbMixin.is_agent_down(
res['heartbeat_timestamp'])
res['configurations'] = self.get_configuration_dict(agent)
return self._fields(res, fields)
def delete_agent(self, context, id):
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
context.session.delete(agent)
def update_agent(self, context, id, agent):
agent_data = agent['agent']
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
agent.update(agent_data)
return self._make_agent_dict(agent)
def get_agents_db(self, context, filters=None):
query = self._get_collection_query(context, Agent, filters=filters)
return query.all()
def get_agents(self, context, filters=None, fields=None):
return self._get_collection(context, Agent,
self._make_agent_dict,
filters=filters, fields=fields)
def _get_agent_by_type_and_host(self, context, agent_type, host):
query = self._model_query(context, Agent)
try:
agent_db = query.filter(Agent.agent_type == agent_type,
Agent.host == host).one()
return agent_db
except exc.NoResultFound:
raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
host=host)
except exc.MultipleResultsFound:
raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
host=host)
def get_agent(self, context, id, fields=None):
agent = self._get_agent(context, id)
return self._make_agent_dict(agent, fields)
def _create_or_update_agent(self, context, agent):
with context.session.begin(subtransactions=True):
res_keys = ['agent_type', 'binary', 'host', 'topic']
res = dict((k, agent[k]) for k in res_keys)
configurations_dict = agent.get('configurations', {})
res['configurations'] = jsonutils.dumps(configurations_dict)
current_time = timeutils.utcnow()
try:
agent_db = self._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
res['heartbeat_timestamp'] = current_time
if agent.get('start_flag'):
res['started_at'] = current_time
greenthread.sleep(0)
agent_db.update(res)
except ext_agent.AgentNotFoundByTypeHost:
greenthread.sleep(0)
res['created_at'] = current_time
res['started_at'] = current_time
res['heartbeat_timestamp'] = current_time
res['admin_state_up'] = True
agent_db = Agent(**res)
greenthread.sleep(0)
context.session.add(agent_db)
greenthread.sleep(0)
def create_or_update_agent(self, context, agent):
"""Create or update agent according to report."""
try:
return self._create_or_update_agent(context, agent)
except db_exc.DBDuplicateEntry as e:
if e.columns == ['agent_type', 'host']:
# It might happen that two or more concurrent transactions are
# trying to insert new rows having the same value of
# (agent_type, host) pair at the same time (if there has been
# no such entry in the table and multiple agent status updates
# are being processed at the moment). In this case having a
# unique constraint on (agent_type, host) columns guarantees
# that only one transaction will succeed and insert a new agent
# entry, others will fail and be rolled back. That means we
# must retry them one more time: no INSERTs will be issued,
# because _get_agent_by_type_and_host() will return the
# existing agent entry, which will be updated multiple times
return self._create_or_update_agent(context, agent)
raise
class AgentExtRpcCallback(object):
"""Processes the rpc report in plugin implementations."""
RPC_API_VERSION = '1.0'
START_TIME = timeutils.utcnow()
def __init__(self, plugin=None):
self.plugin = plugin
def report_state(self, context, **kwargs):
"""Report state from agent to server."""
time = kwargs['time']
time = timeutils.parse_strtime(time)
if self.START_TIME > time:
LOG.debug(_("Message with invalid timestamp received"))
return
agent_state = kwargs['agent_state']['agent_state']
if not self.plugin:
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.create_or_update_agent(context, agent_state)
|
{
"content_hash": "7a244b3cc11e322f11331fcab3bd3f54",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 79,
"avg_line_length": 42.10204081632653,
"alnum_prop": 0.5984003877847794,
"repo_name": "noelbk/neutron-juniper",
"id": "9a574a3f27df477c41b77870fff2fdbae423ac23",
"size": "8938",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/db/agents_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Checks if all the extras defined in setup.py are listed in the extra-packages-ref.rst docs file
"""
import os
import re
import sys
from os.path import dirname
from typing import Dict, List, Set
from rich import print
from rich.console import Console
from rich.table import Table
AIRFLOW_SOURCES_DIR = os.path.join(dirname(__file__), os.pardir, os.pardir, os.pardir)
SETUP_PY_FILE = 'setup.py'
DOCS_FILE = os.path.join('docs', 'apache-airflow', 'extra-packages-ref.rst')
PY_IDENTIFIER = r'[a-zA-Z_][a-zA-Z0-9_\.]*'
sys.path.insert(0, AIRFLOW_SOURCES_DIR)
from setup import ( # noqa # isort:skip
add_all_provider_packages,
EXTRAS_DEPRECATED_ALIASES,
EXTRAS_DEPENDENCIES,
PREINSTALLED_PROVIDERS,
)
def get_file_content(*path_elements: str) -> str:
file_path = os.path.join(AIRFLOW_SOURCES_DIR, *path_elements)
with open(file_path) as file_to_read:
return file_to_read.read()
def get_extras_from_setup() -> Set[str]:
"""Returns a set of regular (non-deprecated) extras from setup."""
return set(EXTRAS_DEPENDENCIES.keys()) - set(EXTRAS_DEPRECATED_ALIASES.keys())
def get_extras_from_docs() -> Set[str]:
"""
    Returns a set of extras from the docs.
"""
docs_content = get_file_content(DOCS_FILE)
extras_section_regex = re.compile(
rf'\|[^|]+\|.*pip install .apache-airflow\[({PY_IDENTIFIER})][^|]+\|[^|]+\|',
re.MULTILINE,
)
doc_extra_set: Set[str] = set()
for doc_extra in extras_section_regex.findall(docs_content):
doc_extra_set.add(doc_extra)
return doc_extra_set
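# The regex above assumes documentation table rows shaped roughly like
# (illustrative, not copied from the docs):
#   | amazon | pip install 'apache-airflow[amazon]' | Amazon Web Services |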
def get_preinstalled_providers_from_docs() -> List[str]:
"""
Returns list of pre-installed providers from the doc.
"""
docs_content = get_file_content(DOCS_FILE)
preinstalled_section_regex = re.compile(
rf'\|\s*({PY_IDENTIFIER})\s*\|[^|]+pip install[^|]+\|[^|]+\|\s+\*\s+\|$',
re.MULTILINE,
)
return preinstalled_section_regex.findall(docs_content)
def get_deprecated_extras_from_docs() -> Dict[str, str]:
"""
Returns dict of deprecated extras from airflow.docs (alias -> target extra)
"""
deprecated_extras = {}
docs_content = get_file_content(DOCS_FILE)
deprecated_extras_section_regex = re.compile(
r'\| Deprecated extra \| Extra to be used instead \|\n(.*)\n', re.DOTALL
)
deprecated_extras_content = deprecated_extras_section_regex.findall(docs_content)[0]
deprecated_extras_regexp = re.compile(r'\|\s(\S+)\s+\|\s(\S*)\s+\|$', re.MULTILINE)
for extras in deprecated_extras_regexp.findall(deprecated_extras_content):
deprecated_extras[extras[0]] = extras[1]
return deprecated_extras
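# The parsing above assumes a deprecated-extras table with rows roughly like
# (illustrative): | atlas          | apache.atlas             |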
def check_extras(console: Console) -> bool:
"""
Checks if non-deprecated extras match setup vs. doc.
:param console: print table there in case of errors
:return: True if all ok, False otherwise
"""
extras_table = Table()
extras_table.add_column("NAME", justify="right", style="cyan")
extras_table.add_column("SETUP", justify="center", style="magenta")
extras_table.add_column("DOCS", justify="center", style="yellow")
non_deprecated_setup_extras = get_extras_from_setup()
non_deprecated_docs_extras = get_extras_from_docs()
for extra in non_deprecated_setup_extras:
if extra not in non_deprecated_docs_extras:
extras_table.add_row(extra, "V", "")
for extra in non_deprecated_docs_extras:
if extra not in non_deprecated_setup_extras:
extras_table.add_row(extra, "", "V")
if extras_table.row_count != 0:
print(
f"""\
[red bold]ERROR!![/red bold]
The "[bold]CORE_EXTRAS_DEPENDENCIES[/bold]"
sections in the setup file: [bold yellow]{SETUP_PY_FILE}[/bold yellow]
should be synchronized with the "Extra Packages Reference"
in the documentation file: [bold yellow]{DOCS_FILE}[/bold yellow].
Below is the list of extras that:
* are used but are not documented,
* are documented but not used,
[bold]Please synchronize setup/documentation files![/bold]
"""
)
console.print(extras_table)
return False
return True
def check_deprecated_extras(console: Console) -> bool:
"""
Checks if deprecated extras match setup vs. doc.
:param console: print table there in case of errors
:return: True if all ok, False otherwise
"""
deprecated_setup_extras = EXTRAS_DEPRECATED_ALIASES
deprecated_docs_extras = get_deprecated_extras_from_docs()
deprecated_extras_table = Table()
deprecated_extras_table.add_column("DEPRECATED_IN_SETUP", justify="right", style="cyan")
deprecated_extras_table.add_column("TARGET_IN_SETUP", justify="center", style="magenta")
deprecated_extras_table.add_column("DEPRECATED_IN_DOCS", justify="right", style="cyan")
deprecated_extras_table.add_column("TARGET_IN_DOCS", justify="center", style="magenta")
for extra in deprecated_setup_extras.keys():
if extra not in deprecated_docs_extras:
deprecated_extras_table.add_row(extra, deprecated_setup_extras[extra], "", "")
elif deprecated_docs_extras[extra] != deprecated_setup_extras[extra]:
deprecated_extras_table.add_row(
extra, deprecated_setup_extras[extra], extra, deprecated_docs_extras[extra]
)
for extra in deprecated_docs_extras.keys():
if extra not in deprecated_setup_extras:
deprecated_extras_table.add_row("", "", extra, deprecated_docs_extras[extra])
if deprecated_extras_table.row_count != 0:
print(
f"""\
[red bold]ERROR!![/red bold]
The "[bold]EXTRAS_DEPRECATED_ALIASES[/bold]" section in the setup file:\
[bold yellow]{SETUP_PY_FILE}[/bold yellow]
should be synchronized with the "Extra Packages Reference"
in the documentation file: [bold yellow]{DOCS_FILE}[/bold yellow].
Below is the list of deprecated extras that:
* are used but are not documented,
* are documented but not used,
* or have different target extra specified in the documentation or setup.
[bold]Please synchronize setup/documentation files![/bold]
"""
)
console.print(deprecated_extras_table)
return False
return True
def check_preinstalled_extras(console: Console) -> bool:
"""
Checks if preinstalled extras match setup vs. doc.
:param console: print table there in case of errors
:return: True if all ok, False otherwise
"""
preinstalled_providers_from_docs = get_preinstalled_providers_from_docs()
preinstalled_providers_from_setup = PREINSTALLED_PROVIDERS
preinstalled_providers_table = Table()
preinstalled_providers_table.add_column("PREINSTALLED_IN_SETUP", justify="right", style="cyan")
preinstalled_providers_table.add_column("PREINSTALLED_IN_DOCS", justify="center", style="magenta")
for provider in preinstalled_providers_from_setup:
if provider not in preinstalled_providers_from_docs:
preinstalled_providers_table.add_row(provider, "")
for provider in preinstalled_providers_from_docs:
if provider not in preinstalled_providers_from_setup:
preinstalled_providers_table.add_row("", provider)
if preinstalled_providers_table.row_count != 0:
print(
f"""\
[red bold]ERROR!![/red bold]
The "[bold]PREINSTALLED_PROVIDERS[/bold]" section in the setup file:\
[bold yellow]{SETUP_PY_FILE}[/bold yellow]
should be synchronized with the "Extra Packages Reference"
in the documentation file: [bold yellow]{DOCS_FILE}[/bold yellow].
Below is the list of preinstalled providers that:
* are used but are not documented,
* or are documented but not used.
[bold]Please synchronize setup/documentation files![/bold]
"""
)
console.print(preinstalled_providers_table)
return False
return True
if __name__ == '__main__':
status: List[bool] = []
# force adding all provider package dependencies, to check providers status
add_all_provider_packages()
main_console = Console()
status.append(check_extras(main_console))
status.append(check_deprecated_extras(main_console))
status.append(check_preinstalled_extras(main_console))
if all(status):
print("All extras are synchronized: [green]OK[/]")
sys.exit(0)
sys.exit(1)
|
{
"content_hash": "05a9b85cf2fabf6ab36ff37780e1f663",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 102,
"avg_line_length": 35.11440677966102,
"alnum_prop": 0.6772052612525643,
"repo_name": "danielvdende/incubator-airflow",
"id": "5417b624fd9640c9911073a6c888d48a47ad6d90",
"size": "9096",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/ci/pre_commit/pre_commit_check_setup_extra_packages_ref.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
class ItemPedido(object):
def __init__(self, descricao, peso, preco):
self.descricao = descricao
self.peso = peso
self.preco = preco
def subtotal(self):
return self.peso * self.preco
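# Illustrative usage (not part of the original example); peso is a weight and
# preco a unit price, so subtotal() is weight * price:
#   item = ItemPedido('organic rice', 2.5, 4.0)
#   item.subtotal()  # -> 10.0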
|
{
"content_hash": "2887e6ffa82af93c3a2b9702dfb0234a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 25,
"alnum_prop": 0.6044444444444445,
"repo_name": "pythonprobr/metaprog101",
"id": "ab65bd0192d9cd5536e1d3c15dbb17de7a0771c2",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pt-br/passo1/granel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50046"
},
{
"name": "Shell",
"bytes": "10256"
}
],
"symlink_target": ""
}
|
from ._redis_management_client import RedisManagementClient
from ._version import VERSION
__version__ = VERSION
try:
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"RedisManagementClient",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
{
"content_hash": "3b2c90c866fb539fee9270babcfe915c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 25,
"alnum_prop": 0.6755555555555556,
"repo_name": "Azure/azure-sdk-for-python",
"id": "dfcf2b7ee7840cfa2d33946cc136251196bac514",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/redis/azure-mgmt-redis/azure/mgmt/redis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Register your models here.
from .models import Website, Article, Concept, ArticleConcept
admin.site.register(Website)
admin.site.register(Article)
admin.site.register(Concept)
admin.site.register(ArticleConcept)
|
{
"content_hash": "c11d86f89287d78f269d17bd22fdcdfb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 61,
"avg_line_length": 27.555555555555557,
"alnum_prop": 0.8225806451612904,
"repo_name": "Soaring-Outliers/news_graph",
"id": "4f3bfc805607578a3d871aff7a66df1abe6a751a",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60170"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "7706"
},
{
"name": "JavaScript",
"bytes": "104667"
},
{
"name": "Python",
"bytes": "61999"
},
{
"name": "Shell",
"bytes": "3765"
}
],
"symlink_target": ""
}
|
"""
solc = Solidity compiler
You must install the solc binary first.
"""
from __future__ import print_function #This *must* be the first line
import sys
import os
import subprocess
# Contracts & related files
mul7 = 'mul7.sol'
abi_filename = 'test.abi'
evm_filename = 'test.binary'
def rm_temp_files():
try:
os.remove(abi_filename)
except OSError:
pass
try:
os.remove(evm_filename)
except OSError:
pass
def solc(src):
# TODO: Change from 'file' to 'stdout' and call separately
exit_code = subprocess.call(
['solc'
,'--input-file', src
,'--binary', 'file'
,'--json-abi', 'file'
]
)
if exit_code:
raise Exception('solc returned error code: {}'.format(exit_code))
abi_file = open(abi_filename)
evm_file = open(evm_filename)
rm_temp_files()
return abi_file.read(), evm_file.read()
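# Illustrative call (mirrors what main() does below):
#   abi_json, evm_hex = solc('mul7.sol')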
# TODO: Add command line usage explanation
def main():
"""Handle command line parameters"""
i = 1
while i < len(sys.argv):
parm = sys.argv[i]
abi, evm = solc(parm)
print('')
print('===')
print(parm)
print('===')
print(abi)
print(evm)
print('===')
i = i+1
if __name__ == "__main__":
main()
|
{
"content_hash": "3058967bcad89f992d3637332a3e8fe2",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 22,
"alnum_prop": 0.5417287630402384,
"repo_name": "joelcan/tools-eth-contract-dev",
"id": "2ca002b2396398ceea82f78627e84eb85034ac45",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1657"
},
{
"name": "Python",
"bytes": "513392"
},
{
"name": "Shell",
"bytes": "1202"
}
],
"symlink_target": ""
}
|
"""
def myrange(start, stop, step):
while start < stop:
yield start
start += step
if timers[0].value == 50:
for value in myrange(0, 5, 1):
print(value)
"""
func_stack = slice(int, 0, 50)
FUNC_MYRANGE = 90
FUNC_MYRANGE_RETURN_1 = 91
FUNC_MYRANGE_RESUME_1 = 92
FUNC_MYRANGE_FRAME_SIZE = 6
FUNC_MYRANGE_RETURN_TO = 0
FUNC_MYRANGE_ARG_START = 1
FUNC_MYRANGE_ARG_STOP = 2
FUNC_MYRANGE_ARG_STEP = 3
FUNC_MYRANGE_RESULT_1 = 4
FUNC_MYRANGE_RESUME_FROM = 5
END = 99
if timers[0].value == 50:
# it = myrange(0, 5, 1)
it = [
0, # reserve return label
0, # push start
5, # push stop
1, # push step
0, # reserve value for result
FUNC_MYRANGE, # push value for resume
][:]
while True:
# value, resume_from = next(IT)
it[FUNC_MYRANGE_RETURN_TO] = FUNC_MYRANGE_RETURN_1
for val in it:
func_stack.append(val)
print('INLINE g{}z'.format(it[FUNC_MYRANGE_RESUME_FROM]))
print('INLINE :{}'.format(FUNC_MYRANGE_RETURN_1))
it = func_stack[len(func_stack)-FUNC_MYRANGE_FRAME_SIZE:] # get iterator frame from the stack
func_stack = func_stack[:len(func_stack)-FUNC_MYRANGE_FRAME_SIZE] # pop the frame
value = it[FUNC_MYRANGE_RESULT_1] # get result
resume_from = it[FUNC_MYRANGE_RESUME_FROM] # get label to resume from
if resume_from < 0:
break
print(value)
# functions go below
print('INLINE g{}z'.format(END))
# def myrange(start, stop, step):
print('INLINE :{}'.format(FUNC_MYRANGE))
frame = func_stack[len(func_stack)-FUNC_MYRANGE_FRAME_SIZE:]
# while start < stop:
while frame[FUNC_MYRANGE_ARG_START] < frame[FUNC_MYRANGE_ARG_STOP]:
# yield start
frame[FUNC_MYRANGE_RESULT_1] = frame[FUNC_MYRANGE_ARG_START]
frame[FUNC_MYRANGE_RESUME_FROM] = FUNC_MYRANGE_RESUME_1
print('INLINE g{}z'.format(frame[FUNC_MYRANGE_RETURN_TO]))
print('INLINE :{}'.format(FUNC_MYRANGE_RESUME_1))
frame = func_stack[len(func_stack)-FUNC_MYRANGE_FRAME_SIZE:]
# start += step
frame[FUNC_MYRANGE_ARG_START] += frame[FUNC_MYRANGE_ARG_STEP]
frame[FUNC_MYRANGE_RESULT_1] = 0
frame[FUNC_MYRANGE_RESUME_FROM] = -1
print('INLINE g{}z'.format(frame[FUNC_MYRANGE_RETURN_TO]))
# end def
print('INLINE :{}'.format(END))
|
{
"content_hash": "ab1905ec65505b1614cc9d9305783034",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 102,
"avg_line_length": 29.443037974683545,
"alnum_prop": 0.6324161650902838,
"repo_name": "Perlence/porcupy",
"id": "ed391a87bd9925e714ddf190e1c02be743fdb383",
"size": "2326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playground/range.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "52"
},
{
"name": "Python",
"bytes": "122049"
}
],
"symlink_target": ""
}
|
"""
Check current Husky battery status (via ROS)
usage:
./battery.py [<node IP> <master IP> | -m <metalog> [F]]
"""
import sys
import os
from huskyros import HuskyROS
# apyros should be common lib - now in katarina code
from apyros.sourcelogger import SourceLogger
from apyros.metalog import MetaLog, disableAsserts
def battery( metalog, assertWrite, ipPair ):
if metalog is None:
metalog = MetaLog()
robot = HuskyROS( filename=metalog.getLog("node"), replay=metalog.replay, ipPair=ipPair )
else:
robot = HuskyROS( filename=metalog.getLog("node"), replay=True, assertWrite=assertWrite, ipPair=ipPair ) # TODO move assert to metalog
scannerFn = SourceLogger( sourceGet=None, filename=metalog.getLog("scanner") ).get
robot.setSpeedPxPa( 0, 0 )
for i in xrange(10):
robot.update()
print "Battery: %.3f" % robot.power
if __name__ == "__main__":
if len(sys.argv) < 3:
print __doc__
sys.exit(1)
metalog = None
assertWrite = True
ipPair = None
if sys.argv[1] == '-m':
metalog = MetaLog( filename = sys.argv[2] )
if len(sys.argv) > 3 and sys.argv[3] == 'F':
assertWrite = False
disableAsserts()
else:
ipPair = ( sys.argv[1], 'http://'+sys.argv[2]+':11311' )
battery( metalog, assertWrite, ipPair )
#-------------------------------------------------------------------
# vim: expandtab sw=4 ts=4
|
{
"content_hash": "68d77f9968576bdba3561b713f35bd45",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 142,
"avg_line_length": 31.630434782608695,
"alnum_prop": 0.5972508591065292,
"repo_name": "robotika/husky",
"id": "f64e453df77282b8b4d8a7bce5b078b7e157e97b",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ros/battery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69175"
}
],
"symlink_target": ""
}
|
import sys
import logging
import math
import json
from urlparse import urlparse
from django.conf import settings
from django.utils.html import strip_tags
from elasticsearch import Elasticsearch
from shapely.geometry import box
from hypermap.aggregator.utils import mercator_to_llbbox, get_date
REGISTRY_MAPPING_PRECISION = getattr(settings, "REGISTRY_MAPPING_PRECISION", "500m")
REGISTRY_SEARCH_URL = getattr(settings, "REGISTRY_SEARCH_URL", "elasticsearch+http://localhost:9200")
SEARCH_TYPE = REGISTRY_SEARCH_URL.split('+')[0]
SEARCH_URL = REGISTRY_SEARCH_URL.split('+')[1]
LOGGER = logging.getLogger(__name__)
class ESHypermap(object):
es_url = SEARCH_URL
es = Elasticsearch(hosts=[es_url])
index_name = 'hypermap'
def __init__(self):
# TODO: this create_indices() should not happen here:
        # ES creates the indexes automatically.
super(ESHypermap, self).__init__()
@staticmethod
def good_coords(coords):
""" passed a string array """
if (len(coords) != 4):
return False
for coord in coords[0:3]:
try:
num = float(coord)
if (math.isnan(num)):
return False
if (math.isinf(num)):
return False
except ValueError:
return False
return True
@staticmethod
def get_domain(url):
urlParts = urlparse(url)
hostname = urlParts.hostname
if hostname == "localhost":
return "Harvard" # assumption
return hostname
@staticmethod
def get_bbox(layer):
candidate_bbox = layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1
if None not in candidate_bbox:
coords = [float(coord) for coord in candidate_bbox]
return coords
wkt = layer.wkt_geometry
        # If a coordinate is None, fall back to deriving the bbox from the 'POLYGON' WKT geometry
if 'POLYGON' in wkt:
from shapely.wkt import loads
from osgeo import ogr, osr
source = osr.SpatialReference()
source.ImportFromEPSG(3089)
target = osr.SpatialReference()
target.ImportFromEPSG(4326)
transform = osr.CoordinateTransformation(source, target)
point = ogr.CreateGeometryFromWkt(wkt)
point.Transform(transform)
wkt = point.ExportToWkt()
return loads(wkt).bounds
return (-180.0, -90.0, 180.0, 90.0)
@staticmethod
def layer_to_es(layer, with_bulk=False):
category = None
username = None
LOGGER.info("Elasticsearch: record to save: [%s] %s" % (layer.catalog.slug, layer.id))
try:
bbox = ESHypermap.get_bbox(layer)
for proj in layer.service.srs.values():
if proj['code'] in ('102113', '102100'):
bbox = mercator_to_llbbox(bbox)
if (ESHypermap.good_coords(bbox)) is False:
                LOGGER.debug('Elasticsearch: There are no valid coordinates for layer: %s', layer.title)
                LOGGER.error('Elasticsearch: There are no valid coordinates for layer id: %s' % layer.id)
return False
if (ESHypermap.good_coords(bbox)):
minX = bbox[0]
minY = bbox[1]
maxX = bbox[2]
maxY = bbox[3]
if (minY > maxY):
minY, maxY = maxY, minY
if (minX > maxX):
minX, maxX = maxX, minX
halfWidth = (maxX - minX) / 2.0
halfHeight = (maxY - minY) / 2.0
area = (halfWidth * 2) * (halfHeight * 2)
if (minX < -180):
minX = -180
if (maxX > 180):
maxX = 180
if (minY < -90):
minY = -90
if (maxY > 90):
maxY = 90
wkt = "ENVELOPE({:f},{:f},{:f},{:f})".format(minX, maxX, maxY, minY)
rectangle = box(minX, minY, maxX, maxY)
domain = ESHypermap.get_domain(layer.service.url)
if hasattr(layer, 'layerwm'):
category = layer.layerwm.category
username = layer.layerwm.username
abstract = layer.abstract
if abstract:
abstract = strip_tags(layer.abstract)
else:
abstract = ''
if layer.service.type == "WM":
originator = username
else:
originator = domain
                # we need to remove the existing index in case there is already one
# ESHypermap.es.delete('hypermap', 'layer', layer.id)
# now we add the index
es_record = {
"id": str(layer.id),
"type": 'Layer',
"layer_id": str(layer.id),
"name": layer.name,
"title": layer.title,
"layer_originator": originator,
"service_id": str(layer.service.id),
"service_type": layer.service.type,
"layer_category": category,
"layer_username": username,
"url": layer.url,
"keywords": [kw.name for kw in layer.keywords.all()],
"reliability": layer.reliability,
"recent_reliability": layer.recent_reliability,
"last_status": layer.last_status,
"is_public": layer.is_public,
"availability": "Online",
"location": {
"layer_info": layer.get_absolute_url
},
"abstract": abstract,
"domain_name": layer.service.get_domain,
# "SrsProjectionCode": layer.srs.values_list('code', flat=True),
"min_y": minY,
"min_x": minX,
"max_x": maxX,
"max_y": maxY,
"area": area,
"bbox": wkt,
"centroid_x": rectangle.centroid.x,
"centroid_y": rectangle.centroid.y,
"srs": [srs.encode('utf-8') for srs in layer.service.srs.values_list('code', flat=True)],
"layer_geoshape": {
"type": "envelope",
"coordinates": [
[minX, maxY], [maxX, minY]
]
},
}
es_date, type = get_date(layer)
if es_date is not None:
es_record['layer_date'] = es_date
es_record['layer_datetype'] = type
es_record['registry'] = layer.registry_tags()
if layer.get_tile_url():
es_record['tile_url'] = layer.get_tile_url()
if with_bulk:
es_record = {
"_id": str(layer.id),
"_type": "layer",
"_index": layer.catalog.slug,
"_source": es_record,
}
LOGGER.info(es_record)
# TODO: cache index creation.
ESHypermap.create_indices(layer.catalog.slug)
if not with_bulk:
ESHypermap.es.index(layer.catalog.slug, 'layer', json.dumps(es_record), id=layer.id,
request_timeout=20)
LOGGER.info("Elasticsearch: record saved for layer with id: %s" % layer.id)
return True, None
# If we want to index with bulk we need to return the layer dictionary.
return es_record
except Exception, e:
LOGGER.error(e, exc_info=True)
LOGGER.error("Elasticsearch: Error saving record for layer with id: %s - %s" % (
layer.id, sys.exc_info()[1]))
return False, sys.exc_info()[1]
@staticmethod
def clear_es():
"""Clear all indexes in the es core"""
# TODO: should receive a catalog slug.
ESHypermap.es.indices.delete(ESHypermap.index_name, ignore=[400, 404])
LOGGER.debug('Elasticsearch: Index cleared')
@staticmethod
def create_indices(catalog_slug):
"""Create ES core indices """
# TODO: enable auto_create_index in the ES nodes to make this implicit.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
# http://support.searchly.com/customer/en/portal/questions/
# 16312889-is-automatic-index-creation-disabled-?new=16312889
mapping = {
"mappings": {
"layer": {
"properties": {
"layer_geoshape": {
"type": "geo_shape",
"tree": "quadtree",
"precision": REGISTRY_MAPPING_PRECISION
}
}
}
}
}
ESHypermap.es.indices.create(catalog_slug, ignore=[400, 404], body=mapping)
|
{
"content_hash": "8e8caf563b0eee3e86077b2bdb2f5907",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 109,
"avg_line_length": 37.99595141700405,
"alnum_prop": 0.4915290356952584,
"repo_name": "cga-harvard/HHypermap",
"id": "01c9fff68f3ab14ac39c1e1b68d9d27693522a19",
"size": "9385",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hypermap/aggregator/elasticsearch_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93364"
},
{
"name": "HTML",
"bytes": "50409"
},
{
"name": "JavaScript",
"bytes": "2298247"
},
{
"name": "Makefile",
"bytes": "3713"
},
{
"name": "Python",
"bytes": "334877"
},
{
"name": "Shell",
"bytes": "750"
}
],
"symlink_target": ""
}
|
from base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
########## END CACHE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
########## END TEMPLATE CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
########## END EMAIL CONFIGURATION
########## LOGGING CONFIGURATION
# Break tests when a DateTimeField receives a naive datetime
import warnings
warnings.filterwarnings(
'error', r"DateTimeField received a naive datetime",
RuntimeWarning, r'django\.db\.models\.fields')
########## END LOGGING CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"tL&JYCEY57c]g^Vh^cMqg2(/g2*+^ou5=QBM7[9C&x3\G;1uS+"
########## END SECRET CONFIGURATION
########## MEDIA CONFIGURATION
MEDIA_ROOT = project_path('media')
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
STATIC_ROOT = project_path('static')
########## END STATIC FILE CONFIGURATION
########## COMPRESSOR CONFIGURATION
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = []
########## END CONFIGURATION
|
{
"content_hash": "b62faa0a8483a7bfce500b5b8654e6b7",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 75,
"avg_line_length": 25.86111111111111,
"alnum_prop": 0.6493018259935553,
"repo_name": "GotlingSystem/apnea",
"id": "f6ee7b76472c009483cd06c4efaa79085adcea06",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apnea/settings/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60340"
},
{
"name": "JavaScript",
"bytes": "2161"
},
{
"name": "Python",
"bytes": "45697"
}
],
"symlink_target": ""
}
|
import wx
from wx import Colour
from ..controller.cellinfo import CellType
# this import fails in HUDSON
# from wxPython._gdi import wxFONTWEIGHT_BOLD, wxFONTWEIGHT_NORMAL
# wxFONTWEIGHT_BOLD = 92
# wxFONTWEIGHT_NORMAL = 90
# DEBUG using wx.FONTWEIGHT_BOLD, wx.FONTWEIGHT_NORMAL
class Colorizer(object):
def __init__(self, grid, controller):
self._grid = grid
self._controller = controller
self._colors = ColorizationSettings(grid.settings)
self._current_task_id = 0
self._timer = None
def close(self):
self._grid = None
def colorize(self, selection_content):
self._current_task_id += 1
if self._timer is None:
self._timer = wx.CallLater(1, self._coloring_task, self._current_task_id, selection_content)
else:
self._timer.Restart(50, self._current_task_id, selection_content)
def _coloring_task(self, task_index, selection_content, row=0, col=0):
if task_index != self._current_task_id or self._grid is None:
return
if row >= self._grid.NumberRows:
self._grid.ForceRefresh()
elif col < self._grid.NumberCols:
self._colorize_cell(row, col, selection_content)
wx.CallAfter(self._coloring_task, task_index, selection_content, row, col+1)
else:
self._coloring_task(task_index, selection_content, row+1, 0)
def _colorize_cell(self, row, col, selection_content):
cell_info = self._controller.get_cell_info(row, col)
if cell_info is None:
self._set_default_colors(row, col)
return
self._grid.SetCellTextColour(row, col, self._get_text_color(cell_info))
self._grid.SetCellBackgroundColour(row, col, self._get_background_color(cell_info, selection_content))
self._grid.SetCellFont(row, col, self._get_cell_font(row, col, cell_info))
def _set_default_colors(self, row, col):
self._grid.SetCellTextColour(row, col, self._colors.DEFAULT_TEXT)
self._grid.SetCellBackgroundColour(row, col, self._colors.DEFAULT_BACKGROUND)
def _get_text_color(self, cell_info):
return self._colors.get_text_color(cell_info.content_type)
def _get_background_color(self, cell_info, selection_content):
if cell_info.matches(selection_content):
return self._colors.get_highlight_color()
if cell_info.has_error():
return self._colors.get_error_color()
return self._colors.get_background_color(cell_info.cell_type)
def _get_cell_font(self, row, col, cell_info):
font = self._grid.GetCellFont(row, col)
font.SetWeight(self._get_weight(cell_info))
return font
def _get_weight(self, cell_info):
if cell_info.cell_type == CellType.KEYWORD:
return wx.FONTWEIGHT_BOLD
return wx.FONTWEIGHT_NORMAL
class ColorizationSettings(object):
DEFAULT_TEXT = '' # Colour('black') # Colour(7, 0, 70) # 'black'
DEFAULT_BACKGROUND = '' # 'light grey' # Colour('light grey') # Colour(200, 222, 40) # 'white'
def __init__(self, settings=None):
self._settings = settings
def get_background_color(self, type):
if not self._settings:
return self.DEFAULT_BACKGROUND
return self._get('background %s' % type)
def get_text_color(self, type):
if not self._settings:
return self.DEFAULT_TEXT
return self._get('text %s' % type)
def get_highlight_color(self):
return self.get_background_color('highlight')
def get_error_color(self):
return self.get_background_color('error')
def _get(self, name):
return self._settings[name.lower().replace('_', ' ')]
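    # Illustrative lookup: get_highlight_color() resolves to the settings key
    # 'background highlight' and get_error_color() to 'background error'.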
|
{
"content_hash": "9881f4cc795483519b52aaacd057ebfe",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 110,
"avg_line_length": 36.36893203883495,
"alnum_prop": 0.6364121729845168,
"repo_name": "HelioGuilherme66/RIDE",
"id": "b6e8656c48e4e12bf3b5db5d52087af3d524cc15",
"size": "4389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/editor/gridcolorizer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
}
|
import sys
import warnings
import platform
sys.path.append('.')
true = True
false = False
none = None
INT_SIZE = 4 # on all tested platforms
def get_dword_size():
# ugly but portable
(arch, exe) = platform.architecture()
if arch.startswith('64'):
return 8
else:
return 4
class jemalloc:
def __init__(self, chunks = [], chunk_size = 0, \
arenas = [], narenas = 0, runs = [], nbins = 0, \
magrack_size = 0, magaz_flag = false, \
standalone_flag = false):
self.chunks = chunks
self.chunk_size = chunk_size
self.arenas = arenas
self.narenas = narenas
self.nbins = nbins
self.ntbins = 0
self.nsbins = 0
self.nqbins = 0
self.magrack_size = magrack_size
self.DWORD_SIZE = get_dword_size()
self.runs = runs
self.MAGAZINES = magaz_flag
self.STANDALONE = standalone_flag
def __str__(self):
if self.MAGAZINES == false:
return '[shadow] [jemalloc] [arenas %02d] [bins %02d]' \
' [runs %02d]' % (self.narenas, self.nbins, len(self.runs))
else:
return '[shadow] [jemalloc] [arenas %02d] [bins %02d] ' \
'[runs %02d] [magazine rack/tcache size %04d]' % \
(self.narenas, self.nbins, len(self.runs), self.magrack_size)
class arena_chunk:
def __init__(self, addr = 0, arena_addr = 0):
self.addr = addr
self.arena_addr = arena_addr
def __str__(self):
if self.arena_addr != 0:
return '[shadow] [chunk 0x%08x] [arena 0x%08x]' % \
(self.addr, self.arena_addr)
else:
return '[shadow] [chunk 0x%08x] [orphan]' % (self.addr)
class arena_run:
def __init__(self, start = 0, end = 0, size = 0, bin_addr = 0, \
region_size = 0, reg0_offset = 0, total_regions = 0, \
free_regions = 0, regions = []):
self.start = start
self.end = end
self.size = size
self.bin_addr = bin_addr
self.region_size = region_size
self.reg0_offset = reg0_offset
self.total_regions = total_regions
self.free_regions = free_regions
self.regions = regions
self.regs_mask = ''
def __str__(self):
return '[shadow] [run 0x%08x] [size %06d] [bin 0x%08x] [region size %04d] ' \
'[total regions %04d] [free regions %04d]' % \
(self.start, self.size, self.bin_addr, \
self.region_size, self.total_regions, self.free_regions)
class arena_bin:
def __init__(self, addr = 0, index = 0, runcur = none):
self.addr = addr
self.index = index
self.run = runcur
def __str__(self):
return '[shadow] [bin %02d (0x%08x)] [size class %04d] [runcur 0x%08x]' % \
(self.index, self.addr, self.run.region_size, self.run.start)
class region:
def __init__(self, index = 0, addr = 0, is_free = 1):
self.index = index
self.addr = addr
self.is_free = is_free
self.content_preview = ''
def __str__(self):
str = '[shadow] [region %03d]' % (self.index)
if self.is_free == 1:
str += ' [free]'
elif self.is_free == 0:
str += ' [used]'
if self.content_preview != '':
str += ' [0x%08x] [%s]' % (self.addr, self.content_preview)
else:
str += ' [0x%08x]' % (self.addr)
return str
class arena:
def __init__(self, addr = 0, index = 0, bins = []):
self.addr = addr
self.index = index
self.bins = bins
def __str__(self):
return '[shadow] [arena %02d (0x%08x)] [bins %02d]' % \
(self.index, self.addr, len(self.bins))
class address_info:
def __init__(self, addr = 0, arena_addr = 0, parent_run = none, \
current_run_flag = false, parent_region = none, chunk_addr = 0):
self.addr = addr
self.arena_addr = arena_addr
self.parent_run = parent_run
self.current_run_flag = current_run_flag
self.parent_region = parent_region
self.chunk_addr = chunk_addr
def __str__(self):
str = ''
found = false
if self.addr != 0:
str += '[shadow] address 0x%08x\n' % (self.addr)
if self.arena_addr != 0:
str += '[shadow] parent arena 0x%08x\n' % (self.arena_addr)
found = true
if self.chunk_addr != 0:
str += '[shadow] parent chunk 0x%08x\n' % (self.chunk_addr)
found = true
if self.parent_run:
str += '[shadow] parent run 0x%08x\n' % (self.parent_run.start)
found = true
if self.current_run_flag == true:
str += '[shadow] run 0x%08x is the current run of bin 0x%08x\n' \
% (self.parent_run.start, self.parent_run.bin_addr)
found = true
if self.parent_region:
str += '[shadow] address 0x%08x belongs to region 0x%08x' \
% (self.addr, self.parent_region.addr)
str += ' (size class %04d)\n' % (self.parent_run.region_size)
str += '%s\n' % (self.parent_run.__str__())
str += self.parent_region.__str__()
found = true
if found == false:
str = '[shadow] address 0x%08x not found in the jemalloc-managed heap' % (self.addr)
return str
# unit testing
if __name__ == '__main__':
print('[shadow] unit testing not implemented yet')
sys.exit()
# EOF
|
{
"content_hash": "b5c2cb3193aa69eed1d554f61d5a9c5a",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 96,
"avg_line_length": 27.886138613861387,
"alnum_prop": 0.5208592224391976,
"repo_name": "codercold/shadow",
"id": "5918b429ba63f776e8719bc0fc6919d007defce8",
"size": "5670",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jemalloc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "38"
},
{
"name": "Python",
"bytes": "64179"
}
],
"symlink_target": ""
}
|
"""
Functions used to load commonly available datasets.
"""
import cPickle
import gzip
import logging
import numpy as np
import os
import sys
import tarfile
import urllib2
logger = logging.getLogger(__name__)
def _valid_path_append(path, *args):
"""
Helper to validate passed path directory and append any subsequent
filename arguments.
Arguments:
path (str): Initial filesystem path. Should expand to a valid
directory.
*args (list, optional): Any filename or path suffices to append to path
for returning.
Returns:
(list, str): path prepended list of files from args, or path alone if
no args specified.
Raises:
ValueError: if path is not a valid directory on this filesystem.
"""
full_path = os.path.expanduser(path)
res = []
if not os.path.exists(full_path):
os.makedirs(full_path)
if not os.path.isdir(full_path):
raise ValueError("path: {0} is not a valid directory".format(path))
for suffix_path in args:
res.append(os.path.join(full_path, suffix_path))
if len(res) == 0:
return path
elif len(res) == 1:
return res[0]
else:
return res
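# Illustrative behaviour (path is hypothetical):
#   _valid_path_append('~/data', 'mnist.pkl.gz') -> '/home/<user>/data/mnist.pkl.gz',
#   creating the directory first if it does not exist.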
def fetch_dataset(url, sourcefile, destfile, totalsz):
"""
Download the file specified by the given URL.
Args:
url (str): Base URL of the file to be downloaded.
sourcefile (str): Name of the source file.
destfile (str): Path to the destination.
totalsz (int): Size of the file to be downloaded.
"""
cloudfile = urllib2.urlopen(os.path.join(url, sourcefile))
print("Downloading file: {}".format(destfile))
blockchar = u'\u2588' # character to display in progress bar
with open(destfile, 'wb') as f:
data_read = 0
chunksz = 1024**2
while 1:
data = cloudfile.read(chunksz)
if not data:
break
data_read = min(totalsz, data_read + chunksz)
progress_string = u'Download Progress |{:<50}| '.format(
blockchar * int(float(data_read) / totalsz * 50))
sys.stdout.write('\r')
sys.stdout.write(progress_string.encode('utf-8'))
sys.stdout.flush()
f.write(data)
print("Download Complete")
def load_mnist(path=".", normalize=True):
"""
Fetch the MNIST dataset and load it into memory.
Args:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
normalize (bool, optional): whether to scale values between 0 and 1.
Defaults to True.
Returns:
tuple: Both training and test sets are returned.
"""
mnist = dataset_meta['mnist']
filepath = _valid_path_append(path, mnist['file'])
if not os.path.exists(filepath):
fetch_dataset(mnist['url'], mnist['file'], filepath, mnist['size'])
with gzip.open(filepath, 'rb') as mnist:
(X_train, y_train), (X_test, y_test) = cPickle.load(mnist)
X_train = X_train.reshape(-1, 784)
X_test = X_test.reshape(-1, 784)
if normalize:
X_train = X_train / 255.
X_test = X_test / 255.
return (X_train, y_train), (X_test, y_test), 10
def _compute_zca_transform(imgs, filter_bias=0.1):
"""
Compute the zca whitening transform matrix
"""
logger.info("Computing ZCA transform matrix")
meanX = np.mean(imgs, 0)
covX = np.cov(imgs.T)
D, E = np.linalg.eigh(covX)
assert not np.isnan(D).any()
assert not np.isnan(E).any()
assert D.min() > 0
D = D ** -.5
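    # Descriptive note (added): with covX = E * diag(D) * E.T from the eigendecomposition
    # above, the whitening matrix below is W = E * diag(D**-0.5) * E.T, so that
    # (X - meanX).dot(W) has approximately identity covariance.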
W = np.dot(E, np.dot(np.diag(D), E.T))
return meanX, W
def zca_whiten(train, test, cache=None):
"""
Use train set statistics to apply the ZCA whitening transform to
both train and test sets.
"""
if cache and os.path.isfile(cache):
with open(cache, 'rb') as f:
(meanX, W) = cPickle.load(f)
else:
meanX, W = _compute_zca_transform(train)
if cache:
logger.info("Caching ZCA transform matrix")
with open(cache, 'wb') as f:
cPickle.dump((meanX, W), f)
logger.info("Applying ZCA whitening transform")
train_w = np.dot(train - meanX, W)
test_w = np.dot(test - meanX, W)
return train_w, test_w
def global_contrast_normalize(X, scale=1., min_divisor=1e-8):
"""
Subtract mean and normalize by vector norm
"""
X = X - X.mean(axis=1)[:, np.newaxis]
normalizers = np.sqrt((X ** 2).sum(axis=1)) / scale
normalizers[normalizers < min_divisor] = 1.
X /= normalizers[:, np.newaxis]
return X
def load_cifar10(path=".", normalize=True, contrast_normalize=False, whiten=False):
"""
Fetch the CIFAR-10 dataset and load it into memory.
Args:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
normalize (bool, optional): Whether to scale values between 0 and 1.
Defaults to True.
Returns:
tuple: Both training and test sets are returned.
"""
cifar = dataset_meta['cifar-10']
workdir, filepath = _valid_path_append(path, '', cifar['file'])
batchdir = os.path.join(workdir, 'cifar-10-batches-py')
if not os.path.exists(os.path.join(batchdir, 'data_batch_1')):
if not os.path.exists(filepath):
fetch_dataset(cifar['url'], cifar['file'], filepath, cifar['size'])
with tarfile.open(filepath, 'r:gz') as f:
f.extractall(workdir)
train_batches = [os.path.join(batchdir, 'data_batch_' + str(i)) for i in range(1, 6)]
Xlist, ylist = [], []
for batch in train_batches:
with open(batch, 'rb') as f:
d = cPickle.load(f)
Xlist.append(d['data'])
ylist.append(d['labels'])
X_train = np.vstack(Xlist)
y_train = np.vstack(ylist)
with open(os.path.join(batchdir, 'test_batch'), 'rb') as f:
d = cPickle.load(f)
X_test, y_test = d['data'], d['labels']
y_train = y_train.reshape(-1, 1)
y_test = np.array(y_test).reshape(-1, 1)
if contrast_normalize:
norm_scale = 55.0 # Goodfellow
X_train = global_contrast_normalize(X_train, scale=norm_scale)
X_test = global_contrast_normalize(X_test, scale=norm_scale)
if normalize:
X_train = X_train / 255.
X_test = X_test / 255.
if whiten:
zca_cache = os.path.join(workdir, 'cifar-10-zca-cache.pkl')
X_train, X_test = zca_whiten(X_train, X_test, cache=zca_cache)
return (X_train, y_train), (X_test, y_test), 10
def load_babi(path=".", task='qa1_single-supporting-fact', subset='en'):
"""
Fetch the Facebook bAbI dataset and load it to memory.
Args:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
task (str, optional): bAbI task to load
subset (str, optional): Data comes in English, Hindi, or Shuffled
characters. Options are 'en', 'hn', and
'shuffled' for 1000 training and test
examples or 'en-10k', 'hn-10k', and
'shuffled-10k' for 10000 examples.
Returns:
tuple: training and test files are returned
"""
babi = dataset_meta['babi']
workdir, filepath = _valid_path_append(path, '', babi['file'])
if not os.path.exists(filepath):
fetch_dataset(babi['url'], babi['file'], filepath, babi['size'])
babi_dir_name = babi['file'].split('.')[0]
task = babi_dir_name + '/' + subset + '/' + task + '_{}.txt'
train_file = os.path.join(workdir, task.format('train'))
test_file = os.path.join(workdir, task.format('test'))
    if not os.path.exists(train_file) or not os.path.exists(test_file):
with tarfile.open(filepath, 'r:gz') as f:
f.extractall(workdir)
return train_file, test_file
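# Illustrative usage sketch (not part of the original module); the cache path is
# a hypothetical example and the default task and subset are used.
#
#     train_path, test_path = load_babi(path='/tmp/data')
#     # each returned path points at a plain-text file of bAbI stories,
#     # questions and answers for the chosen task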
def load_text(dataset, path="."):
"""
Fetch the specified dataset.
Args:
dataset (str): A key that may be used to retrieve metadata associated
with the dataset.
path (str, optional): Working directory in which to cache loaded data.
Defaults to current dir if not specified.
Returns:
str: Path to the downloaded dataset.
"""
text_meta = dataset_meta[dataset]
workdir, filepath = _valid_path_append(path, '', text_meta['file'])
if not os.path.exists(filepath):
fetch_dataset(text_meta['url'], text_meta['file'], filepath,
text_meta['size'])
if '.zip' in filepath:
import zipfile
zip_ref = zipfile.ZipFile(filepath)
zip_ref.extractall(workdir)
zip_ref.close()
filepath = filepath.split('.zip')[0]
return filepath
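# Illustrative usage sketch (not part of the original module); the cache path is
# a hypothetical example.
#
#     shakespeare_path = load_text('shakespeare', path='/tmp/data')
#     # shakespeare_input.txt is fetched on first use and its local path returned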
def load_ptb_train(path):
return load_text('ptb-train', path)
def load_ptb_valid(path):
return load_text('ptb-valid', path)
def load_ptb_test(path):
return load_text('ptb-test', path)
def load_hutter_prize(path):
return load_text('hutter-prize', path)
def load_shakespeare(path):
return load_text('shakespeare', path)
def load_flickr8k(path):
return load_text('flickr8k', path)
def load_flickr30k(path):
return load_text('flickr30k', path)
def load_coco(path):
return load_text('coco', path)
def load_i1kmeta(path):
return load_text('i1kmeta', path)
def load_imdb(path):
return load_text('imdb', path)
dataset_meta = {
'mnist': {
'size': 15296311,
'file': 'mnist.pkl.gz',
'url': 'https://s3.amazonaws.com/img-datasets',
'func': load_mnist
},
'cifar-10': {
'size': 170498071,
'file': 'cifar-10-python.tar.gz',
'url': 'http://www.cs.toronto.edu/~kriz',
'func': load_cifar10
},
'babi': {
'size': 11745123,
'file': 'tasks_1-20_v1-2.tar.gz',
'url': 'http://www.thespermwhale.com/jaseweston/babi',
'func': load_babi
},
'ptb-train': {
'size': 5101618,
'file': 'ptb.train.txt',
'url': 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
'func': load_ptb_train
},
'ptb-valid': {
'size': 399782,
'file': 'ptb.valid.txt',
'url': 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
'func': load_ptb_valid
},
'ptb-test': {
'size': 449945,
'file': 'ptb.test.txt',
'url': 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
'func': load_ptb_test
},
'hutter-prize': {
'size': 35012219,
'file': 'enwik8.zip',
'url': 'http://mattmahoney.net/dc',
'func': load_hutter_prize
},
'shakespeare': {
'size': 4573338,
'file': 'shakespeare_input.txt',
'url': 'http://cs.stanford.edu/people/karpathy/char-rnn',
'func': load_shakespeare
},
'flickr8k': {
'size': 49165563,
'file': 'flickr8k.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/image-caption',
'func': load_flickr8k
},
'flickr30k': {
'size': 195267563,
'file': 'flickr30k.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/image-caption',
'func': load_flickr30k
},
'coco': {
'size': 738051031,
'file': 'coco.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/image-caption',
'func': load_coco
},
'i1kmeta': {
'size': 758648,
'file': 'neon_ILSVRC2012_devmeta.zip',
'url': 'https://s3-us-west-1.amazonaws.com/neon-stockdatasets/imagenet',
'func': load_i1kmeta
},
'imdb': {
'size': 33213513,
'file': 'imdb.pkl',
        'url': 'https://s3.amazonaws.com/text-datasets',
'func': load_imdb,
}
}
def load_dataset(name, path=".", **kwargs):
"""
Fetch the specified dataset.
Args:
name (str): A key that may be used to retrieve the function that
can be used to load the dataset.
path (str, optional): Local cache directory to load the dataset into.
Defaults to current working directory.
    Returns:
        The value returned by the dataset's loader, e.g. training and test
        sets (plus the number of classes) for the image datasets, or local
        file path(s) for the text datasets.
"""
if name in dataset_meta:
if 'func' not in dataset_meta[name]:
raise ValueError('function not specified for loading %s' % name)
func = dataset_meta[name]['func']
else:
try:
dataset_module = __import__(name)
except ImportError:
raise ValueError('dataset handler not found: %s' % name)
func = dataset_module.load_data
return func(path, **kwargs)
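# Illustrative usage sketch (not part of the original module); the cache path is
# a hypothetical example.
#
#     (X_train, y_train), (X_test, y_test), nclass = load_dataset('cifar-10', path='/tmp/data')
#
# A name missing from dataset_meta falls back to importing a module of that
# name and calling its load_data(path, **kwargs) hook.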
|
{
"content_hash": "64ecbd7e3ef6c4bb399c88b32b534fa5",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 89,
"avg_line_length": 30.21788990825688,
"alnum_prop": 0.5766223908918406,
"repo_name": "Bam4d/neon",
"id": "90cd7407fd8cdd4c5c53bb9aeee87e8a5e88560f",
"size": "13916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neon/data/datasets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6534"
},
{
"name": "C++",
"bytes": "46049"
},
{
"name": "CSS",
"bytes": "698005"
},
{
"name": "Cuda",
"bytes": "14937"
},
{
"name": "Makefile",
"bytes": "11128"
},
{
"name": "Python",
"bytes": "1134464"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="widthsrc", parent_name="scatterternary.marker.line", **kwargs
):
super(WidthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "19c447d52ce1a8968c0a674360b1c908",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 33.857142857142854,
"alnum_prop": 0.6012658227848101,
"repo_name": "plotly/python-api",
"id": "e109339b20e21a968be344e7687d9e73a46d1900",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterternary/marker/line/_widthsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.contenttypes import generic
from djangregator.models import *
class TimelineEntryAdmin(admin.ModelAdmin):
list_display = ('published', 'content_type')
date_hierarchy = 'published'
list_filter = ('content_type',)
class ActivityEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'published'
class GenericServiceAccountAdmin(admin.StackedInline):
extra = 1
class TwitterAccountAdmin(GenericServiceAccountAdmin):
model = TwitterAccount
class DeliciousAccountAdmin(GenericServiceAccountAdmin):
model = DeliciousAccount
class FlickrAccountAdmin(GenericServiceAccountAdmin):
model = FlickrAccount
class OnlinePersonaAdmin(admin.ModelAdmin):
inlines = (
TwitterAccountAdmin,
DeliciousAccountAdmin,
FlickrAccountAdmin
)
admin.site.register(TimelineEntry, TimelineEntryAdmin)
admin.site.register(OnlinePersona, OnlinePersonaAdmin)
# Service-specific models
admin.site.register(TwitterStatus, ActivityEntryAdmin)
admin.site.register(DeliciousLink, ActivityEntryAdmin)
admin.site.register(FlickrPhoto, ActivityEntryAdmin)
|
{
"content_hash": "23555eccb1e9731b55c8b3c370c58d0c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 56,
"avg_line_length": 29.86842105263158,
"alnum_prop": 0.7894273127753304,
"repo_name": "idan/djangregator",
"id": "ce14f6e3338975f5af9ad00f4ebbe596acd67b1e",
"size": "2708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangregator/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "36748"
}
],
"symlink_target": ""
}
|
"""The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
__author__ = ("Guido van Rossum <guido@python.org>, "
"Mike Verdone <mike.verdone@gmail.com>, "
"Mark Russell <mark.russell@zen.co.uk>, "
"Antoine Pitrou <solipsis@pitrou.net>, "
"Amaury Forgeot d'Arc <amauryfa@gmail.com>, "
"Benjamin Peterson <benjamin@python.org>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper",
"UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"]
import abc
# For the time being, import everything via _jyio instead of from _io directly
import _jyio
from _jyio import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
open,
FileIO,
BytesIO, StringIO, BufferedReader,
BufferedWriter, BufferedRWPair, BufferedRandom,
IncrementalNewlineDecoder, TextIOWrapper)
OpenWrapper = _jyio.open # for compatibility with _pyio
# for seek()
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Declaring ABCs in C is tricky so we do it here.
# Method descriptions and default implementations are inherited from the C
# version however.
class IOBase(_jyio._IOBase):
__metaclass__ = abc.ABCMeta
class RawIOBase(_jyio._RawIOBase, IOBase):
pass
class BufferedIOBase(_jyio._BufferedIOBase, IOBase):
pass
class TextIOBase(_jyio._TextIOBase, IOBase):
pass
RawIOBase.register(FileIO)
for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
BufferedRWPair):
BufferedIOBase.register(klass)
for klass in (StringIO, TextIOWrapper):
TextIOBase.register(klass)
del klass
|
{
"content_hash": "4704861bf64541bb862d9597d117d235",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 80,
"avg_line_length": 37.13861386138614,
"alnum_prop": 0.7278059184217542,
"repo_name": "EnviroCentre/jython-upgrade",
"id": "4102ad62d568900ff6360f5f3de3c75e8dbec9e9",
"size": "4114",
"binary": false,
"copies": "17",
"ref": "refs/heads/develop",
"path": "jython/lib/io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "112532"
},
{
"name": "NSIS",
"bytes": "1982"
},
{
"name": "PowerShell",
"bytes": "216"
},
{
"name": "Python",
"bytes": "15437225"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|