| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
"""
sentry.web.frontend.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import posixpath
from django.conf import settings
from django.http import HttpResponseNotFound, Http404
from django.contrib.staticfiles import finders
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
from django.views.generic import TemplateView as BaseTemplateView
from sentry.web.helpers import render_to_response
FOREVER_CACHE = 'max-age=315360000'
NEVER_CACHE = 'max-age=0, no-cache, no-store, must-revalidate'
def dev_favicon(request):
document_root, path = resolve('sentry/images/favicon_dev.png')
return static.serve(request, path, document_root=document_root)
def resolve(path):
# Mostly yanked from Django core and changed to return the path:
# See: https://github.com/django/django/blob/1.6.11/django/contrib/staticfiles/views.py
normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
try:
absolute_path = finders.find(normalized_path)
except Exception:
# trying to access bad paths like, `../../etc/passwd`, etc that
# Django rejects, but respond nicely instead of erroring.
absolute_path = None
if not absolute_path:
raise Http404("'%s' could not be found" % path)
if path[-1] == '/' or os.path.isdir(absolute_path):
raise Http404('Directory indexes are not allowed here.')
return os.path.split(absolute_path)
def static_media(request, **kwargs):
"""
Serve static files below a given point in the directory structure.
"""
module = kwargs.get('module')
path = kwargs.get('path', '')
version = kwargs.get('version')
if module:
path = '%s/%s' % (module, path)
try:
document_root, path = resolve(path)
except Http404:
# Return back a simpler plain-text 404 response, more suitable
# for static files, rather than our full blown HTML.
return HttpResponseNotFound('', content_type='text/plain')
    if 'gzip' in request.META.get('HTTP_ACCEPT_ENCODING', '') and \
            not path.endswith('.gz') and not settings.DEBUG:
paths = (path + '.gz', path)
else:
paths = (path, )
for p in paths:
try:
response = static.serve(request, p, document_root=document_root)
break
except Http404:
            # We don't need to handle this since `resolve()` assures us that
            # at least the non-gzipped version exists, so in theory, this can
            # only happen on the first .gz path
continue
# Make sure we Vary: Accept-Encoding for gzipped responses
response['Vary'] = 'Accept-Encoding'
# We need CORS for font files
if path.endswith(('.js', '.ttf', '.ttc', '.otf', '.eot', '.woff', '.woff2')):
response['Access-Control-Allow-Origin'] = '*'
# If we have a version and not DEBUG, we can cache it FOREVER
if version is not None and not settings.DEBUG:
response['Cache-Control'] = FOREVER_CACHE
else:
# Otherwise, we explicitly don't want to cache at all
response['Cache-Control'] = NEVER_CACHE
return response
class TemplateView(BaseTemplateView):
def render_to_response(self, context, **response_kwargs):
return render_to_response(
request=self.request,
template=self.get_template_names(),
context=context,
**response_kwargs
)
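# Hedged usage sketch (not part of the original module): one way these views
# might be wired into URL patterns. The regexes and names below are
# illustrative assumptions, not Sentry's actual routing.
#
#   from django.conf.urls import url
#   from sentry.web.frontend import generic
#
#   urlpatterns = [
#       url(r'^_static/(?:(?P<version>\d+)/)?(?P<module>[^/]+)/(?P<path>.*)$',
#           generic.static_media, name='sentry-media'),
#       url(r'^favicon\.ico$', generic.dev_favicon),
#   ]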
|
{
"content_hash": "d75fb4fbeb5c64d0aed40ebfb7723596",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 91,
"avg_line_length": 34.339622641509436,
"alnum_prop": 0.6478021978021978,
"repo_name": "ifduyue/sentry",
"id": "097895ee80a78902ebe7b67789ed2b11701361f1",
"size": "3640",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/generic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
import json
import os
import urllib
from pureelk.arraycontext import ArrayContext
STATE_FILE = ".pureelk.arrays.state"
class Store(object):
def __init__(self, path, logger):
"""
Store class for saving and loading of states
The current implementation is file-based since the data set is tiny
Each array's config is stored in its own .json file. The worker also
        writes the current task state into a .pureelk.state file each time it runs.
        :param path: The path of the folder containing all the configs
        :return: dictionary of arrays, indexed by array id.
"""
self._path = path
self._logger = logger
def load_arrays(self):
arrays = {}
# Load all the json configs for the arrays.
for file_name in os.listdir(self._path):
if file_name.endswith(".json"):
try:
array = self._load_config_one(file_name)
arrays[array.id] = array
except Exception as e:
self._logger.warn("Exception at loading config {}: {}".format(file_name, e))
try:
# Load the arrays execution state and merge them in.
for array_state in self._load_state():
array_id = array_state[ArrayContext.ID]
if array_id in arrays:
arrays[array_id].update_state_json(array_state)
except Exception as e:
self._logger.warn("Exception at loading execution state {}".format(e))
return arrays
def save_array_states(self, arrays):
with open(os.path.join(self._path, STATE_FILE), 'w') as state_file:
state_file.write(json.dumps([a.get_state_json() for a in arrays]))
def save_array_config(self, array):
file_name = os.path.join(self._path, urllib.unquote(array.id) + ".json")
with open(file_name, "w") as config_file:
config_file.write(json.dumps(array.get_config_json()))
def remove_array_config(self, id):
file_name = os.path.join(self._path, urllib.unquote(id) + ".json")
try:
os.remove(file_name)
except OSError as error:
self._logger.warn("Error when removing array '{}': {}".format(id, error))
def _load_config_one(self, filename):
path = os.path.join(self._path, filename)
if os.path.exists(path):
array = ArrayContext()
with open(path) as json_file:
json_object = json.load(json_file)
array.update_config_json(json_object)
# TODO: We use file name as id if it is not present in the JSON.
if not array.id:
array.id = urllib.quote(os.path.splitext(filename)[0])
self._logger.info("Loaded config = {}".format(json_object))
return array
raise ValueError("Array config {} not found".format(filename))
def _load_state(self):
path = os.path.join(self._path, STATE_FILE)
state = []
self._logger.info("loading state from {}".format(path))
if os.path.exists(path):
with open(path) as state_file:
state = json.load(state_file)
self._logger.info("Loaded state = {}".format(state))
return state
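if __name__ == "__main__":
    # Hedged usage sketch (not part of the worker): load the array configs from
    # a directory and persist their execution state. The directory path and the
    # logger name are illustrative assumptions.
    import logging
    logging.basicConfig(level=logging.INFO)
    store = Store("/pureelk/config", logging.getLogger("pureelk.store"))
    arrays = store.load_arrays()
    store.save_array_states(arrays.values())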
|
{
"content_hash": "f24f2f15c791d6c8ef8a4554cb4da8d6",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 96,
"avg_line_length": 38.275862068965516,
"alnum_prop": 0.5798798798798799,
"repo_name": "pureelk/pureelk",
"id": "0900ac99a5cc87b8caf6f29b9e5069734a77d0e8",
"size": "3330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "container/worker/pureelk/store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2280"
},
{
"name": "HTML",
"bytes": "12345"
},
{
"name": "JavaScript",
"bytes": "13086"
},
{
"name": "Python",
"bytes": "36178"
},
{
"name": "Shell",
"bytes": "9277"
}
],
"symlink_target": ""
}
|
__author__ = 'farooq.sheikh'
from setuptools import setup, find_packages
setup(
name = 'asposepdfexamples',
packages = find_packages(),
version = '1.0.1',
description = 'Aspose.Pdf Cloud SDK for Python allows you to use Aspose.Pdf APIs in your Python applications',
author='Farooq Sheikh',
author_email='farooq.sheikh@aspose.com',
url='http://www.aspose.com/cloud/pdf-api.aspx',
install_requires=[
'asposestoragecloud','asposepdfcloud'
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
|
{
"content_hash": "6440e98811a8e3c288db101ce8922839",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 114,
"avg_line_length": 31.772727272727273,
"alnum_prop": 0.6409155937052933,
"repo_name": "asposepdf/Aspose_Pdf_Cloud",
"id": "fea654a976774c9ea1af019a59dcfa978cbd7f0a",
"size": "699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/Python/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "199"
},
{
"name": "C#",
"bytes": "360118"
},
{
"name": "HTML",
"bytes": "2548"
},
{
"name": "Java",
"bytes": "308541"
},
{
"name": "JavaScript",
"bytes": "222677"
},
{
"name": "Objective-C",
"bytes": "410877"
},
{
"name": "PHP",
"bytes": "223730"
},
{
"name": "Python",
"bytes": "297747"
},
{
"name": "Ruby",
"bytes": "281079"
},
{
"name": "XSLT",
"bytes": "888"
}
],
"symlink_target": ""
}
|
import json
import math
from flask import Flask, request
from flask import render_template
from flask.json import jsonify
app = Flask(__name__, static_url_path='/static')
_file = open("data.csv")
data = map(lambda x: x[:-1].replace("'","").split(","), _file.readlines())
order = [0, 2, 5, 14, 3, 16, 13, 6, 11]
#order = [0, 2, 5, 14, 3, 16, 13, 6, 11, 10, 8, 18, 17, 15, 1, 12]
def message(m, e):
return m, e, {'Access-Control-Allow-Origin': '*'}
def get_good_bad(classes, rows, traverse=[], indices=[]):
good = []
bad = []
good_num = 0
bad_num = 0
for i in rows:
_data = []
for index in indices:
_data.append(data[i][index])
if _data==traverse:
if classes[i] == "good":
good.append(i)
good_num += 1
else:
bad.append(i)
bad_num += 1
gain = -float("inf")
split = None
"""
for column in columns[13:]:
if columns.index(column) not in indices:
freq={}
for r in range(len(column)):
if column[r] in freq:
freq[r][0 if columns[-1]=="good" else 1] += 1
else:
if columns[-1]=="good":
freq[r] = [2, 1]
else:
freq[r] = [1, 2]
g = 0
if good_num > 0:
g = -sum([ float(freq[i][0])/good_num * math.log(float(freq[i][0])/good_num) for i in freq.iterkeys() ])
if bad_num > 0:
g -= sum([ float(freq[i][1])/bad_num* math.log(float(freq[i][1])/bad_num) for i in freq.iterkeys() ])
if g > gain:
gain = g
split = r
"""
return good, bad, good_num, bad_num, split
def tree_build(node, columns, order, index, rows, traverse=[], indices=[]):
node["_children"] = []
print index
for unique in set(columns[index]):
print unique, index
good, bad, good_num, bad_num, split = get_good_bad(columns[-1], rows, traverse+[unique], indices+[index])
entry = {"name": unique, "parent": node["name"], "percentage": [good_num, bad_num], "value":traverse+[unique]}
node["_children"].append(entry)
if order.index(index)+1 < len(order):
tree_build(entry, columns, order, order[order.index(index)+1], good+bad, traverse+[unique], indices+[index])
def tree():
columns = zip(*data)
good, bad, good_num, bad_num, split = get_good_bad(columns[-1], range(len(data)))
root = {"name": "root", "parent": "null", "percentage": [good_num, bad_num], "value":[]}
tree_build(root, columns, order, order[0], range(len(data)))
return root
@app.route("/")
def index():
return render_template("index.html")
@app.route("/data")
def getdata():
return message(jsonify(json.load(open("tree.json"))), 200)
@app.route("/match")
def match():
_data = request.args.get("q").split(",")
result = {"data":[]}
for row in data:
_is_match = True
for i in range(len(_data)):
if _data[i] != row[order[i]]:
_is_match = False
break
if _is_match:
result["data"].append([row[x] for x in order])
return message(jsonify(result), 200)
@app.route("/init")
def init():
json.dump(tree(), open("tree.json", "w"))
return "success"
if __name__ == "__main__":
#init()
app.run(host='0.0.0.0',port=5000,debug=True)
|
{
"content_hash": "8d2f36f59a2b804d84f49cf5c647f62c",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 112,
"avg_line_length": 26.68141592920354,
"alnum_prop": 0.6006633499170813,
"repo_name": "alseambusher/viza",
"id": "7037158ea38ea937e89d2a2d28debb2a94e631af",
"size": "3015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8274"
},
{
"name": "JavaScript",
"bytes": "24904"
},
{
"name": "Python",
"bytes": "3015"
}
],
"symlink_target": ""
}
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print __doc__
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    # amplitude factor of the Ricker wavelet: 2 / (sqrt(3 * width) * pi ** (1 / 4))
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
{
"content_hash": "85b63097d50263036e6d84084663d79f",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 39.649484536082475,
"alnum_prop": 0.6266250650026001,
"repo_name": "mrshu/scikit-learn",
"id": "844eb552fb99148c08f9011236ed222271bc67f7",
"size": "3846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/decomposition/plot_sparse_coding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10173894"
},
{
"name": "C++",
"bytes": "435332"
},
{
"name": "JavaScript",
"bytes": "4775"
},
{
"name": "Python",
"bytes": "3532352"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
}
|
default_app_config = 'student.apps.apps.AppConfig'
|
{
"content_hash": "5fcd8124d68a4aec989af06a22e1725f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 50,
"avg_line_length": 50,
"alnum_prop": 0.8,
"repo_name": "houssemFat/MeeM-Dev",
"id": "c95453cc97878d0a132e616f2dc1f97d142cb054",
"size": "50",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "student/apps/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54148"
},
{
"name": "HTML",
"bytes": "360877"
},
{
"name": "JavaScript",
"bytes": "1651985"
},
{
"name": "Nginx",
"bytes": "1597"
},
{
"name": "PHP",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "374180"
},
{
"name": "Smarty",
"bytes": "7600"
}
],
"symlink_target": ""
}
|
import time, datetime, os
from pyvcloud.vcloudair import VCA
def print_vca(vca):
if vca:
print 'vca token: ', vca.token
if vca.vcloud_session:
print 'vcloud session token: ', vca.vcloud_session.token
print 'org name: ', vca.vcloud_session.org
print 'org url: ', vca.vcloud_session.org_url
print 'organization: ', vca.vcloud_session.organization
else:
print 'vca vcloud session: ', vca.vcloud_session
else:
print 'vca: ', vca
def test_vcloud_session(vca, vdc, vapp):
the_vdc = vca.get_vdc(vdc)
for x in range(1, 1):
print datetime.datetime.now(), the_vdc.get_name(), vca.vcloud_session.token
the_vdc = vca.get_vdc(vdc)
if the_vdc: print the_vdc.get_name(), vca.vcloud_session.token
else: print False
the_vapp = vca.get_vapp(the_vdc, vapp)
if the_vapp: print the_vapp.me.name
else: print False
time.sleep(2)
### On Demand
host='iam.vchs.vmware.com'
username = os.environ['VCAUSER']
password = os.environ['PASSWORD']
instance = 'c40ba6b4-c158-49fb-b164-5c66f90344fa'
org = 'a6545fcb-d68a-489f-afff-2ea055104cc1'
vdc = 'VDC1'
vapp = 'ubu'
#sample login sequence on vCloud Air On Demand
vca = VCA(host=host, username=username, service_type='ondemand', version='5.7', verify=True)
#first login, with password
result = vca.login(password=password)
print_vca(vca)
#then login with password and instance id, this will generate a session_token
result = vca.login_to_instance(password=password, instance=instance, token=None, org_url=None)
print_vca(vca)
#next login, with token, org and org_url, no password, it will retrieve the organization
result = vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token, org_url=vca.vcloud_session.org_url)
print_vca(vca)
#this tests the vca token
result = vca.login(token=vca.token)
if result: print result, vca.instances
else: print False
#this tests the vcloud session token
test_vcloud_session(vca, vdc, vapp)
s=vca.get_score_service('https://score.vca.io')
blueprints = s.list()
for blueprint in blueprints:
print blueprint.get('id')
s.upload(os.environ['BPFILE'], 'bp1')
blueprints = s.list()
for blueprint in blueprints:
print blueprint.get('id')
|
{
"content_hash": "e295be38e37b9aafdc7a9129a3fb21e6",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 132,
"avg_line_length": 33.76056338028169,
"alnum_prop": 0.6620775969962454,
"repo_name": "cloudify-cosmo/pyvcloud",
"id": "c46e1b5996f8cb2a9504cf8874827121afb693fc",
"size": "2397",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/login_to_score.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16922884"
}
],
"symlink_target": ""
}
|
import nCore
import sqlite3
from os.path import exists
__author__ = 'pioo'
# TODO find dbfile's place
_dbfile = "ncore.db"
_memdb = ":memory:"
if not exists(_dbfile):
_needs_init = True
else:
_needs_init = False
_conn = sqlite3.connect(_memdb)
_cur = _conn.cursor()
def insert_into_db(torrent):
    sqlstatement = ('INSERT INTO TorrentData '
                    'VALUES(:id, :nev, :alt_nev, :tipus, :img_url, :infolink, :imdbrank, '
                    ':meret, :downloaded, :seed, :leech, :date, :feltolto, :status)')
_cur.execute(sqlstatement, {
'id': torrent['id'],
'nev': torrent['nev'],
'alt_nev': torrent['alt_nev'],
'tipus': torrent['tipus'],
'img_url': torrent['img_url'],
'infolink': torrent['infolink'],
'imdbrank': torrent['imdbrank'],
'meret': torrent['meret'],
'downloaded': torrent['downloaded'],
'seed': torrent['seed'],
'leech': torrent['leech'],
'date': torrent['date'],
'feltolto': torrent['feltolto'],
'status': torrent['status']})
_conn.commit()
return
def _create_torrent_db(cur):
cur.execute("CREATE TABLE "
"Params (ParamKey TEXT PRIMARY KEY, ParamValue TEXT)")
cur.execute(
'CREATE TABLE TorrentData ('
'id TEXT PRIMARY KEY, '
'nev TEXT, '
'alt_nev TEXT, '
'tipus TEXT, '
'img_url TEXT, '
'infolink TEXT, '
'imdbrank TEXT, '
'meret TEXT, '
'downloaded NUMBER, '
'seed NUMBER, '
'leech NUMBER, '
'date NUMBER, '
'feltolto TEXT, status TEXT)')
cur.execute("CREATE TABLE Tagek (TagID NUMBER PRIMARY KEY, Tag TEXT)")
cur.execute("CREATE TABLE TorrentTags (TorrentID NUMBER, TagID NUMBER)")
return
def _is_id_available(torrentid):
_cur.execute('SELECT count(id) '
'FROM TorrentData '
'WHERE id = ?', (torrentid, ))
row = _cur.fetchone()
if row[0] == 0:
return True
else:
return False
_create_torrent_db(_cur)
# replace username and password
n1 = nCore.nCore('username', 'password')
print "[+] Logged in"
i = 0
for torrent in n1.get_torrents('', 'xvid_hun'):
#if i >= 33:
# break
if _is_id_available(torrent['id']):
i += 1
print "[+]", i, torrent['id'], torrent['nev']
insert_into_db(torrent)
print "[+] Dumping memory database into file"
query = "".join(line for line in _conn.iterdump())
new_db = sqlite3.connect(_dbfile)
new_db.executescript(query)
|
{
"content_hash": "1e9b324ec37c7717960b17b402b70190",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 76,
"avg_line_length": 27.141304347826086,
"alnum_prop": 0.5802963556267521,
"repo_name": "piooca/libncore",
"id": "41df9480efbd1d4cfe0fd48751db3f026762fc02",
"size": "2544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libncore/dbhack.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "24725"
}
],
"symlink_target": ""
}
|
"""
Copyright (C) 2014, Michael Trunner
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of {{ project }} nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
import datetime
import wiringpi2
wiringpi2.wiringPiSetup()
class TevionCode(object):
_delta_long = 1200
_delta_short = 600
WIRINGPI_OUTPUT_MODE = 1
COMMAND_CODES = {
0: {
'on': (170, 85),
'off': (169, 86),
'brighter': (169, 154),
'darker': (170, 153)
},
1: {
'on': (86, 86),
'off': (85, 85),
'brighter': (85, 153),
'darker': (86, 154)
},
2: {
'on': (150, 90),
'off': (149, 89),
'brighter': (149, 149),
'darker': (150, 150)
},
3: {
'on': (166, 89),
'off': (165, 90),
'brighter': (165, 150),
'darker': (166, 149)
},
4: {
'on': (102, 85),
'off': (101, 86),
'brighter': (101, 154),
'darker': (102, 153)
}
}
def __init__(self, house_code, pin, adj=1):
self.set_house_code(house_code)
self.pin = pin
self.pin_value = 0
self._init_wiringpi()
self.adj = adj
self.toggles = 0
self.duration = 0
def _init_wiringpi(self):
"""
Initializes the wiringpi pin of the 433 module
"""
wiringpi2.pinMode(self.pin, self.WIRINGPI_OUTPUT_MODE)
def get_controll_code(self, outlet_no, command):
"""
Returns the tevion controll code of the given command for
the given remote outlet.
:return: command
:rtype: tuple
"""
return self.COMMAND_CODES[outlet_no][command]
def _toggle_pin_value(self):
"""
Toggles the internal pin state
"""
if self.pin_value == 1:
self.pin_value = 0
else:
self.pin_value = 1
return self.pin_value
def _get_long_delta(self):
"""
Returns the adjusted delta for a long signal (logical one)
"""
return int(self._delta_long * self.adj)
def _get_short_delta(self):
"""
Returns the adjusted delta for a short signal (logical zero)
"""
return int(self._delta_short * self.adj)
def _send_bit(self, value):
"""
Sends the given logical bit
"""
wiringpi2.digitalWrite(self.pin, self._toggle_pin_value())
if value:
wiringpi2.delayMicroseconds(self._get_long_delta())
self.duration += self._delta_long
else:
wiringpi2.delayMicroseconds(self._get_short_delta())
self.duration += self._delta_short
self.toggles += 1
def set_house_code(self, house_code):
"""
Calculates and sets the internal representation of
the tevion house code.
"""
h = []
for n in house_code:
h.extend(self._bitfield(n))
h.append(1) # Parity hack!?!
self._house_code = h
def _bitfield(self, n):
return [1 if digit == '1' else 0 for digit in '{0:08b}'.format(n)]
def _send_house_code(self):
for h in self._house_code:
self._send_bit(h)
def send_code(self, code):
"""
Sends the given code (tuple)
"""
self._send_house_code()
for c in code:
for bit in self._bitfield(c):
self._send_bit(bit)
def send_command(self, outlet_no, command):
"""
Sends the given command code for the given remote outlet.
"""
self.send_code(self.get_controll_code(outlet_no, command))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Sends 433Mhz codes for the tevion remote control outlet')
parser.add_argument(
'-p', '--pin', type=int, default=1,
help='Number of the wiringpi pin that should be used',
)
parser.add_argument(
'-hc', '--housecode', type=int, nargs=3, default=[77, 42, 170],
help='The Tevion house code of the outlets.\nDefault is 77, 42, 170.')
parser.add_argument(
'command', type=str, choices=['on', 'off', 'brighter', 'darker'])
parser.add_argument(
'outlet', type=int, nargs='?', default=0, choices=range(1, 5),
help='Number of the power outlet, or all if omitted')
parser.add_argument(
'-r', '--repeat', type=int, default=5,
help='Number of time the given code should be send.\n Default is 5.')
parser.add_argument('-d', '--debug', action="store_true",
help='Activates debug output')
parser.add_argument('--adj', type=float, default=1,
help='Adjust the sending speed.')
args = parser.parse_args()
start_time = datetime.datetime.now()
tevion = TevionCode(args.housecode, args.pin, args.adj)
for _i in range(args.repeat):
tevion.send_command(args.outlet, args.command)
if args.debug:
print (datetime.datetime.now() - start_time).total_seconds() * 1000000
print tevion.duration
print tevion.toggles
|
{
"content_hash": "8779a0ea9a7da08d9ffdb37c6fc86328",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 80,
"avg_line_length": 32.16019417475728,
"alnum_prop": 0.586566037735849,
"repo_name": "trunneml/tevionPi",
"id": "3d584a60beef361d082d6b1dc69ed51d8cde1ede",
"size": "6648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "send_tevion.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8618"
}
],
"symlink_target": ""
}
|
import autosar
def create_platform_types(ws):
ws.pushRoles()
package = ws.createPackage('AUTOSAR_Platform')
baseTypes = package.createSubPackage('BaseTypes', role = 'DataType')
package.createSubPackage('CompuMethods', role = 'CompuMethod')
package.createSubPackage('DataConstrs', role = 'DataConstraint')
baseTypes.createSwBaseType('dtRef_const_VOID', 1, encoding = 'VOID', nativeDeclaration = 'void')
baseTypes.createSwBaseType('dtRef_VOID', 1, encoding = 'VOID', nativeDeclaration = 'void')
baseTypes.createSwBaseType('boolean', 8, encoding = 'BOOLEAN', nativeDeclaration='boolean')
baseTypes.createSwBaseType('uint8', 8, nativeDeclaration='uint8')
implTypes = package.createSubPackage('ImplementationDataTypes')
implTypes.createImplementationDataType('boolean', '/AUTOSAR_Platform/BaseTypes/boolean',
valueTable=['FALSE', 'TRUE'], typeEmitter='Platform_Type')
implTypes.createImplementationDataTypePtr('dtRef_const_VOID',
'/AUTOSAR_Platform/BaseTypes/dtRef_const_VOID', swImplPolicy = 'CONST')
implTypes.createImplementationDataTypePtr('dtRef_VOID', '/AUTOSAR_Platform/BaseTypes/dtRef_VOID')
ws.popRoles()
ws.pushRoles()
package = ws.createPackage('Predefined_DEV')
package.createSubPackage('CompuMethods', role = 'CompuMethod')
package.createSubPackage('DataConstrs', role = 'DataConstraint')
implTypes = package.createSubPackage('ImplementationDataTypes')
implTypes.createImplementationDataType('NvM_RequestResultType', '/AUTOSAR_Platform/BaseTypes/uint8',
valueTable=[
'NVM_REQ_OK',
'NVM_REQ_NOT_OK',
'NVM_REQ_PENDING',
'NVM_REQ_INTEGRITY_FAILED',
'NVM_REQ_BLOCK_SKIPPED',
'NVM_REQ_NV_INVALIDATED'])
ws.popRoles()
def create_port_interfaces(ws):
package = ws.createPackage('PortInterfaces', role='PortInterface')
portInterface=package.createClientServerInterface("NvM_RequestResult",
errors = autosar.ApplicationError("E_NOT_OK", 1),
isService=True, operations = [
"EraseBlock",
"GetErrorStatus",
"InvalidateNvBlock",
"ReadBlock",
"SetRamBlockStatus",
"WriteBlock"])
NvM_RequestResultType_ref = "/Predefined_DEV/ImplementationDataTypes/NvM_RequestResultType"
boolean_ref = "/AUTOSAR_Platform/ImplementationDataTypes/boolean"
dtRef_const_VOID_ref = "/AUTOSAR_Platform/ImplementationDataTypes/dtRef_const_VOID"
dtRef_VOID_ref = "/AUTOSAR_Platform/ImplementationDataTypes/dtRef_VOID"
portInterface["EraseBlock"].possibleErrors = "E_NOT_OK"
portInterface["GetErrorStatus"].createOutArgument("ErrorStatus", NvM_RequestResultType_ref,
"NOT-ACCESSIBLE", "USE-ARGUMENT-TYPE")
portInterface["GetErrorStatus"].possibleErrors = "E_NOT_OK"
portInterface["InvalidateNvBlock"].possibleErrors = "E_NOT_OK"
portInterface["ReadBlock"].createInArgument("DstPtr", dtRef_VOID_ref, "NOT-ACCESSIBLE", "USE-ARGUMENT-TYPE")
portInterface["ReadBlock"].possibleErrors = "E_NOT_OK"
portInterface["SetRamBlockStatus"].createInArgument("RamBlockStatus", boolean_ref, "NOT-ACCESSIBLE", "USE-ARGUMENT-TYPE")
portInterface["SetRamBlockStatus"].possibleErrors = "E_NOT_OK"
portInterface["WriteBlock"].createInArgument("SrcPtr", dtRef_const_VOID_ref, "NOT-ACCESSIBLE", "USE-ARGUMENT-TYPE")
portInterface["WriteBlock"].possibleErrors = "E_NOT_OK"
def create_components(ws):
package = ws.createPackage('ComponentTypes', role='ComponentType')
comp1 = package.createServiceComponent('ServerComponent')
comp1.createProvidePort('Nvm_PersonalSettings', 'NvM_RequestResult')
comp2 = package.createApplicationSoftwareComponent('ClientComponent')
comp2.createRequirePort('Nvm_PersonalSettings', 'NvM_RequestResult')
ws = autosar.workspace(version="4.2.2")
create_platform_types(ws)
create_port_interfaces(ws)
create_components(ws)
ws.saveXML('ServerComponent.arxml', filters=['/ComponentTypes/ServerComponent'])
ws.saveXML('ClientComponent.arxml', filters=['/ComponentTypes/ClientComponent'])
ws.saveXML('Platform.arxml', filters=['/AUTOSAR_Platform', '/Predefined_DEV'])
ws.saveXML('PortInterfaces.arxml', filters = ["/PortInterfaces"])
|
{
"content_hash": "69bca9c906c2bc70fdc33354db043e5e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 125,
"avg_line_length": 55.97435897435897,
"alnum_prop": 0.7084287677508017,
"repo_name": "cogu/autosar",
"id": "4fa985a11c67bece28bbc9de1f1d8073a84e77bc",
"size": "4366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/autosar4_guide/examples/create_client_server_port.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "1039000"
},
{
"name": "Shell",
"bytes": "445"
}
],
"symlink_target": ""
}
|
"""ResNet model for TF2."""
import functools
from typing import Optional, Sequence, Text, Union
from class_balanced_distillation import resnet_helpers
import tensorflow.compat.v2 as tf
class CosineLayer(tf.keras.Model):
"""Cosine Layer."""
def __init__(self, in_features, num_classes, name=None):
super(CosineLayer, self).__init__(name=name)
self.num_classes = num_classes
self.sigma = tf.Variable(20.0, dtype=tf.float32,
trainable=False, name='sigma')
w_init = tf.keras.initializers.GlorotUniform()
self.w = tf.Variable(initial_value=w_init(shape=(in_features, num_classes),
dtype='float32'),
trainable=True, name='weights')
def __call__(self, x, labels=None, training=False):
x = tf.math.l2_normalize(x, 1)
weights = tf.math.l2_normalize(self.w, 0)
x = tf.matmul(x, weights)
x = self.sigma * x
return x
class BlockGroup(tf.keras.Model):
"""Higher level block for ResNet implementation."""
def __init__(self,
channels,
num_blocks,
stride,
norm,
block,
name = None):
super(BlockGroup, self).__init__(name=name)
self._channels = channels * block.EXPANSION
self._num_blocks = num_blocks
self._stride = stride
self._blocks = []
for id_block in range(num_blocks):
self._blocks.append(
block(
channels=self._channels,
stride=stride if id_block == 0 else 1,
use_projection=(id_block == 0),
norm=norm,
name='block_{}'.format(id_block)))
def __call__(self, inputs, **norm_kwargs):
net = inputs
for block in self._blocks:
net = block(net, **norm_kwargs)
return net
class ResNet(tf.keras.Model):
"""ResNet model."""
def __init__(self,
blocks_per_group_list,
num_classes,
norm,
block,
channels_per_group_list = (64, 128, 256, 512),
proj_dim=-1,
name = None):
"""Constructs a ResNet model.
Args:
blocks_per_group_list: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
norm: The normalization object.
block: Pointer to class of ResNet block (eg. BasicBlock).
channels_per_group_list: A sequence of length 4 that indicates the number
of channels used for each block in each group.
proj_dim: Output dimensionality of the projection layer before the
classifier. Set -1 to skip.
name: Name of the module.
"""
super(ResNet, self).__init__(name=name)
# Number of blocks in each group for ResNet.
if len(blocks_per_group_list) != 4:
raise ValueError(
'`blocks_per_group_list` must be of length 4 not {}'.format(
len(blocks_per_group_list)))
self._blocks_per_group_list = blocks_per_group_list
# Number of channels in each group for ResNet.
if len(channels_per_group_list) != 4:
raise ValueError(
'`channels_per_group_list` must be of length 4 not {}'.format(
len(channels_per_group_list)))
self._channels_per_group_list = channels_per_group_list
self._initial_conv = tf.keras.layers.Conv2D(
filters=64,
kernel_size=7,
strides=2,
padding='same',
use_bias=False,
name='initial_conv')
self._initial_norm = norm(name='initial_' + 'bn')
self._block_groups = []
strides = [1, 2, 2, 2]
for i in range(4):
self._block_groups.append(
BlockGroup(
channels=self._channels_per_group_list[i],
num_blocks=self._blocks_per_group_list[i],
stride=strides[i],
norm=norm,
block=block,
name='block_group_%d' % i))
self.proj_dim = proj_dim
if self.proj_dim != -1:
self.proj_layer = tf.keras.layers.Dense(
units=self.proj_dim, name='proj_layer')
self.embedding_dim = self.proj_dim
else:
self.embedding_dim = 512 * block.EXPANSION
self.linear = CosineLayer(in_features=self.embedding_dim,
num_classes=num_classes)
def __call__(self,
inputs,
labels=None,
training=False,
return_features=False):
net = inputs
net = self._initial_conv(net)
net = self._initial_norm(net, training=training)
net = tf.nn.relu(net)
net = tf.nn.max_pool2d(
net, ksize=3, strides=2, padding='SAME', name='initial_max_pool')
for block_group in self._block_groups:
net = block_group(net, training=training)
net = tf.reduce_mean(net, axis=[1, 2], name='final_avg_pool')
if self.proj_dim != -1:
net = self.proj_layer(net)
net = tf.nn.relu(net)
net = tf.nn.l2_normalize(net, axis=1)
if return_features:
features = net
net = self.linear(net)
if return_features:
return features, net
else:
return net
def resnet(num_layers,
num_classes,
proj_dim,
name = None):
"""Constructs a ResNet model.
Args:
num_layers: The number of layers of ResNet.
num_classes: The number of classes to classify the inputs into.
proj_dim: Dimensions of the dense projection layer before the classifier
name: Name of the module.
Returns:
model: the resnet model.
"""
block_group_dict = {
18: ([2, 2, 2, 2], 'basic'),
26: ([2, 2, 2, 2], 'bottleneck'),
34: ([3, 4, 6, 3], 'basic'),
50: ([3, 4, 6, 3], 'bottleneck'),
101: ([3, 4, 23, 3], 'bottleneck'),
152: ([3, 8, 36, 3], 'bottleneck'),
}
resnet_type_dict = {
'resnet': {
'basic': resnet_helpers.BasicBlock,
'bottleneck': resnet_helpers.BottleNeckBlockV1,
},
}
# Check number of layers
if num_layers in block_group_dict:
block_groups, block_type = block_group_dict[num_layers]
else:
raise NotImplementedError(
'Please choose among the '
'18-, 26-, 34-, 50-, 101-, or 152-layer variant of ResNet.')
norm = functools.partial(
tf.keras.layers.BatchNormalization,
momentum=0.9,
epsilon=1e-5,
)
block = resnet_type_dict['resnet'][block_type]
print('Initializing resnet-{}'.format(num_layers))
model = ResNet(
blocks_per_group_list=block_groups,
num_classes=num_classes,
norm=norm,
block=block,
proj_dim=proj_dim,
name=name)
return model
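if __name__ == '__main__':
  # Hedged usage sketch: the batch size, input resolution, and class count are
  # illustrative assumptions, not values used by the original project.
  model = resnet(num_layers=50, num_classes=1000, proj_dim=-1)
  images = tf.random.uniform((2, 224, 224, 3))
  features, logits = model(images, training=False, return_features=True)
  print('features:', features.shape, 'logits:', logits.shape)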
|
{
"content_hash": "cb8339f3d8ce854fca0496fbb688e0c2",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 80,
"avg_line_length": 29.017316017316016,
"alnum_prop": 0.5785469192898702,
"repo_name": "google-research/google-research",
"id": "f9c963d85531a6da04c07bb3cb5cd04bd729e125",
"size": "7311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class_balanced_distillation/resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from skimage import img_as_ubyte
from plantcv.plantcv import fatal_error
from plantcv.plantcv import Spectral_data
from plantcv.plantcv import params
from plantcv.plantcv.hyperspectral.read_data import _make_pseudo_rgb
from plantcv.plantcv._debug import _debug
def image_fusion(img1, img2, wvs1, wvs2, array_type="multispectral"):
"""Fuse two images of the same size together to create a multispectral image
img1: 1st image to be fused
img2: 2nd image to be fused
    wvs1: list of wavelengths representing the bands in img1
    wvs2: list of wavelengths representing the bands in img2
array_type: (optional) description of the fused array
:param img1: numpy.ndarray
:param img2: numpy.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:return fused_array: plantcv.Spectral_data
"""
# If the image is 2D, expand to 3D to make stackable
img1 = _expand_img_dims(img1)
r1, c1, _ = img1.shape
# If the image is 2D, expand to 3D to make stackable
img2 = _expand_img_dims(img2)
r2, c2, _ = img2.shape
# Fatal error if images are not the same spatial dimensions
if (r1, c1) != (r2, c2):
fatal_error("Input images should have the same image size")
# If the images are not the same data type, convert to 8-bit unsigned integer
if img1.dtype != img2.dtype:
img1 = img_as_ubyte(img1)
img2 = img_as_ubyte(img2)
# Concatenate the images on the depth/spectral (z) axis
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:, :, ind]
# Scale the array data to 0-1 by dividing by the maximum data type value
array_data = (array_data / np.iinfo(array_data.dtype).max).astype(np.float32)
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=float(np.amax(array_data)),
min_value=float(np.amin(array_data)),
d_type=array_data.dtype,
wavelength_dict=wavelength_dict,
samples=c, lines=r, interleave="NA",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename="NA", default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
def _expand_img_dims(img):
"""Expand 2D images to 3D
Inputs:
img - input image
Returns:
img - image with expanded dimensions
:params img: numpy.ndarray
:return img: numpy.ndarray
"""
# If the image is 2D, expand to 3D to make stackable
if len(img.shape) == 2:
return np.expand_dims(img, axis=2)
# Return copy of image to break the reference to the input image
return img.copy()
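if __name__ == "__main__":
    # Hedged usage sketch: the array shapes and wavelengths below are made-up
    # illustrations, not data from a real sensor.
    band_a = np.zeros((10, 10, 2), dtype=np.uint8)
    band_b = np.full((10, 10), 255, dtype=np.uint8)
    fused = image_fusion(band_a, band_b, wvs1=[480.0, 550.0], wvs2=[800.0])
    print(fused.array_data.shape)  # expected: (10, 10, 3)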
|
{
"content_hash": "052236f0235b41c6ec7d24c098c099e6",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 119,
"avg_line_length": 35.64646464646464,
"alnum_prop": 0.6344573533578918,
"repo_name": "stiphyMT/plantcv",
"id": "9747be17c767aa252c64988433a4fdaf1793d1a4",
"size": "3548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plantcv/plantcv/image_fusion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "955647"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
}
|
import pytest
def reverse_string_recursively(data: str) -> str:
if data == "" or data == None or len(data) == 1:
return data
return data[len(data) - 1] + reverse_string_recursively(data[0 : len(data) - 1])
@pytest.mark.parametrize(("data", "res"), [("anu", "una"), ("karthik", "kihtrak")])
def test_reverse_string_recursively(data: str, res: str) -> None:
assert res == reverse_string_recursively(data)
|
{
"content_hash": "8b57cf83fba6e1b829dd3dba2d928e8f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 35.583333333333336,
"alnum_prop": 0.639344262295082,
"repo_name": "anu-ka/coding-problems",
"id": "8832320d573d318b2f539b957321950bc23ac544",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reverse_string_recursively.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "11890"
},
{
"name": "Java",
"bytes": "10158"
},
{
"name": "Python",
"bytes": "160560"
}
],
"symlink_target": ""
}
|
class Code(object):
"""A convenience object for constructing code.
Logically each object should be a block of code. All methods except |Render|
and |IsEmpty| return self.
"""
def __init__(self, indent_size=2, comment_length=80):
self._code = []
self._indent_size = indent_size
self._comment_length = comment_length
self._line_prefixes = []
def Append(self, line='',
substitute=True,
indent_level=None,
new_line=True,
strip_right=True):
"""Appends a line of code at the current indent level or just a newline if
line is not specified.
substitute: indicated whether this line should be affected by
code.Substitute().
new_line: whether this should be added as a new line, or should be appended
to the last line of the code.
strip_right: whether or not trailing whitespace should be stripped.
"""
if line:
prefix = indent_level * ' ' if indent_level else ''.join(
self._line_prefixes)
else:
prefix = ''
if strip_right:
line = line.rstrip()
if not new_line and self._code:
self._code[-1].value += line
else:
self._code.append(Line(prefix + line, substitute=substitute))
return self
def IsEmpty(self):
"""Returns True if the Code object is empty.
"""
return not bool(self._code)
def Concat(self, obj, new_line=True):
"""Concatenate another Code object onto this one. Trailing whitespace is
stripped.
Appends the code at the current indent level. Will fail if there are any
un-interpolated format specifiers eg %s, %(something)s which helps
isolate any strings that haven't been substituted.
"""
if not isinstance(obj, Code):
raise TypeError(type(obj))
assert self is not obj
if not obj._code:
return self
for line in obj._code:
try:
# line % () will fail if any substitution tokens are left in line
if line.substitute:
line.value %= ()
except TypeError:
raise TypeError('Unsubstituted value when concatting\n' + line.value)
except ValueError:
raise ValueError('Stray % character when concatting\n' + line.value)
first_line = obj._code.pop(0)
self.Append(first_line.value, first_line.substitute, new_line=new_line)
for line in obj._code:
self.Append(line.value, line.substitute)
return self
def Cblock(self, code):
"""Concatenates another Code object |code| onto this one followed by a
blank line, if |code| is non-empty."""
if not code.IsEmpty():
self.Concat(code).Append()
return self
def Sblock(self, line=None, line_prefix=None, new_line=True):
"""Starts a code block.
Appends a line of code and then increases the indent level. If |line_prefix|
is present, it will be treated as the extra prefix for the code block.
Otherwise, the prefix will be the default indent level.
"""
if line is not None:
self.Append(line, new_line=new_line)
self._line_prefixes.append(line_prefix or ' ' * self._indent_size)
return self
def Eblock(self, line=None):
"""Ends a code block by decreasing and then appending a line (or a blank
line if not given).
"""
# TODO(calamity): Decide if type checking is necessary
#if not isinstance(line, basestring):
# raise TypeError
self._line_prefixes.pop()
if line is not None:
self.Append(line)
return self
def Comment(self, comment, comment_prefix='// ',
wrap_indent=0, new_line=True):
"""Adds the given string as a comment.
Will split the comment if it's too long. Use mainly for variable length
comments. Otherwise just use code.Append('// ...') for comments.
Unaffected by code.Substitute().
"""
# Helper function to trim a comment to the maximum length, and return one
# line and the remainder of the comment.
def trim_comment(comment, max_len):
if len(comment) <= max_len:
return comment, ''
last_space = comment.rfind(' ', 0, max_len + 1)
if last_space != -1:
line = comment[0:last_space]
comment = comment[last_space + 1:]
else:
line = comment[0:max_len]
comment = comment[max_len:]
return line, comment
# First line has the full maximum length.
if not new_line and self._code:
max_len = self._comment_length - len(self._code[-1].value)
else:
max_len = (self._comment_length - len(''.join(self._line_prefixes)) -
len(comment_prefix))
line, comment = trim_comment(comment, max_len)
self.Append(comment_prefix + line, substitute=False, new_line=new_line)
    # Any subsequent lines are subject to the wrap indent.
max_len = (self._comment_length - len(''.join(self._line_prefixes)) -
len(comment_prefix) - wrap_indent)
while len(comment):
line, comment = trim_comment(comment, max_len)
self.Append(comment_prefix + (' ' * wrap_indent) + line, substitute=False)
return self
def Substitute(self, d):
"""Goes through each line and interpolates using the given dict.
Raises type error if passed something that isn't a dict
Use for long pieces of code using interpolation with the same variables
repeatedly. This will reduce code and allow for named placeholders which
are more clear.
"""
if not isinstance(d, dict):
raise TypeError('Passed argument is not a dictionary: ' + d)
for i, line in enumerate(self._code):
if self._code[i].substitute:
# Only need to check %s because arg is a dict and python will allow
# '%s %(named)s' but just about nothing else
if '%s' in self._code[i].value or '%r' in self._code[i].value:
raise TypeError('"%s" or "%r" found in substitution. '
'Named arguments only. Use "%" to escape')
self._code[i].value = line.value % d
self._code[i].substitute = False
return self
def TrimTrailingNewlines(self):
"""Trims any trailing newlines.
"""
while self._code:
if self._code[-1].value != '':
return
self._code = self._code[:-1]
def Render(self):
"""Renders Code as a string.
"""
return '\n'.join([l.value for l in self._code])
class Line(object):
"""A line of code.
"""
def __init__(self, value, substitute=True):
self.value = value
self.substitute = substitute
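if __name__ == '__main__':
  # Hedged usage sketch (illustrative only): build a small block with the
  # methods above and render it; the generated snippet is arbitrary.
  c = Code()
  (c.Append('int %(name)s() {')
    .Sblock()
    .Append('return %(value)s;')
    .Eblock('}'))
  c.Substitute({'name': 'GetAnswer', 'value': 42})
  print c.Render()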
|
{
"content_hash": "f30fd10a8fff60f990144c8f02c01807",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 80,
"avg_line_length": 33.8586387434555,
"alnum_prop": 0.6352249884026596,
"repo_name": "joone/chromium-crosswalk",
"id": "d637026d6d0db88aaf950ac5d0dc9c673fbc4929",
"size": "6634",
"binary": false,
"copies": "7",
"ref": "refs/heads/2016.04.css-round-display-edtior-draft-1",
"path": "tools/json_schema_compiler/code.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Posterior Predictive Check Plot
===============================
"""
import arviz as az
data = az.load_arviz_data("non_centered_eight")
ax = az.plot_ppc(data, alpha=0.03, figsize=(12, 6), backend="bokeh")
|
{
"content_hash": "52efa94beb1bed172ba3b4cab5906188",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 68,
"avg_line_length": 26.125,
"alnum_prop": 0.5885167464114832,
"repo_name": "arviz-devs/arviz",
"id": "42cc9824a4f08ec2f744a814481a0a8e357f52e5",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/bokeh/bokeh_plot_ppc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5900"
},
{
"name": "Dockerfile",
"bytes": "1771"
},
{
"name": "HTML",
"bytes": "1343"
},
{
"name": "Jupyter Notebook",
"bytes": "641262"
},
{
"name": "Makefile",
"bytes": "688"
},
{
"name": "PowerShell",
"bytes": "2668"
},
{
"name": "Python",
"bytes": "1634423"
},
{
"name": "R",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "7276"
},
{
"name": "TeX",
"bytes": "24620"
}
],
"symlink_target": ""
}
|
import sys
if sys.version_info.major == 3:
from .config import Config
else:
from config import Config
|
{
"content_hash": "8a548f238f2d9f9c014c839198e1cd0b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 31,
"avg_line_length": 21.2,
"alnum_prop": 0.7452830188679245,
"repo_name": "MatteusDeloge/opengrid",
"id": "b01d011987675d9b98845cd43854662f51ccf705",
"size": "130",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "opengrid/config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "243319"
},
{
"name": "Jupyter Notebook",
"bytes": "104480"
},
{
"name": "Python",
"bytes": "173204"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
}
|
"""Tests for the Android contacts2.db database event formatter."""
import unittest
from plaso.formatters import android_calls
from tests.formatters import test_lib
class AndroidCallFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Android call history event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = android_calls.AndroidCallFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = android_calls.AndroidCallFormatter()
expected_attribute_names = [
u'call_type', u'number', u'name', u'duration']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0dfcdd831ea29c1ca1114885db7d50c6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 28.15625,
"alnum_prop": 0.7369589345172031,
"repo_name": "dc3-plaso/plaso",
"id": "982a87f1a1c6e4860caa83c039088a4741fcb2fb",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/formatters/android_calls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
}
|
"""
Django settings for fightit project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_tbw*s95-8q5ntoh#0&xfx0fpk#k&qlfh9t#-i9=tt3x+&)f$r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'fightit',
'ui',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fightit.urls'
WSGI_APPLICATION = 'fightit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, '../bower_components/'),
os.path.join(BASE_DIR, 'static/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
{
"content_hash": "441eefc88f760821a90e2f79d4f02606",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 71,
"avg_line_length": 24.099009900990097,
"alnum_prop": 0.7025472473294988,
"repo_name": "miwoow/fight-heros",
"id": "eb7df6bab535001faa5ff794ed7702f3e3dfabd2",
"size": "2434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fightit/fightit/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "126"
},
{
"name": "Python",
"bytes": "4475"
},
{
"name": "Shell",
"bytes": "86"
}
],
"symlink_target": ""
}
|
"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import stat
import sys
import py_compile
def compile_dir(dir, maxlevels=10, ddir=None, force=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: if given, purported directory name (this is the
directory name that will show up in error messages)
force: if 1, force compilation, even if timestamps are up-to-date
"""
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir:
dfile = os.path.join(ddir, name)
else:
dfile = None
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
cfile = fullname + (__debug__ and 'c' or 'o')
ftime = os.stat(fullname)[stat.ST_MTIME]
try: ctime = os.stat(cfile)[stat.ST_MTIME]
except os.error: ctime = 0
if (ctime > ftime) and not force: continue
print 'Compiling', fullname, '...'
try:
py_compile.compile(fullname, None, dfile)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
if type(sys.exc_type) == type(''):
exc_type_name = sys.exc_type
else: exc_type_name = sys.exc_type.__name__
print 'Sorry:', exc_type_name + ':',
print sys.exc_value
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
compile_dir(fullname, maxlevels - 1, dfile, force)
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None, force)
return success
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfd:')
except getopt.error, msg:
print msg
print "usage: compileall [-l] [-f] [-d destdir] [directory ...]"
print "-l: don't recurse down"
print "-f: force rebuild even if timestamps are up-to-date"
print "-d destdir: purported directory name for error messages"
print "if no directory arguments, -l sys.path is assumed"
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if ddir:
if len(args) != 1:
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args:
for dir in args:
success = success and compile_dir(dir, maxlevels, ddir, force)
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupt]"
success = 0
return success
if __name__ == '__main__':
sys.exit(not main())
|
{
"content_hash": "4d38baf0cd2da8f5fe0d2810cd180fb9",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 78,
"avg_line_length": 33.484375,
"alnum_prop": 0.5685954269715352,
"repo_name": "MalloyPower/parsing-python",
"id": "e56c8b284e835c72417fe5579820242fefc08d5a",
"size": "4286",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/dos-8x3/compilea.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import os
import imp
import inspect
from pact_test.either import *
from pact_test.constants import *
from pact_test.utils.logger import *
from pact_test.utils.pact_utils import get_pact
from pact_test.utils.pact_helper_utils import load_pact_helper
from pact_test.runners.service_consumers.state_test import verify_state
class ServiceConsumerTestSuiteRunner(object):
pact_helper = None
def __init__(self, config):
self.config = config
def verify(self):
print('')
debug('Verify consumers: START')
pact_helper = load_pact_helper(self.config.consumer_tests_path)
if type(pact_helper) is Right:
self.pact_helper = pact_helper.value
tests = self.collect_tests()
if type(tests) is Right:
debug(str(len(tests.value)) + ' test(s) found.')
debug('Execute Pact Helper setup: START')
self.pact_helper.setup()
debug('Execute Pact Helper setup: DONE')
test_results = Right(list(map(self.verify_test, tests.value)))
debug('Execute Pact Helper tear down: START')
self.pact_helper.tear_down()
debug('Execute Pact Helper tear down: DONE')
debug('Verify consumers: DONE')
return test_results
error('Verify consumers: EXIT WITH ERRORS:')
error(tests.value)
return tests
error('Verify consumers: EXIT WITH ERRORS:')
error(pact_helper.value)
return pact_helper
def verify_test(self, test):
validity_check = test.is_valid()
if type(validity_check) is Right:
pact = get_pact(test.pact_uri)
if type(pact) is Right:
interactions = pact.value.get('interactions', {})
debug(str(len(interactions)) + ' interaction(s) found')
test_results = [verify_state(i, self.pact_helper, test) for i in interactions]
return Right({'test': test.__class__.__name__, 'results': test_results})
error(pact.value)
return pact
error(validity_check.value)
return validity_check
def collect_tests(self):
root = self.config.consumer_tests_path
files = list(filter(filter_rule, self.all_files()))
files = list(map(lambda f: os.path.join(root, f), files))
tests = []
for idx, filename in enumerate(files):
test = imp.load_source('test' + str(idx), filename)
for name, obj in inspect.getmembers(test):
if inspect.isclass(obj) and len(inspect.getmro(obj)) > 2:
test_parent = inspect.getmro(obj)[1].__name__
if test_parent == TEST_PARENT:
tests.append(obj())
if not files:
return Left(MISSING_TESTS)
return Right(tests)
def all_files(self):
return os.listdir(self.config.consumer_tests_path)
def filter_rule(filename):
return (filename != '__init__.py' and
filename.endswith('.py') and
filename.endswith('pact_helper.py') is False)
|
{
"content_hash": "470210d4cb41a3d98eb3815ed654f9c6",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 94,
"avg_line_length": 38.98765432098765,
"alnum_prop": 0.5880303989867004,
"repo_name": "Kalimaha/pact-test",
"id": "9abe39ddf9991a4d4c3a430986e1dbdffa84d1e6",
"size": "3158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pact_test/runners/service_consumers/test_suite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127995"
}
],
"symlink_target": ""
}
|
from PIL import Image
import numpy as np
from cps2zmq.process import Tile, ColorTile
from cps2zmq.process import GraphicAsset
class Sprite(GraphicAsset):
"""
A Sprite is a grouping of :py:mod:`~cps2zmq.gather.Tile.Tile` that use the same palette.
Attributes:
base_tile (str): the memory address of the first :py:mod:`~cps2zmq.gather.Tile.Tile`
tiles (:obj:`list` of :py:mod:`~cps2zmq.gather.Tile.Tile`): Tiles that make up the Sprite
palnum (int): which of the 32 palettes in a Frame the Sprite uses
location (int, int): the (x,y) coordinate where the Sprite will be drawn on the screen
size (int, int): (width, height) the size of the Sprite in Tiles. (1, 1) means a single Tile
flips (int, int): (flipx, flipy) determines if the Sprite is flipped over its X or Y axis
priority (int): determines which layer the Sprite is displayed on. 0 is lowest, 3 is highest
"""
def __init__(self, base_tile, tiles, palnum, location, size, flips, priority=0):
self.base_tile = base_tile
self.tiles = tiles
self.palnum = palnum
self.location = location
self.size = size
self.flips = flips
self.priority = priority
def __repr__(self):
addrs = [tile.address for tile in self.tiles if tile]
loc = " Location: (" + str(self.location[0]) + ", " + str(self.location[1])
size = " Size: (" + str(self.size[0]) + ", " + str(self.size[1])
return "Sprite contains tiles: " + str(addrs) + loc + ")" + size + ")"
def color_tiles(self, palette):
"""
Converts any :obj:`Tile` the :obj:`Sprite` has into :obj:`ColorTile`.
Args:
palette (dict): the palette to use.
"""
self.tiles = [ColorTile.from_tile(tile, palette)
for tile in self.tiles
if isinstance(tile, Tile)]
def to_array(self, flip=True):
"""
Provides contents of Sprite as a numpy array.
Does any necessary flips in the process.
Args:
flip (bool, optional): Whether or not the Sprite contents are flipped. Defaults to True.
Returns:
a 2D numpy array.
"""
arrays = [tile.to_array() for tile in self.tiles]
array2d = list2d(arrays, self.size)
array_rows = [np.concatenate(row, axis=1) for row in array2d]
preflip = np.concatenate(array_rows, axis=0)
if flip and self.flips[0]:
preflip = np.fliplr(preflip)
if flip and self.flips[1]:
preflip = np.flipud(preflip)
return preflip
def to_tile(self):
"""
This method is *probably* used when writing the contents of the :obj:`Sprite` to file.
Converts any :obj:`ColorTile` objects the :obj:`Sprite` has to :obj:`Tile` objects.
Returns:
a list of Tiles.
"""
return [t.to_tile() if isinstance(t, ColorTile) else t for t in self.tiles]
# todo: exception handling related to improperly formed dict
@classmethod
def from_dict(cls, dict_):
"""
A factory method to create a Sprite from a dict of params.
Args:
dict_ (dict): a dict of parameters.
Returns:
a Sprite. The Tiles in the Sprite are empty at this point though,
and will need to be filled in. This can be done by calling
`tile_operations.read_tiles_from_file`
"""
tile_number = dict_['base_tile']
size = dict_['size']
tiles = []
for i in range(size[1]):
for j in range(size[0]):
offset = i * 0x10 + j * 0x1
addr = hex(int(tile_number, 16) + offset)
tiles.append(Tile(addr, None))
dict_['tiles'] = tiles
return cls(**dict_)
@staticmethod
def sprite_mask(byte_data):
"""
Turns the 8 bytes of raw sprite information into something usable.
Args:
byte_data (:obj:`list`): a list of 4 uint16 containing all the data for a Sprite.
Returns:
            a dict. This dict can be used by the Sprite factory method :meth:`from_dict`.
"""
dict_ = {}
dict_['priority'] = (byte_data[0] & 0xC000) >> 14
top_half = "{0:#x}".format((byte_data[1] & 0x6000) >> 13)
bottom_half = "{0:x}".format(byte_data[2])
dict_['base_tile'] = ''.join([top_half, bottom_half])
tile_height = ((byte_data[3] & 0xF000) >> 12) + 1
tile_width = ((byte_data[3] & 0x0F00) >> 8) + 1
dict_['size'] = (tile_width, tile_height)
#(0 = Offset by X:-64,Y:-16, 1 = No offset)
offset = (byte_data[3] & 0x0080) >> 7
location_x = byte_data[0] & 0x03FF
location_y = byte_data[1] & 0x03FF
if not offset:
location_x -= 64
location_y -= 16
dict_['location'] = (location_x, location_y)
#Y flip, X flip (1= enable, 0= disable)
        flip_x = (byte_data[3] & 0x0040) >> 6
flip_y = (byte_data[3] & 0x0020) >> 5
dict_['flips'] = (flip_x, flip_y)
dict_['palnum'] = "{0:d}".format(byte_data[3] & 0x001F)
# Keeping these because maybe I'll need them one day
# dict_['eol'] = (byte_data[1] & 0x8000) >> 15
# dict_['mem_addr'] = "{0:x}".format(byte_data[4])
return dict_
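    # Illustrative decode (hypothetical halfwords, not taken from a real frame):
    # a minimal sketch of what sprite_mask produces for from_dict.
    #
    #   Sprite.sprite_mask([0x0140, 0x2090, 0x1234, 0x1185])
    #   # -> {'priority': 0,               # (0x0140 & 0xC000) >> 14
    #   #     'base_tile': '0x11234',      # '0x1' + '1234'
    #   #     'size': (2, 2),              # width 2, height 2
    #   #     'location': (320, 144),      # offset bit set, so no -64/-16 shift
    #   #     'flips': (0, 0),
    #   #     'palnum': '5'}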
# def _generate_tiles(self, tile_number, size):
# """
# Fills in the rest of the Tile info for a Sprite.
# Args:
# tile_number (str): the memory address of the base tile
# size ((int, int)): the size of the Sprite
# Return:
# a list of Tile.
# """
# tiles = []
# for i in range(size[1]):
# for j in range(size[0]):
# offset = i * 0x10 + j * 0x1
# addr = hex(int(tile_number, 16) + offset)
# tiles.append(Tile(addr, None))
# return tiles
@classmethod
def from_image(cls, image, sprite=None):
"""
        Converts an image into a :obj:`Sprite`.
Args:
image (str): path to an image.\
Currently only .bmp and .png images are known to work, others may or may not.
sprite (:obj:`Sprite`): The attributes of the :obj:`Sprite`\
will be used by the new :obj:`Sprite`.
Returns:
a Sprite.
"""
im = Image.open(image)
if sprite.flips[0]:
im = im.transpose(Image.FLIP_LEFT_RIGHT)
if sprite.flips[1]:
im = im.transpose(Image.FLIP_TOP_BOTTOM)
cropped_imgs = []
addresses = []
for i in range(sprite.size[1]):
for j in range(sprite.size[0]):
change = (16 * j, 16 * i, 16 + 16 * j, 16 + 16 * i)
cropped_imgs.append(im.crop(change))
changed_addr = int(sprite.base_tile, 16) + 0x10 * i + 0x1 * j
addresses.append(hex(changed_addr))
zipped = zip(cropped_imgs, addresses)
try:
tiles = [ColorTile(addr, list(img.getdata()), None) for img, addr in zipped]
except ValueError:
tiles = [Tile(addr, bytes(img.getdata())) for img, addr in zipped]
if sprite:
sprite.tiles = tiles
else:
sprite = cls(None, tiles, None, None, (None, None), (None, None))
return sprite
# todo: exception handling for sizing issues
def list2d(list_, size):
"""
Turns a linear :obj:`list` into a list of lists.
    Args:
        list_ (:obj:`list`): the linear list to reshape
        size (int, int): the desired size of the reshaped list
Returns:
a list of lists
"""
list_2d = []
for i in range(size[1]):
offset = size[0] * i
list_2d.append(list_[offset:offset + size[0]])
return list_2d
|
{
"content_hash": "f0dd3c4cc04ae99f963bdf9253c12bf4",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 100,
"avg_line_length": 33.94396551724138,
"alnum_prop": 0.5481904761904762,
"repo_name": "goosechooser/cps2-zmq",
"id": "6c6bdbfc700f7146accca0d9a07824a17d0bd48d",
"size": "7875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cps2zmq/process/Sprite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "4065"
},
{
"name": "Python",
"bytes": "57562"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ArrivalsProportionPerHour',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('hour', models.FloatField(default=1)),
('proportion', models.FloatField(default=1)),
],
),
migrations.CreateModel(
name='Ciclovia',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('place', models.CharField(max_length=20)),
('start_hour', models.FloatField(default=0)),
('end_hour', models.FloatField(default=0)),
('num_tracks', models.IntegerField(default=1)),
('reference_track', models.IntegerField(default=0)),
('reference_hour', models.IntegerField(default=0)),
('reference_arrival_rate', models.FloatField(default=0)),
('arrivals_loaded', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('filename', models.CharField(max_length=100)),
('docfile', models.FileField(upload_to=b'documents/%Y/%m/%d')),
],
),
migrations.CreateModel(
name='NeighboorInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('neighboorId', models.IntegerField(default=1)),
('probability', models.FloatField(default=1)),
('direction', models.CharField(max_length=10)),
('fromDirection', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='ParticipantType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activity', models.CharField(max_length=30)),
('velocity', models.FloatField(default=1)),
('percentage', models.FloatField(default=1)),
('ciclovia', models.ForeignKey(to='CicloviaProgram.Ciclovia')),
],
),
migrations.CreateModel(
name='SimulationParameters',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('replications', models.FloatField(default=1)),
('arrivals_probability_distribution', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='SimulationResults',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(verbose_name=b'date executed')),
('sim_time', models.FloatField(default=0)),
('total_arrivals', models.IntegerField(default=0)),
('average_time', models.FloatField(default=0)),
('standard_deviation_time', models.FloatField(default=0)),
('average_number_system', models.FloatField(default=0)),
('is_validation', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='SimulationResultsCompiled',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(verbose_name=b'date executed')),
('num_runs', models.FloatField(default=0)),
('avg_total_arrivals', models.IntegerField(default=0)),
('stdev_total_arrivals', models.IntegerField(default=0)),
('hw_total_arrivals', models.IntegerField(default=0)),
('average_number_system', models.FloatField(default=0)),
('stdev_number_system', models.FloatField(default=0)),
('hw_number_system', models.FloatField(default=0)),
('is_validation', models.BooleanField(default=False)),
('ciclovia', models.ForeignKey(to='CicloviaProgram.Ciclovia')),
],
),
migrations.CreateModel(
name='SimulationResultsCompiledFlowTrack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('hour', models.IntegerField(default=0)),
('avg_flow_hour', models.FloatField(default=0)),
('stdev_flow_hour', models.FloatField(default=0)),
('hw_flow_hour', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='SimulationResultsCompiledPerTrack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('track', models.IntegerField(default=0)),
('average_number_track', models.IntegerField(default=0)),
('stdev_number_track', models.IntegerField(default=0)),
('hw_number_track', models.IntegerField(default=0)),
('average_total_arrivals', models.IntegerField(default=0)),
('stdev_total_arrivals', models.IntegerField(default=0)),
('hw_total_arrivals', models.IntegerField(default=0)),
('average_total_flow', models.IntegerField(default=0)),
('stdev_total_flow', models.IntegerField(default=0)),
('hw_total_flow', models.IntegerField(default=0)),
('simulation_compiled', models.ForeignKey(to='CicloviaProgram.SimulationResultsCompiled')),
],
),
migrations.CreateModel(
name='SimulationResultsFlowPerTrack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('hour', models.IntegerField(default=0)),
('flow_hour', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='SimulationResultsPerTrack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('track', models.IntegerField(default=0)),
('total_arrivals', models.IntegerField(default=0)),
('total_flow', models.FloatField(default=0)),
('average_number_track', models.IntegerField(default=0)),
('simulation', models.ForeignKey(to='CicloviaProgram.SimulationResults')),
],
),
migrations.CreateModel(
name='TimeInSystemDistribution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.FloatField(default=1)),
('percentage', models.FloatField(default=1)),
('ciclovia', models.ForeignKey(to='CicloviaProgram.Ciclovia')),
],
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('id_track', models.IntegerField(default=100)),
('distance', models.FloatField(default=1)),
('probability', models.FloatField(default=1)),
('probabilityBegin', models.FloatField(default=1)),
('probabilityEnd', models.FloatField(default=1)),
('arrival_proportion', models.FloatField(default=1)),
('ciclovia', models.ForeignKey(to='CicloviaProgram.Ciclovia')),
],
),
migrations.AddField(
model_name='simulationresultsflowpertrack',
name='track_simulation',
field=models.ForeignKey(to='CicloviaProgram.SimulationResultsPerTrack'),
),
migrations.AddField(
model_name='simulationresultscompiledflowtrack',
name='track_simulation',
field=models.ForeignKey(to='CicloviaProgram.SimulationResultsCompiledPerTrack'),
),
migrations.AddField(
model_name='simulationresults',
name='ciclovia',
field=models.ForeignKey(to='CicloviaProgram.SimulationResultsCompiled'),
),
migrations.AddField(
model_name='neighboorinfo',
name='track',
field=models.ForeignKey(to='CicloviaProgram.Track'),
),
migrations.AddField(
model_name='arrivalsproportionperhour',
name='ciclovia',
field=models.ForeignKey(to='CicloviaProgram.Ciclovia'),
),
]
|
{
"content_hash": "24d744b90a677ca4f115f666d08138af",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 114,
"avg_line_length": 48.83076923076923,
"alnum_prop": 0.5639571518588532,
"repo_name": "CicloviaTeam/CicloviaProgram",
"id": "76131ba9aef14245592cebe47f18986f5926cc9c",
"size": "9546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CicloviaProgram/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50506"
},
{
"name": "HTML",
"bytes": "130779"
},
{
"name": "JavaScript",
"bytes": "206960"
},
{
"name": "Python",
"bytes": "145253"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class PendingFriendRequests(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the PendingFriendRequests Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(PendingFriendRequests, self).__init__(temboo_session, '/Library/Foursquare/Users/PendingFriendRequests')
def new_input_set(self):
return PendingFriendRequestsInputSet()
def _make_result_set(self, result, path):
return PendingFriendRequestsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return PendingFriendRequestsChoreographyExecution(session, exec_id, path)
class PendingFriendRequestsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the PendingFriendRequests
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API OAuth token string.)
"""
super(PendingFriendRequestsInputSet, self)._set_input('OauthToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
super(PendingFriendRequestsInputSet, self)._set_input('ResponseFormat', value)
class PendingFriendRequestsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the PendingFriendRequests Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class PendingFriendRequestsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return PendingFriendRequestsResultSet(response, path)
|
{
"content_hash": "be053cc2175e7fe8d9058e5a1ebf499b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 171,
"avg_line_length": 41.80327868852459,
"alnum_prop": 0.7247058823529412,
"repo_name": "jordanemedlock/psychtruths",
"id": "7004f39384b7b7e17a0b07f04699e17d3c3dbe33",
"size": "3448",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Foursquare/Users/PendingFriendRequests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from chatterbot.adapters.logic import EvaluateMathematically
from chatterbot.conversation import Statement
class EvaluateMathematicallyTests(TestCase):
def setUp(self):
self.adapter = EvaluateMathematically()
def test_can_process(self):
statement = Statement("What is 10 + 10 + 10?")
self.assertTrue(self.adapter.can_process(statement))
def test_can_not_process(self):
statement = Statement("What is your favorite song?")
self.assertFalse(self.adapter.can_process(statement))
def test_is_integer(self):
self.assertTrue(self.adapter.is_integer(42))
def test_is_float(self):
self.assertTrue(self.adapter.is_float(0.5))
def test_is_operator(self):
self.assertTrue(self.adapter.is_operator('+'))
def test_is_not_operator(self):
self.assertFalse(self.adapter.is_operator('9'))
def test_normalize_empty_string(self):
"""
If a string is empty, the string should be returned.
"""
self.assertEqual(self.adapter.normalize(""), "")
def test_normalize_text_to_lowercase(self):
normalized = self.adapter.normalize("HELLO")
self.assertTrue(normalized.islower())
def test_normalize_punctuation(self):
normalized = self.adapter.normalize("the end.")
self.assertEqual(normalized, "the end")
def test_load_data(self):
self.adapter.load_data("english")
self.assertIn("numbers", self.adapter.data)
class MathematicalEvaluationTests(TestCase):
def setUp(self):
import sys
self.adapter = EvaluateMathematically()
# Some tests may return decimals under python 3
self.python_version = sys.version_info[0]
def test_addition_operator(self):
statement = Statement("What is 100 + 54?")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( 100 + 54 ) = 154")
def test_subtraction_operator(self):
statement = Statement("What is 100 - 58?")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( 100 - 58 ) = 42")
def test_multiplication_operator(self):
statement = Statement("What is 100 * 20")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( 100 * 20 ) = 2000")
def test_division_operator(self):
statement = Statement("What is 100 / 20")
confidence, response = self.adapter.process(statement)
if self.python_version <= 2:
self.assertEqual(response.text, "( 100 / 20 ) = 5")
else:
self.assertEqual(response.text, "( 100 / 20 ) = 5.0")
def test_parenthesized_multiplication_and_addition(self):
statement = Statement("What is 100 + ( 1000 * 2 )?")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( 100 + ( ( 1000 * ( 2 ) ) ) ) = 2100")
def test_parenthesized_with_words(self):
statement = Statement("What is four plus 100 + ( 100 * 2 )?")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( 4 + ( 100 + ( ( 100 * ( 2 ) ) ) ) ) = 304")
def test_word_numbers_addition(self):
statement = Statement("What is one hundred + four hundred?")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( 100 + 400 ) = 500")
def test_word_division_operator(self):
statement = Statement("What is 100 divided by 100?")
confidence, response = self.adapter.process(statement)
if self.python_version <= 2:
self.assertEqual(response.text, "( 100 / 100 ) = 1")
else:
self.assertEqual(response.text, "( 100 / 100 ) = 1.0")
def test_large_word_division_operator(self):
statement = Statement("What is one thousand two hundred four divided by one hundred?")
confidence, response = self.adapter.process(statement)
if self.python_version <= 2:
self.assertEqual(response.text, "( 1000 + 200 + 4 ) / ( 100 ) = 12")
else:
self.assertEqual(response.text, "( 1000 + 200 + 4 ) / ( 100 ) = 12.04")
def test_negative_multiplication(self):
statement = Statement("What is -105 * 5")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( -105 * 5 ) = -525")
def test_negative_decimal_multiplication(self):
statement = Statement("What is -100.5 * 20?")
confidence, response = self.adapter.process(statement)
self.assertEqual(response.text, "( -100.5 * 20 ) = -2010.0")
|
{
"content_hash": "35dd81d83f73d3cb3998607b4a14454a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 94,
"avg_line_length": 38.056,
"alnum_prop": 0.6361151986546143,
"repo_name": "DarkmatterVale/ChatterBot",
"id": "6cc7e4c716aa37aba646e519db5e9ca053c39a87",
"size": "4757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/logic_adapter_tests/test_evaluate_mathematically.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "113984"
}
],
"symlink_target": ""
}
|
import os
import random
import signal
import subprocess
from ...guestutils import get_container_name, get_service_name, get_node_list
def run_service(cmd, logtype='log', logbase=None, logtarget=None):
"""Wrap the execution of a service with the necessary logging nets.
If logbase is provided (it is by default), log output will be redirected
(or teed) to a file named after the container executing the service inside
the logbase directory.
If Redis nodes are available in the environment as referenced by the given
logtarget, log output will be streamed via pipestash to one of the
available node containers, chosen at random when the service starts.
    The way this is accomplished varies depending on whether logbase is provided
and whether Redis nodes are available:
- if neither, log output flows to stdout and will be captured by
Docker;
- if logbase is provided, but no Redis nodes are available, the
output of the service is directly redirected to the log file;
- if logbase is not provided, but Redis nodes are available, the
output of the service is piped to pipestash;
- if logbase is provided and Redis nodes are available, the output
of the service is piped to a tee that will write the log file, and
the output of the tee is piped to pipestash.
    The whole pipeline, whatever its construction, waits for the service to
terminate. SIGTERM is also redirected from the parent to the service.
"""
if type(cmd) == str:
cmd = cmd.split(' ')
log = logbase \
and os.path.join(logbase, '{}.log'.format(get_container_name())) \
or None
if logbase and not os.path.exists(logbase):
os.makedirs(logbase)
redis = logtarget \
and get_node_list(logtarget, ports=['redis'], minimum=0) \
or None
stdout = redis and subprocess.PIPE or (log and open(log, 'w+') or None)
# Start the service with the provided command.
service = subprocess.Popen(cmd, stdout=stdout,
stderr=subprocess.STDOUT)
last = service
# Connect SIGTERM to the service process.
signal.signal(signal.SIGTERM, lambda signum, frame: service.terminate())
if redis:
if log:
# Tee to a local log file.
tee = subprocess.Popen(['tee', log], stdin=last.stdout,
stdout=subprocess.PIPE)
last.stdout.close()
last = tee
pipestash = subprocess.Popen(
['pipestash', '-t', logtype,
'-r', 'redis://{}/0'.format(random.choice(redis)),
'-R', 'logstash',
'-f', 'service={}'.format(get_service_name()),
'-S', get_container_name()],
stdin=last.stdout)
last.stdout.close()
last = pipestash
# Wait for the service to exit and return its return code.
last.communicate()
return service.wait()
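# A minimal usage sketch (hypothetical command, paths and ring name; not part
# of the original module): a guest entry point could hand its long-running
# process to run_service and exit with its return code.
#
#   rc = run_service('redis-server /etc/redis/redis.conf',
#                    logtype='redis',
#                    logbase='/var/log/services',
#                    logtarget='logstash')
#   exit(rc)
#
# With logbase set and a reachable 'logstash' node list exposing a 'redis'
# port, this follows the tee-then-pipestash path described in the docstring.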
|
{
"content_hash": "7587ac2dbb2987a2f2ffcc122edde56e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 38.37179487179487,
"alnum_prop": 0.637821583695289,
"repo_name": "signalfx/maestro-ng",
"id": "3c74ecfe4a8981ccd1a96270601807c4a22cd6ca",
"size": "3322",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "maestro/extensions/logging/logstash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "300"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "197384"
},
{
"name": "Shell",
"bytes": "2038"
}
],
"symlink_target": ""
}
|
"""
This script is responsible for changing the desktop background.
Commands used:
- /usr/bin/osascript /usr/local/zetta/mac_os_scripts/external/change_background.scpt (path)
Scripts referenced:
- osascript change_background.scpt
on run argv
tell application "System Events"
set monitors to a reference to every desktop
set numMonitors to count (monitors)
repeat with monitorIndex from 1 to numMonitors by 1
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 1
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 2
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 3
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 4
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 5
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 6
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 7
set picture of item monitorIndex of the monitors to "" & item 1 of argv & "" -- display 8
end repeat
end tell
end run
"""
from common import CLITieIn
class BackgroundSetter(CLITieIn):
def change_background(self, background_path):
command = '/usr/bin/osascript /usr/local/zetta/mac_os_scripts/external/change_background.scpt {0}'.format(
background_path
)
command_output = self.command(command)
if command_output.error_level != 0:
self._logger.error(
'{0} failed stating {1}'.format(
command, command_output
)
)
return False
return True
def run(self, background_path):
if not self.change_background(background_path):
self._logger.error('failed change_background with background_path={0}; cannot continue'.format(
background_path
))
return False
self._logger.debug('passed')
return True
if __name__ == '__main__':
from utils import get_argparser, get_args
parser = get_argparser()
parser.add_argument(
'-b',
'--background-path',
type=str,
required=True,
help='path for background file to use'
)
args = get_args(parser)
actor = BackgroundSetter(
sudo_password=args.sudo_password,
)
result = actor.run(
background_path=args.background_path
)
if not result:
exit(1)
exit(0)
|
{
"content_hash": "779e142ddbc417b7b0c36375f3dda93c",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 114,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6216318785578747,
"repo_name": "initialed85/mac_os_scripts",
"id": "6cb2d942f4c7a0c16c8298dd501fa9682a841af7",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mac_os_scripts/set_background.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "222"
},
{
"name": "Python",
"bytes": "106177"
},
{
"name": "Shell",
"bytes": "9732"
}
],
"symlink_target": ""
}
|
import re
import sys
import json
import types
import shlex
import datetime
import ProcessGroups
import EventServiceUtils
from threading import Lock
from DBProxyPool import DBProxyPool
from brokerage.SiteMapper import SiteMapper
from dataservice.Setupper import Setupper
from dataservice.Closer import Closer
from dataservice.TaLauncher import TaLauncher
from dataservice.ProcessLimiter import ProcessLimiter
# logger
from pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('TaskBuffer')
class TaskBuffer:
"""
task queue
"""
# constructor
def __init__(self):
self.proxyPool = None
self.lock = Lock()
self.processLimiter = None
# initialize
def init(self,dbname,dbpass,nDBConnection=10,useTimeout=False):
# lock
self.lock.acquire()
# create Proxy Pool
if self.proxyPool == None:
self.proxyPool = DBProxyPool(dbname,dbpass,nDBConnection,useTimeout)
# create process limiter
if self.processLimiter == None:
self.processLimiter = ProcessLimiter()
# release
self.lock.release()
# check production role
def checkProdRole(self,fqans):
for fqan in fqans:
# check production role
match = re.search('/([^/]+)/Role=production',fqan)
if match != None:
return True,match.group(1)
return False,None
# get priority parameters for user
def getPrioParameters(self,jobs,user,fqans,userDefinedWG,validWorkingGroup):
withProdRole = False
workingGroup = None
priorityOffset = 0
serNum = 0
weight = None
# get DB proxy
proxy = self.proxyPool.getProxy()
# check production role
withProdRole,workingGroup = self.checkProdRole(fqans)
if withProdRole and jobs != []:
# check dataset name
for tmpFile in jobs[-1].Files:
if tmpFile.type in ['output','log'] and not tmpFile.lfn.startswith('group'):
# reset
withProdRole,workingGroup = False,None
break
        # set high priority for production role
"""
if withProdRole and workingGroup in ['det-tile']:
serNum = 0
weight = 0.0
priorityOffset = 2000
"""
# reset nJob/weight for HC
if jobs != []:
if jobs[0].processingType in ['hammercloud','gangarobot','hammercloud-fax'] \
or jobs[0].processingType.startswith('gangarobot-'):
serNum = 0
weight = 0.0
if jobs[0].processingType in ['gangarobot','gangarobot-pft']:
priorityOffset = 3000
if jobs[0].processingType in ['hammercloud-fax']:
priorityOffset = 1001
# check quota
if weight == None:
weight = proxy.checkQuota(user)
# get nJob
if jobs == []:
serNum = proxy.getNumberJobsUser(user,workingGroup=userDefinedWG)
elif userDefinedWG and validWorkingGroup:
serNum = proxy.getNumberJobsUser(user,workingGroup=jobs[0].workingGroup)
else:
serNum = proxy.getNumberJobsUser(user,workingGroup=None)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return withProdRole,workingGroup,priorityOffset,serNum,weight
# store Jobs into DB
def storeJobs(self,jobs,user,joinThr=False,forkSetupper=False,fqans=[],hostname='',resetLocInSetupper=False,
checkSpecialHandling=True,toPending=False,oldPandaIDs=None,relationType=None, userVO='atlas'):
try:
_logger.debug("storeJobs : start for %s nJobs=%s" % (user,len(jobs)))
# check quota for priority calculation
weight = 0.0
userJobID = -1
userJobsetID = -1
userStatus = True
priorityOffset = 0
userCountry = None
useExpress = False
nExpressJobs = 0
useDebugMode = False
# check ban user except internally generated jobs
if len(jobs) > 0 and not jobs[0].prodSourceLabel in ProcessGroups.internalSourceLabels:
# get DB proxy
proxy = self.proxyPool.getProxy()
# check user status
tmpStatus = proxy.checkBanUser(user,jobs[0].prodSourceLabel)
# release proxy
self.proxyPool.putProxy(proxy)
# return if DN is blocked
if not tmpStatus:
_logger.debug("storeJobs : end for %s DN is blocked 1" % user)
return []
# set parameters for user jobs
if len(jobs) > 0 and (jobs[0].prodSourceLabel in ['user','panda','ptest','rc_test','ssc']) \
and (not jobs[0].processingType in ['merge','unmerge']):
# get DB proxy
proxy = self.proxyPool.getProxy()
# get JobID and status
userJobID,userJobsetID,userStatus = proxy.getUserParameter(user,jobs[0].jobDefinitionID,jobs[0].jobsetID)
# get site access
userSiteAccess = proxy.checkSiteAccess(jobs[0].computingSite,user)
# check quota for express jobs
if 'express' in jobs[0].specialHandling:
expressQuota = proxy.getExpressJobs(user)
if expressQuota != None and expressQuota['status'] and expressQuota['quota'] > 0:
nExpressJobs = expressQuota['quota']
if nExpressJobs > 0:
useExpress = True
# debug mode
if 'debug' in jobs[0].specialHandling:
debugJobList = proxy.getActiveDebugJobs(user)
if len(debugJobList) < ProcessGroups.maxDebugJobs:
useDebugMode = True
# release proxy
self.proxyPool.putProxy(proxy)
# get site spec
siteMapper = SiteMapper(self)
tmpSiteSpec = siteMapper.getSite(jobs[0].computingSite)
# check allowed groups
if userStatus and hasattr(tmpSiteSpec,'allowedgroups') and (not tmpSiteSpec.allowedgroups in ['',None]):
# set status to False when allowedgroups is defined
userStatus = False
# loop over all groups
for tmpGroup in tmpSiteSpec.allowedgroups.split(','):
if tmpGroup == '':
continue
# loop over all FQANs
for tmpFQAN in fqans:
if re.search('^%s' % tmpGroup,tmpFQAN) != None:
userStatus = True
break
# escape
if userStatus:
break
# get priority offset
if hasattr(tmpSiteSpec,'priorityoffset') and (not tmpSiteSpec.priorityoffset in ['',None]):
# loop over all groups
for tmpGP in tmpSiteSpec.priorityoffset.split(','):
if tmpGP == '':
continue
# get group and offset
tmpGroup = tmpGP.split(':')[0]
try:
tmpOffset = int(tmpGP.split(':')[-1])
except:
tmpOffset = 0
# loop over all FQANs
for tmpFQAN in fqans:
_logger.debug(tmpFQAN)
if re.search('^%s/' % tmpGroup,tmpFQAN) != None or \
re.search('%s$' % tmpGroup,tmpFQAN) != None:
# use the largest offset
if tmpOffset > priorityOffset:
priorityOffset = tmpOffset
break
# check site access
if hasattr(tmpSiteSpec,'accesscontrol') and tmpSiteSpec.accesscontrol == 'grouplist':
if userSiteAccess == {} or userSiteAccess['status'] != 'approved':
# user is not allowed
userStatus = False
# set priority offset
if userStatus:
if userSiteAccess.has_key('poffset') and userSiteAccess['poffset'] > priorityOffset:
priorityOffset = userSiteAccess['poffset']
# extract country group
for tmpFQAN in fqans:
match = re.search('^/atlas/([^/]+)/',tmpFQAN)
if match != None:
tmpCountry = match.group(1)
# use country code or usatlas
if len(tmpCountry) == 2:
userCountry = tmpCountry
break
# usatlas
if tmpCountry in ['usatlas']:
userCountry = 'us'
break
# return if DN is blocked
if not userStatus:
_logger.debug("storeJobs : end for %s DN is blocked 2" % user)
return []
# extract VO
for tmpFQAN in fqans:
match = re.search('^/([^/]+)/',tmpFQAN)
if match != None:
userVO = match.group(1)
break
# get number of jobs currently in PandaDB
serNum = 0
userDefinedWG = False
validWorkingGroup = False
usingBuild = False
withProdRole = False
workingGroup = None
if len(jobs) > 0 and (jobs[0].prodSourceLabel in ['user','panda']) \
and (not jobs[0].processingType in ['merge','unmerge']):
# check workingGroup
if not jobs[0].workingGroup in ['',None,'NULL']:
userDefinedWG = True
if userSiteAccess != {}:
if userSiteAccess['status'] == 'approved' and jobs[0].workingGroup in userSiteAccess['workingGroups']:
# valid workingGroup
validWorkingGroup = True
# using build for analysis
if jobs[0].prodSourceLabel == 'panda':
usingBuild = True
# get priority parameters for user
withProdRole,workingGroup,priorityOffset,serNum,weight = self.getPrioParameters(jobs,user,fqans,userDefinedWG,
validWorkingGroup)
# get DB proxy
proxy = self.proxyPool.getProxy()
# get group job serial number
groupJobSerialNum = 0
if len(jobs) > 0 and (jobs[0].prodSourceLabel in ['user','panda']) \
and (not jobs[0].processingType in ['merge','unmerge']):
for tmpFile in jobs[-1].Files:
if tmpFile.type in ['output','log'] and '$GROUPJOBSN' in tmpFile.lfn:
tmpSnRet = proxy.getSerialNumberForGroupJob(user)
if tmpSnRet['status']:
groupJobSerialNum = tmpSnRet['sn']
break
# loop over all jobs
ret =[]
newJobs=[]
usePandaDDM = False
firstLiveLog = True
nRunJob = 0
esJobsetMap = {}
for idxJob,job in enumerate(jobs):
# set JobID. keep original JobID when retry
if userJobID != -1 and job.prodSourceLabel in ['user','panda'] \
and (job.attemptNr in [0,'0','NULL'] or \
(not job.jobExecutionID in [0,'0','NULL']) or \
job.lockedby == 'jedi') \
and (not jobs[0].processingType in ['merge','unmerge']):
job.jobDefinitionID = userJobID
# set jobsetID
if job.prodSourceLabel in ['user','panda','ptest','rc_test']:
job.jobsetID = userJobsetID
# set specialHandling
if job.prodSourceLabel in ['user','panda']:
if checkSpecialHandling:
specialHandling = ''
# debug mode
if useDebugMode and nRunJob == 0 and job.prodSourceLabel == 'user':
specialHandling += 'debug,'
# express mode
if useExpress and (nRunJob < nExpressJobs or job.prodSourceLabel == 'panda'):
specialHandling += 'express,'
# get DDM backend
ddmBackEnd = job.getDdmBackEnd()
# reset specialHandling
specialHandling = specialHandling[:-1]
job.specialHandling = specialHandling
# set DDM backend
if ddmBackEnd != None:
job.setDdmBackEnd(ddmBackEnd)
if job.prodSourceLabel != 'panda':
nRunJob += 1
# set relocation flag
if job.computingSite != 'NULL' and job.relocationFlag != 2:
job.relocationFlag = 1
                # protection against empty jobParameters
if job.jobParameters in ['',None,'NULL']:
job.jobParameters = ' '
# set country group and nJobs (=taskID)
if job.prodSourceLabel in ['user','panda']:
if job.lockedby != 'jedi':
job.countryGroup = userCountry
# set workingGroup
if not validWorkingGroup:
if withProdRole:
# set country group if submitted with production role
job.workingGroup = workingGroup
else:
if userDefinedWG:
# reset invalid working group
job.workingGroup = None
# set nJobs (=taskID)
if usingBuild:
tmpNumBuild = 1
tmpNunRun = len(jobs) - 1
else:
tmpNumBuild = 0
tmpNunRun = len(jobs)
# encode
job.taskID = tmpNumBuild + (tmpNunRun << 1)
# change TRF URL just in case
if job.transformation.startswith('http://www.usatlas.bnl.gov/svn/panda/pathena/trf'):
job.transformation = re.sub('^http://www.usatlas.bnl.gov/svn/panda/pathena/trf/',
'http://pandaserver.cern.ch:25080/trf/user/',
job.transformation)
# set hostname
if hostname != '':
job.creationHost = hostname
                # extract file info, change specialHandling for event service
eventServiceInfo,job.specialHandling,esIndex = EventServiceUtils.decodeFileInfo(job.specialHandling)
origEsJob = False
if eventServiceInfo != {}:
# set jobsetID
if esJobsetMap.has_key(esIndex):
job.jobsetID = esJobsetMap[esIndex]
else:
origEsJob = True
if oldPandaIDs != None and len(oldPandaIDs) > idxJob:
jobOldPandaIDs = oldPandaIDs[idxJob]
else:
jobOldPandaIDs = None
# insert job to DB
if not proxy.insertNewJob(job,user,serNum,weight,priorityOffset,userVO,groupJobSerialNum,
toPending,origEsJob,eventServiceInfo,oldPandaIDs=jobOldPandaIDs,
relationType=relationType):
# reset if failed
job.PandaID = None
else:
# live log
if job.prodSourceLabel in ['user','panda']:
if ' --liveLog ' in job.jobParameters:
# enable liveLog only for the first one
if firstLiveLog:
# set file name
repPatt = ' --liveLog stdout.%s ' % job.PandaID
else:
# remove the option
repPatt = ' '
job.jobParameters = re.sub(' --liveLog ',repPatt,job.jobParameters)
firstLiveLog = False
# append
newJobs.append(job)
# mapping of jobsetID for event service
if origEsJob:
esJobsetMap[esIndex] = job.jobsetID
if job.prodSourceLabel in ['user','panda','ptest','rc_test']:
ret.append((job.PandaID,job.jobDefinitionID,{'jobsetID':job.jobsetID}))
else:
ret.append((job.PandaID,job.jobDefinitionID,job.jobName))
serNum += 1
# release DB proxy
self.proxyPool.putProxy(proxy)
# set up dataset
if not toPending:
if joinThr:
thr = Setupper(self,newJobs,pandaDDM=usePandaDDM,forkRun=forkSetupper,resetLocation=resetLocInSetupper)
thr.start()
thr.join()
else:
# cannot use 'thr =' because it may trigger garbage collector
Setupper(self,newJobs,pandaDDM=usePandaDDM,forkRun=forkSetupper,resetLocation=resetLocInSetupper).start()
# return jobIDs
_logger.debug("storeJobs : end for %s succeeded" % user)
return ret
except:
errType,errValue = sys.exc_info()[:2]
_logger.error("storeJobs : %s %s" % (errType,errValue))
return "ERROR: ServerError with storeJobs"
# lock jobs for reassign
def lockJobsForReassign(self,tableName,timeLimit,statList,labels,processTypes,sites,clouds,
useJEDI=False,onlyReassignable=False,useStateChangeTime=False):
# get DB proxy
proxy = self.proxyPool.getProxy()
# exec
res = proxy.lockJobsForReassign(tableName,timeLimit,statList,labels,processTypes,sites,clouds,
useJEDI,onlyReassignable,useStateChangeTime)
# release DB proxy
self.proxyPool.putProxy(proxy)
# return
return res
# lock jobs for finisher
def lockJobsForFinisher(self,timeNow,rownum,highPrio):
# get DB proxy
proxy = self.proxyPool.getProxy()
# exec
res = proxy.lockJobsForFinisher(timeNow,rownum,highPrio)
# release DB proxy
self.proxyPool.putProxy(proxy)
# return
return res
# get number of activated/defined jobs with output datasets
def getNumWaitingJobsWithOutDS(self,outputDSs):
# get DB proxy
proxy = self.proxyPool.getProxy()
# exec
res = proxy.getNumWaitingJobsWithOutDS(outputDSs)
# release DB proxy
self.proxyPool.putProxy(proxy)
# return
return res
# resubmit jobs
def resubmitJobs(self,jobIDs):
# get DB proxy
proxy = self.proxyPool.getProxy()
jobs=[]
# get jobs
for jobID in jobIDs:
res = proxy.peekJob(jobID,True,False,False,False)
if res:
jobs.append(res)
# release DB proxy
self.proxyPool.putProxy(proxy)
# set up dataset
if len(jobs) > 0:
Setupper(self,jobs).start()
# return jobIDs
return True
# update overall job information
def updateJobs(self,jobs,inJobsDefined,oldJobStatusList=None,extraInfo=None):
# get DB proxy
proxy = self.proxyPool.getProxy()
# loop over all jobs
returns = []
ddmIDs = []
ddmAttempt = 0
newMover = None
for idxJob,job in enumerate(jobs):
# update DB
tmpddmIDs = []
if oldJobStatusList != None and idxJob < len(oldJobStatusList):
oldJobStatus = oldJobStatusList[idxJob]
else:
oldJobStatus = None
if job.jobStatus == 'failed' and job.prodSourceLabel == 'user' and not inJobsDefined:
# keep failed analy jobs in Active4
ret = proxy.updateJob(job,inJobsDefined,oldJobStatus=oldJobStatus)
elif job.jobStatus in ['finished','failed','cancelled']:
ret,tmpddmIDs,ddmAttempt,newMover = proxy.archiveJob(job,inJobsDefined,extraInfo=extraInfo)
else:
ret = proxy.updateJob(job,inJobsDefined,oldJobStatus=oldJobStatus,extraInfo=extraInfo)
returns.append(ret)
# collect IDs for reassign
if ret:
ddmIDs += tmpddmIDs
# release proxy
self.proxyPool.putProxy(proxy)
# retry mover
if newMover != None:
self.storeJobs([newMover],None,joinThr=True)
# reassign jobs when ddm failed
if ddmIDs != []:
self.reassignJobs(ddmIDs,ddmAttempt,joinThr=True)
# return
return returns
# update job jobStatus only
def updateJobStatus(self,jobID,jobStatus,param,updateStateChange=False,attemptNr=None):
# get DB proxy
proxy = self.proxyPool.getProxy()
# update DB and buffer
if re.match('^finished$',jobStatus,re.I) or re.match('^failed$',jobStatus,re.I):
ret = proxy.archiveJobLite(jobID,jobStatus,param)
else:
ret = proxy.updateJobStatus(jobID,jobStatus,param,updateStateChange,attemptNr)
# release proxy
self.proxyPool.putProxy(proxy)
return ret
# finalize pending analysis jobs
def finalizePendingJobs(self,prodUserName,jobDefinitionID,waitLock=False):
# get DB proxy
proxy = self.proxyPool.getProxy()
# update DB
ret = proxy.finalizePendingJobs(prodUserName,jobDefinitionID,waitLock)
# release proxy
self.proxyPool.putProxy(proxy)
return ret
# retry job
def retryJob(self,jobID,param,failedInActive=False,changeJobInMem=False,inMemJob=None,
getNewPandaID=False,attemptNr=None,recoverableEsMerge=False):
# get DB proxy
proxy = self.proxyPool.getProxy()
# update DB
ret = proxy.retryJob(jobID,param,failedInActive,changeJobInMem,inMemJob,
getNewPandaID,attemptNr,recoverableEsMerge)
# release proxy
self.proxyPool.putProxy(proxy)
return ret
# retry failed analysis jobs in Active4
def retryJobsInActive(self,prodUserName,jobDefinitionID,isJEDI=False):
# get DB proxy
proxy = self.proxyPool.getProxy()
# update DB
ret = proxy.retryJobsInActive(prodUserName,jobDefinitionID,isJEDI)
# release proxy
self.proxyPool.putProxy(proxy)
return ret
# activate jobs
def activateJobs(self,jobs):
# get DB proxy
proxy = self.proxyPool.getProxy()
# loop over all jobs
returns = []
for job in jobs:
# update DB
ret = proxy.activateJob(job)
returns.append(ret)
# release proxy
self.proxyPool.putProxy(proxy)
return returns
# send jobs to jobsWaiting
def keepJobs(self,jobs):
# get DB proxy
proxy = self.proxyPool.getProxy()
# loop over all jobs
returns = []
for job in jobs:
# update DB
ret = proxy.keepJob(job)
returns.append(ret)
# release proxy
self.proxyPool.putProxy(proxy)
return returns
# delete stalled jobs
def deleteStalledJobs(self,libFileName):
# get DB proxy
proxy = self.proxyPool.getProxy()
# execute
ret = proxy.deleteStalledJobs(libFileName)
# release proxy
self.proxyPool.putProxy(proxy)
return ret
# set debug mode
def setDebugMode(self,dn,pandaID,prodManager,modeOn,workingGroup):
# get DB proxy
proxy = self.proxyPool.getProxy()
# check the number of debug jobs
hitLimit = False
if modeOn == True:
limitNum = None
if prodManager:
jobList = proxy.getActiveDebugJobs(prodRole=True)
limitNum = ProcessGroups.maxDebugProdJobs
elif workingGroup != None:
jobList = proxy.getActiveDebugJobs(workingGroup=workingGroup)
limitNum = ProcessGroups.maxDebugWgJobs
else:
jobList = proxy.getActiveDebugJobs(dn=dn)
limitNum = ProcessGroups.maxDebugJobs
if len(jobList) >= limitNum:
# exceeded
retStr = 'You already hit the limit on the maximum number of debug subjobs '
retStr += '(%s jobs). ' % limitNum
retStr += 'Please set the debug mode off for one of the following PandaIDs : '
for tmpID in jobList:
retStr += '%s,' % tmpID
retStr = retStr[:-1]
hitLimit = True
if not hitLimit:
# execute
retStr = proxy.setDebugMode(dn,pandaID,prodManager,modeOn,workingGroup)
# release proxy
self.proxyPool.putProxy(proxy)
return retStr
# get jobs
def getJobs(self,nJobs,siteName,prodSourceLabel,cpu,mem,diskSpace,node,timeout,computingElement,
atlasRelease,prodUserID,getProxyKey,countryGroup,workingGroup,allowOtherCountry,
taskID):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get waiting jobs
jobs,nSent = proxy.getJobs(nJobs,siteName,prodSourceLabel,cpu,mem,diskSpace,node,timeout,computingElement,
atlasRelease,prodUserID,countryGroup,workingGroup,allowOtherCountry,
taskID)
# release proxy
self.proxyPool.putProxy(proxy)
# get Proxy Key
proxyKey = {}
if getProxyKey and len(jobs) > 0:
# get MetaDB proxy
proxy = self.proxyPool.getProxy()
# get Proxy Key
proxyKey = proxy.getProxyKey(jobs[0].prodUserID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return jobs+[nSent,proxyKey]
# run task assignment
def runTaskAssignment(self,jobs):
# get DB proxy
proxy = self.proxyPool.getProxy()
# loop over all jobs
retList =[]
newJobs =[]
for job in jobs:
ret = None
if not job.taskID in ['NULL',0,'']:
# get cloud
cloudTask = proxy.getCloudTask(job.taskID)
if cloudTask != None and cloudTask.status == 'assigned':
ret = cloudTask.cloud
if ret == None:
# append for TA
newJobs.append(job)
retList.append(ret)
# release DB proxy
self.proxyPool.putProxy(proxy)
# run setupper
if newJobs != []:
TaLauncher(self,newJobs).start()
# return clouds
return retList
# reset modification time of a task to shorten retry interval
def resetTmodCloudTask(self,tid):
# get DBproxy
proxy = self.proxyPool.getProxy()
# run
res = proxy.resetTmodCloudTask(tid)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return res
# get assigning task
def getAssigningTask(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# run
res = proxy.getAssigningTask()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return res
# get fareshare policy
def getFaresharePolicy(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# run
res = proxy.getFaresharePolicy(True)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return res
# check merge job generation status
def checkMergeGenerationStatus(self,dn,jobID):
# return for NA
retNA = {'status':'NA','mergeIDs':[]}
try:
# get at most 2 PandaIDs
idStatus = self.getPandIDsWithJobID(dn,jobID,2)
if idStatus == {}:
return retNA
# use larger PandaID which corresponds to runXYZ
tmpKeys = idStatus.keys()
tmpKeys.sort()
pandaID = tmpKeys[-1]
# get job
tmpJobs = self.getFullJobStatus([pandaID])
if tmpJobs == [] or tmpJobs[0] == None:
return retNA
pandaJob = tmpJobs[0]
# non-merge job
if not '--mergeOutput' in pandaJob.jobParameters:
return retNA
# loop over all sub datasets
subDsList = []
mergeStatus = None
mergeIDs = []
for tmpFile in pandaJob.Files:
if tmpFile.type in ['output','log']:
if not tmpFile.destinationDBlock in subDsList:
subDsList.append(tmpFile.destinationDBlock)
# get dataset
tmpDsSpec = self.queryDatasetWithMap({'name':tmpFile.destinationDBlock})
if tmpDsSpec != None:
if tmpDsSpec.status in ['tobemerged']:
# going to be merged
mergeStatus = 'generating'
mergeIDs = []
elif tmpDsSpec.status in ['tobeclosed','closed','completed']:
# another dataset from --individualOutDS is waiting for Merger
if mergeStatus == 'generating':
continue
# set status
mergeStatus = 'generated'
# collect JobIDs of merge jobs
tmpMergeID = tmpDsSpec.MoverID
if not tmpMergeID in [0,None,'NULL']+mergeIDs:
mergeIDs.append(tmpMergeID)
# no merger most likely because jobs were killed
if mergeStatus == 'generated' and mergeIDs == []:
mergeStatus = 'aborted'
            # jobs are still running
if mergeStatus == None:
mergeStatus = 'standby'
# return
return {'status':mergeStatus,'mergeIDs':mergeIDs}
except:
return retNA
# get job status
def getJobStatus(self,jobIDs,fromDefined=True,fromActive=True,fromArchived=True,fromWaiting=True):
# get DBproxy
proxy = self.proxyPool.getProxy()
retStatus = []
# peek at job
for jobID in jobIDs:
res = proxy.peekJob(jobID,fromDefined,fromActive,fromArchived,fromWaiting)
if res:
retStatus.append(res.jobStatus)
else:
retStatus.append(None)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retStatus
# peek at jobs
def peekJobs(self,jobIDs,fromDefined=True,fromActive=True,fromArchived=True,fromWaiting=True,forAnal=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
retJobs = []
# peek at job
for jobID in jobIDs:
res = proxy.peekJob(jobID,fromDefined,fromActive,fromArchived,fromWaiting,forAnal)
if res:
retJobs.append(res)
else:
retJobs.append(None)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retJobs
# get PandaID with jobexeID
def getPandaIDwithJobExeID(self,jobexeIDs):
# get DBproxy
proxy = self.proxyPool.getProxy()
retJobs = []
# peek at job
for jobexeID in jobexeIDs:
res = proxy.getPandaIDwithJobExeID(jobexeID)
retJobs.append(res)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retJobs
# get PandaIDs with TaskID
def getPandaIDsWithTaskID(self,jediTaskID):
# get DBproxy
proxy = self.proxyPool.getProxy()
# exec
retJobs = proxy.getPandaIDsWithTaskID(jediTaskID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retJobs
# get slimmed file info with PandaIDs
def getSlimmedFileInfoPandaIDs(self,pandaIDs):
iPandaID = 0
nPandaID = 100
retInfo = {}
while iPandaID < len(pandaIDs):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
tmpRetInfo = proxy.getSlimmedFileInfoPandaIDs(pandaIDs[iPandaID:iPandaID+nPandaID])
# release proxy
self.proxyPool.putProxy(proxy)
iPandaID += nPandaID
if retInfo == {}:
retInfo = tmpRetInfo
else:
for outKey in tmpRetInfo.keys():
if not retInfo.has_key(outKey):
retInfo[outKey] = []
# append
for tmpItemRetInfo in tmpRetInfo[outKey]:
if not tmpItemRetInfo in retInfo[outKey]:
retInfo[outKey].append(tmpItemRetInfo)
# return
return retInfo
# get JobIDs in a time range
def getJobIDsInTimeRange(self,dn,timeRangeStr):
# check DN
if dn in ['NULL','','None',None]:
return []
# check timeRange
match = re.match('^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)$',timeRangeStr)
if match == None:
return []
timeRange = datetime.datetime(year = int(match.group(1)),
month = int(match.group(2)),
day = int(match.group(3)),
hour = int(match.group(4)),
minute = int(match.group(5)),
second = int(match.group(6)))
        # max range is 30 days
maxRange = datetime.datetime.utcnow() - datetime.timedelta(days=30)
if timeRange < maxRange:
timeRange = maxRange
retJobIDs = []
# get DBproxy
proxy = self.proxyPool.getProxy()
# get JobIDs
retJobIDs = proxy.getJobIDsInTimeRange(dn,timeRange,retJobIDs)
# release proxy
self.proxyPool.putProxy(proxy)
        # read ARCH when the time window is more than 3 days (minus a 3-hour margin)
if timeRange < datetime.datetime.utcnow() - datetime.timedelta(days=2,hours=21) :
# get ArchiveDBproxy
proxy = self.proxyPool.getProxy()
# get JobIDs
retJobIDs = proxy.getJobIDsInTimeRangeLog(dn,timeRange,retJobIDs)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retJobIDs
# get PandaIDs for a JobID
def getPandIDsWithJobID(self,dn,jobID,nJobs):
idStatus = {}
# check DN
if dn in ['NULL','','None',None]:
return idStatus
# check JobID
try:
jobID = long(jobID)
nJobs = long(nJobs)
except:
return idStatus
# get DBproxy
proxy = self.proxyPool.getProxy()
# get IDs
idStatus,buildJobID = proxy.getPandIDsWithJobID(dn,jobID,idStatus,nJobs)
# release proxy
self.proxyPool.putProxy(proxy)
# get ArchiveDBproxy
proxy = self.proxyPool.getProxy()
# get IDs
idStatus = proxy.getPandIDsWithJobIDLog(dn,jobID,idStatus,nJobs,buildJobID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return idStatus
# get PandaIDs for a JobsetID or JobdefID in jobsArchived
def getPandIDsWithIdInArch(self,prodUserName,id,isJobset):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getPandIDsWithIdInArch(prodUserName,id,isJobset)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get beyond pledge resource ratio
# ! this method is not thread-safe
def getPledgeResourceRatio(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getPledgeResourceRatio()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return proxy.beyondPledgeRatio
# get the number of waiting jobs with a dataset
def getNumWaitingJobsForPD2P(self,datasetName):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
nJobs = proxy.getNumWaitingJobsForPD2P(datasetName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return nJobs
# get the number of waiting jobsets with a dataset
def getNumWaitingJobsetsForPD2P(self,datasetName):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
nJobs = proxy.getNumWaitingJobsetsForPD2P(datasetName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return nJobs
# lock job for re-brokerage
def lockJobForReBrokerage(self,dn,jobID,simulation,forceOpt,forFailed=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get IDs
ret = proxy.lockJobForReBrokerage(dn,jobID,simulation,forceOpt,forFailed)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# reset buildJob for re-brokerage
def resetBuildJobForReBrokerage(self,pandaID):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get IDs
ret = proxy.resetBuildJobForReBrokerage(pandaID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get PandaIDs using libDS for re-brokerage
def getPandaIDsForReBrokerage(self,userName,jobID,fromActive,forFailed=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get IDs
ret = proxy.getPandaIDsForReBrokerage(userName,jobID,fromActive,forFailed)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # get input datasets for re-brokerage
def getInDatasetsForReBrokerage(self,jobID,userName):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get IDs
ret = proxy.getInDatasetsForReBrokerage(jobID,userName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get outDSs with userName/jobID
def getOutDSsForReBrokerage(self,userName,jobID):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get IDs
ret = proxy.getOutDSsForReBrokerage(userName,jobID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get full job status
def getFullJobStatus(self,jobIDs,fromDefined=True,fromActive=True,fromArchived=True,fromWaiting=True,forAnal=True):
retJobMap = {}
# peek at job
for jobID in jobIDs:
# get DBproxy for each job to avoid occupying connection for long time
proxy = self.proxyPool.getProxy()
# peek job
res = proxy.peekJob(jobID,fromDefined,fromActive,fromArchived,fromWaiting,forAnal)
retJobMap[jobID] = res
# release proxy
self.proxyPool.putProxy(proxy)
# get IDs
for jobID in jobIDs:
if retJobMap[jobID] == None:
# get ArchiveDBproxy
proxy = self.proxyPool.getProxy()
# peek job
res = proxy.peekJobLog(jobID)
retJobMap[jobID] = res
# release proxy
self.proxyPool.putProxy(proxy)
# sort
retJobs = []
for jobID in jobIDs:
retJobs.append(retJobMap[jobID])
# return
return retJobs
# get script for offline running
def getScriptOfflineRunning(self,pandaID):
try:
# get job
tmpJobs = self.getFullJobStatus([pandaID])
if tmpJobs == [] or tmpJobs[0] == None:
return "ERROR: Cannot get PandaID=%s in DB for the last 30 days" % pandaID
tmpJob = tmpJobs[0]
# check prodSourceLabel
if not tmpJob.prodSourceLabel in ['managed','test']:
return "ERROR: Non production job : prodSourceLabel=%s. This method is only for production jobs" % tmpJob.prodSourceLabel
# release and trf
tmpRels = tmpJob.homepackage.split("\n")
tmpPars = tmpJob.jobParameters.split("\n")
tmpTrfs = tmpJob.transformation.split("\n")
if not (len(tmpRels) == len(tmpPars) == len(tmpTrfs)):
return "ERROR: The number of releases or parameters or trfs is inconsitent with others"
# construct script
scrStr = "#retrieve inputs\n\n"
# collect inputs
dsFileMap = {}
for tmpFile in tmpJob.Files:
if tmpFile.type=='input':
if not dsFileMap.has_key(tmpFile.dataset):
dsFileMap[tmpFile.dataset] = []
if not tmpFile.lfn in dsFileMap[tmpFile.dataset]:
dsFileMap[tmpFile.dataset].append(tmpFile.scope+':'+tmpFile.lfn)
# get
for tmpDS,tmpFileList in dsFileMap.iteritems():
for tmpLFN in tmpFileList:
scrStr += "rucio download "
scrStr += "%s\n" % tmpLFN
# ln
tmpScope,tmpBareLFN = tmpLFN.split(':')
scrStr += "ln -fs %s/%s ./%s\n" % (tmpScope,tmpBareLFN,tmpBareLFN)
scrStr += "\n#transform commands\n\n"
for tmpIdx,tmpRel in enumerate(tmpRels):
# asetup
scrStr += "asetup --cmtconfig=%s %s,%s\n" % tuple([tmpJob.cmtConfig]+tmpRel.split("/"))
# athenaMP
if not tmpJob.coreCount in ['NULL',None] and tmpJob.coreCount > 1:
scrStr += "export ATHENA_PROC_NUMBER=%s\n" % tmpJob.coreCount
# add double quotes for zsh
tmpParamStr = tmpPars[tmpIdx]
tmpSplitter = shlex.shlex(tmpParamStr, posix=True)
tmpSplitter.whitespace = ' '
tmpSplitter.whitespace_split = True
# loop for params
for tmpItem in tmpSplitter:
tmpMatch = re.search('^(-[^=]+=)(.+)$',tmpItem)
if tmpMatch != None:
tmpArgName = tmpMatch.group(1)
tmpArgVal = tmpMatch.group(2)
tmpArgIdx = tmpParamStr.find(tmpArgName) + len(tmpArgName)
# add "
if tmpParamStr[tmpArgIdx] != '"':
tmpParamStr = tmpParamStr.replace(tmpMatch.group(0),
tmpArgName+'"'+tmpArgVal+'"')
# run trf
scrStr += "%s %s\n\n" % (tmpTrfs[tmpIdx],tmpParamStr)
return scrStr
except:
errType,errValue = sys.exc_info()[:2]
_logger.error("getScriptOfflineRunning : %s %s" % (errType,errValue))
return "ERROR: ServerError in getScriptOfflineRunning with %s %s" % (errType,errValue)
# kill jobs
def killJobs(self,ids,user,code,prodManager,wgProdRole=[]):
# get DBproxy
proxy = self.proxyPool.getProxy()
rets = []
# kill jobs
pandaIDforCloserMap = {}
for id in ids:
ret,userInfo = proxy.killJob(id,user,code,prodManager,True,wgProdRole)
rets.append(ret)
if ret and userInfo['prodSourceLabel'] in ['user','managed','test']:
jobIDKey = (userInfo['prodUserID'],userInfo['jobDefinitionID'],userInfo['jobsetID'])
if not pandaIDforCloserMap.has_key(jobIDKey):
pandaIDforCloserMap[jobIDKey] = id
# release proxy
self.proxyPool.putProxy(proxy)
# run Closer
try:
if pandaIDforCloserMap != {}:
for pandaIDforCloser in pandaIDforCloserMap.values():
tmpJobs = self.peekJobs([pandaIDforCloser])
tmpJob = tmpJobs[0]
if tmpJob != None:
tmpDestDBlocks = []
# get destDBlock
for tmpFile in tmpJob.Files:
if tmpFile.type in ['output','log']:
if not tmpFile.destinationDBlock in tmpDestDBlocks:
tmpDestDBlocks.append(tmpFile.destinationDBlock)
# run
cThr = Closer(self,tmpDestDBlocks,tmpJob)
cThr.start()
cThr.join()
except:
pass
# return
return rets
# reassign jobs
def reassignJobs(self,ids,attempt=0,joinThr=False,forkSetupper=False,forPending=False,
firstSubmission=True):
# get DBproxy
proxy = self.proxyPool.getProxy()
jobs = []
oldSubMap = {}
# keep old assignment
keepSiteFlag = False
if (attempt % 2) != 0:
keepSiteFlag = True
# reset jobs
for id in ids:
try:
# try to reset active job
if not forPending:
tmpRet = proxy.resetJob(id,keepSite=keepSiteFlag,getOldSubs=True)
if isinstance(tmpRet,types.TupleType):
ret,tmpOldSubList = tmpRet
else:
ret,tmpOldSubList = tmpRet,[]
if ret != None:
jobs.append(ret)
for tmpOldSub in tmpOldSubList:
if not oldSubMap.has_key(tmpOldSub):
oldSubMap[tmpOldSub] = ret
continue
# try to reset waiting job
tmpRet = proxy.resetJob(id,False,keepSite=keepSiteFlag,getOldSubs=False,forPending=forPending)
if isinstance(tmpRet,types.TupleType):
ret,tmpOldSubList = tmpRet
else:
ret,tmpOldSubList = tmpRet,[]
if ret != None:
jobs.append(ret)
# waiting jobs don't create sub or dis
continue
# try to reset defined job
if not forPending:
tmpRet = proxy.resetDefinedJob(id,keepSite=keepSiteFlag,getOldSubs=True)
if isinstance(tmpRet,types.TupleType):
ret,tmpOldSubList = tmpRet
else:
ret,tmpOldSubList = tmpRet,[]
if ret != None:
jobs.append(ret)
for tmpOldSub in tmpOldSubList:
if not oldSubMap.has_key(tmpOldSub):
oldSubMap[tmpOldSub] = ret
continue
except:
pass
# release DB proxy
self.proxyPool.putProxy(proxy)
# run Closer for old sub datasets
if not forPending:
for tmpOldSub,tmpJob in oldSubMap.iteritems():
cThr = Closer(self,[tmpOldSub],tmpJob)
cThr.start()
cThr.join()
# setup dataset
if jobs != []:
if joinThr:
thr = Setupper(self,jobs,resubmit=True,ddmAttempt=attempt,forkRun=forkSetupper,
firstSubmission=firstSubmission)
thr.start()
thr.join()
else:
# cannot use 'thr =' because it may trigger garbage collector
Setupper(self,jobs,resubmit=True,ddmAttempt=attempt,forkRun=forkSetupper,
firstSubmission=firstSubmission).start()
# return
return True
# awake jobs in jobsWaiting
def awakeJobs(self,ids):
# get DBproxy
proxy = self.proxyPool.getProxy()
jobs = []
# reset jobs
for id in ids:
# try to reset waiting job
ret = proxy.resetJob(id,False)
if ret != None:
jobs.append(ret)
# release DB proxy
self.proxyPool.putProxy(proxy)
# setup dataset
Setupper(self,jobs).start()
# return
return True
# query PandaIDs
def queryPandaIDs(self,jobDefIDs):
# get DBproxy
proxy = self.proxyPool.getProxy()
pandaIDs = []
# query PandaID
for jobDefID in jobDefIDs:
id = proxy.queryPandaID(jobDefID)
pandaIDs.append(id)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return pandaIDs
# query job info per cloud
def queryJobInfoPerCloud(self,cloud,schedulerID=None):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query job info
ret = proxy.queryJobInfoPerCloud(cloud,schedulerID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get PandaIDs to be updated in prodDB
def getPandaIDsForProdDB(self,limit,lockedby):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query PandaID
ret = proxy.getPandaIDsForProdDB(limit,lockedby)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update prodDBUpdateTime
def updateProdDBUpdateTimes(self,paramList):
retList = []
# get DBproxy
proxy = self.proxyPool.getProxy()
# update
for param in paramList:
ret = proxy.updateProdDBUpdateTime(param)
retList.append(ret)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# get PandaIDs at Site
def getPandaIDsSite(self,site,status,limit):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query PandaID
ids = proxy.getPandaIDsSite(site,status,limit)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ids
# get input files currently in used for analysis
def getFilesInUseForAnal(self,outDataset):
# get DBproxy
proxy = self.proxyPool.getProxy()
retList = []
# query LFNs
retList = proxy.getFilesInUseForAnal(outDataset)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# get list of dis dataset to get input files in shadow
def getDisInUseForAnal(self,outDataset):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query dis
retList = proxy.getDisInUseForAnal(outDataset)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# get input LFNs currently in use for analysis with shadow dis
def getLFNsInUseForAnal(self,inputDisList):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query dis
retList = proxy.getLFNsInUseForAnal(inputDisList)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# update input files and return corresponding PandaIDs
def updateInFilesReturnPandaIDs(self,dataset,status,fileLFN=''):
# get DBproxy
proxy = self.proxyPool.getProxy()
retList = []
# query PandaID
retList = proxy.updateInFilesReturnPandaIDs(dataset,status,fileLFN)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# update file status in dispatch dataset
def updateFileStatusInDisp(self,dataset,fileStatusMap):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # update file status
retVal = proxy.updateFileStatusInDisp(dataset,fileStatusMap)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retVal
# update output files and return corresponding PandaIDs
def updateOutFilesReturnPandaIDs(self,dataset,fileLFN=''):
# get DBproxy
proxy = self.proxyPool.getProxy()
retList = []
# query PandaID
retList = proxy.updateOutFilesReturnPandaIDs(dataset,fileLFN)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# get datasets associated with file
def getDatasetWithFile(self,lfn,jobPrioity=0):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # query datasets
retList = proxy.getDatasetWithFile(lfn,jobPrioity)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# get _dis datasets associated to _sub
def getAssociatedDisDatasets(self,subDsName):
# get DBproxy
proxy = self.proxyPool.getProxy()
retList = []
# query
retList = proxy.getAssociatedDisDatasets(subDsName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# insert sandbox file info
def insertSandboxFileInfo(self,userName,hostName,fileName,fileSize,checkSum):
# get DBproxy
proxy = self.proxyPool.getProxy()
# exec
ret= proxy.insertSandboxFileInfo(userName,hostName,fileName,fileSize,checkSum)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# check duplicated sandbox file
def checkSandboxFile(self,userName,fileSize,checkSum):
# get DBproxy
proxy = self.proxyPool.getProxy()
# exec
ret= proxy.checkSandboxFile(userName,fileSize,checkSum)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# insert datasets
def insertDatasets(self,datasets):
# get DBproxy
proxy = self.proxyPool.getProxy()
retList = []
# insert
for dataset in datasets:
ret= proxy.insertDataset(dataset)
retList.append(ret)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# query Dataset
def queryDatasetWithMap(self,map):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query Dataset
ret = proxy.queryDatasetWithMap(map)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# query last files in a dataset
def queryLastFilesInDataset(self,datasets):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query files
ret = proxy.queryLastFilesInDataset(datasets)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# set GUIDs
def setGUIDs(self,files):
# get DBproxy
proxy = self.proxyPool.getProxy()
# set GUIDs
ret = proxy.setGUIDs(files)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# query PandaID with dataset
def queryPandaIDwithDataset(self,datasets):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query Dataset
ret = proxy.queryPandaIDwithDataset(datasets)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# query PandaID with filenames
def queryPandaIDwithLFN(self,lfns):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query Dataset
ret = proxy.queryPandaIDwithLFN(lfns)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update dataset
def updateDatasets(self,datasets,withLock=False,withCriteria="",criteriaMap={}):
# get DBproxy
proxy = self.proxyPool.getProxy()
# update Dataset
retList = proxy.updateDataset(datasets,withLock,withCriteria,criteriaMap)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# delete dataset
def deleteDatasets(self,datasets):
# get DBproxy
proxy = self.proxyPool.getProxy()
retList = []
        # delete datasets
for dataset in datasets:
ret = proxy.deleteDataset(dataset)
retList.append(ret)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# query files with map
def queryFilesWithMap(self,map):
# get DBproxy
proxy = self.proxyPool.getProxy()
# query files
ret = proxy.queryFilesWithMap(map)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# count the number of files with map
def countFilesWithMap(self,map):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # count files
ret = proxy.countFilesWithMap(map)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# count the number of pending files
def countPendingFiles(self,pandaID,forInput=True):
# get DBproxy
proxy = self.proxyPool.getProxy()
# count files
ret = proxy.countPendingFiles(pandaID,forInput)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get serial number for dataset
def getSerialNumber(self,datasetname,definedFreshFlag=None):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get serial number
ret = proxy.getSerialNumber(datasetname,definedFreshFlag)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get serial number for group job
def getSerialNumberForGroupJob(self,name):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get serial number
ret = proxy.getSerialNumberForGroupJob(name)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# add metadata
def addMetadata(self,ids,metadataList):
# get DBproxy
proxy = self.proxyPool.getProxy()
# add metadata
index = 0
retList = []
for id in ids:
ret = proxy.addMetadata(id,metadataList[index])
retList.append(ret)
index += 1
# release proxy
self.proxyPool.putProxy(proxy)
# return
return retList
# add stdout
def addStdOut(self,id,stdout):
# get DBproxy
proxy = self.proxyPool.getProxy()
# add
ret = proxy.addStdOut(id,stdout)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# extract name from DN
def cleanUserID(self,id):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.cleanUserID(id)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# extract scope from dataset name
def extractScope(self,name):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.extractScope(name)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# change job priorities
def changeJobPriorities(self,newPrioMap):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.changeJobPriorities(newPrioMap)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get destinationDBlockToken for a dataset
def getDestTokens(self,dsname):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get token
ret = proxy.getDestTokens(dsname)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get destinationSE for a dataset
def getDestSE(self,dsname,fromArch=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get token
ret = proxy.getDestSE(dsname,fromArch)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get job statistics
def getJobStatistics(self,archived=False,predefined=False,workingGroup='',countryGroup='',jobType='',forAnal=None,minPriority=None):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get job statistics
ret = proxy.getJobStatistics(archived,predefined,workingGroup,countryGroup,jobType,forAnal,minPriority)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get job statistics with label
def getJobStatisticsWithLabel(self,siteStr=''):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get job statistics with label
ret = proxy.getJobStatisticsWithLabel(siteStr)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get job statistics for brokerage
def getJobStatisticsBrokerage(self,minPrio=None,maxPrio=None):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get stat
ret = proxy.getJobStatisticsBrokerage(minPrio,maxPrio)
# release proxy
self.proxyPool.putProxy(proxy)
# convert
conRet = ProcessGroups.countJobsPerGroup(ret)
# return
return conRet
# get job statistics for analysis brokerage
def getJobStatisticsAnalBrokerage(self,minPriority=None):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get stat
ret = proxy.getJobStatisticsAnalBrokerage(minPriority=minPriority)
# release proxy
self.proxyPool.putProxy(proxy)
# convert
conRet = ProcessGroups.countJobsPerGroupForAnal(ret)
# return
return conRet
# get the number of waiting jobs per site and user
def getJobStatisticsPerUserSite(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get stat
ret = proxy.getJobStatisticsPerUserSite()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get highest prio jobs
def getHighestPrioJobStat(self,perPG=False,useMorePG=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get stat
if not perPG:
ret = proxy.getHighestPrioJobStat()
else:
ret = proxy.getHighestPrioJobStatPerPG(useMorePG)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get queued analysis jobs at a site
def getQueuedAnalJobs(self,site,dn):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get stat
ret = proxy.getQueuedAnalJobs(site,dn)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get job statistics for ExtIF
def getJobStatisticsForExtIF(self,sourcetype=None):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get job statistics
ret = proxy.getJobStatisticsForExtIF(sourcetype)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get job statistics for Bamboo
def getJobStatisticsForBamboo(self,useMorePG=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get job statistics per processing type
ret = proxy.getJobStatisticsPerProcessingType(useMorePG)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get number of analysis jobs per user
def getNUserJobs(self,siteName):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get number of analysis jobs per user
tmpRet = proxy.getNUserJobs(siteName)
# release proxy
self.proxyPool.putProxy(proxy)
# get log proxy
proxy = self.proxyPool.getProxy()
# get Proxy Key
ret = {}
for userID,nJobs in tmpRet.iteritems():
proxyKey = proxy.getProxyKey(userID)
if proxyKey != {}:
# add nJobs
proxyKey['nJobs'] = nJobs
# append
ret[userID] = proxyKey
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get number of activated analysis jobs
def getNAnalysisJobs(self,nProcesses):
# get DBproxy
proxy = self.proxyPool.getProxy()
# count
ret = proxy.getNAnalysisJobs(nProcesses)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update transfer status for a dataset
def updateTransferStatus(self,datasetname,bitMap):
# get DBproxy
proxy = self.proxyPool.getProxy()
# update
ret = proxy.updateTransferStatus(datasetname,bitMap)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get CloudTask
def getCloudTask(self,tid):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get CloudTask
ret = proxy.getCloudTask(tid)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# set cloud to CloudTask
def setCloudTask(self,cloudTask):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # set CloudTask
ret = proxy.setCloudTask(cloudTask)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# see CloudTask
def seeCloudTask(self,tid):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # see CloudTask
ret = proxy.seeCloudTask(tid)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# set cloud to CloudTask by user
def setCloudTaskByUser(self,user,tid,cloud,status,forceUpdate=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # set CloudTask by user
ret = proxy.setCloudTaskByUser(user,tid,cloud,status,forceUpdate)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update site data
def updateSiteData(self,hostID,pilotRequests):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # update site data
ret = proxy.updateSiteData(hostID,pilotRequests)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get current site data
def getCurrentSiteData(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get current site data
ret = proxy.getCurrentSiteData()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# insert nRunning in site data
def insertnRunningInSiteData(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # insert nRunning in site data
ret = proxy.insertnRunningInSiteData()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get nRunning in site data
def getnRunningInSiteData(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
        # get nRunning in site data
ret = proxy.getnRunningInSiteData()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get site list
def getSiteList(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get site info
ret = proxy.getSiteList()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get site info
def getSiteInfo(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get site info
ret = proxy.getSiteInfo()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get cloud list
def getCloudList(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get cloud list
ret = proxy.getCloudList()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# check sites with release/cache
def checkSitesWithRelease(self,sites,releases=None,caches=None,cmtConfig=None,onlyCmtConfig=False,
cmtConfigPattern=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# check
ret = proxy.checkSitesWithRelease(sites,releases,caches,cmtConfig,onlyCmtConfig,cmtConfigPattern)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get sites with release/cache in cloud
def getSitesWithReleaseInCloud(self,cloud,releases=None,caches=None,validation=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# check
ret = proxy.getSitesWithReleaseInCloud(cloud,releases,caches,validation)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get list of cache prefix
def getCachePrefixes(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# check
ret = proxy.getCachePrefixes()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get list of cmtConfig
def getCmtConfigList(self,relaseVer):
# get DBproxy
proxy = self.proxyPool.getProxy()
# check
ret = proxy.getCmtConfigList(relaseVer)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get pilot owners
def getPilotOwners(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get pilot owners
ret = proxy.getPilotOwners()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get allowed nodes
def getAllowedNodes(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getAllowedNodes()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # get special dispatcher parameters
def getSpecialDispatchParams(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getSpecialDispatchParams()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get email address
def getEmailAddr(self,name,withDN=False,withUpTime=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getEmailAddr(name,withDN,withUpTime)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# set email address for a user
def setEmailAddr(self,userName,emailAddr):
# get DBproxy
proxy = self.proxyPool.getProxy()
# set
ret = proxy.setEmailAddr(userName,emailAddr)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get client version
def getPandaClientVer(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getPandaClientVer()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# register proxy key
def registerProxyKey(self,params):
# get DBproxy
proxy = self.proxyPool.getProxy()
# register proxy key
ret = proxy.registerProxyKey(params)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get proxy key
def getProxyKey(self,dn):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get proxy key
ret = proxy.getProxyKey(dn)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# add account to siteaccess
def addSiteAccess(self,siteID,dn):
# get DBproxy
proxy = self.proxyPool.getProxy()
# add account to siteaccess
ret = proxy.addSiteAccess(siteID,dn)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# list site access
def listSiteAccess(self,siteid,dn,longFormat=False):
# get DBproxy
proxy = self.proxyPool.getProxy()
# list site access
ret = proxy.listSiteAccess(siteid,dn,longFormat)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update site access
def updateSiteAccess(self,method,siteid,requesterDN,userName,attrValue):
# get DBproxy
proxy = self.proxyPool.getProxy()
# update site access
ret = proxy.updateSiteAccess(method,siteid,requesterDN,userName,attrValue)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# generate pilot token
def genPilotToken(self,schedulerhost,scheduleruser,schedulerid):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.genPilotToken(schedulerhost,scheduleruser,schedulerid)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# add files to memcached
def addFilesToMemcached(self,site,node,files):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.addFilesToMemcached(site,node,files)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# delete files from memcached
def deleteFilesFromMemcached(self,site,node,files):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.deleteFilesFromMemcached(site,node,files)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# flush memcached
def flushMemcached(self,site,node):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.flushMemcached(site,node)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# check files with memcached
def checkFilesWithMemcached(self,site,node,files):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.checkFilesWithMemcached(site,node,files)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get list of scheduler users
def getListSchedUsers(self):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getListSchedUsers()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# query an SQL return Status
def querySQLS(self,sql,varMap,arraySize=1000):
# get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.querySQLS(sql,varMap,arraySize)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# check quota
def checkQuota(self,dn):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.checkQuota(dn)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get JobID for user
def getJobIdUser(self,dn):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getJobIdUser(dn)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get user subscriptions
def getUserSubscriptions(self,datasetName,timeRange):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getUserSubscriptions(datasetName,timeRange)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get the number of user subscriptions
def getNumUserSubscriptions(self):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getNumUserSubscriptions()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# add user subscriptions
def addUserSubscription(self,datasetName,dq2IDs):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.addUserSubscription(datasetName,dq2IDs)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# increment counter for subscription
def incrementUsedCounterSubscription(self,datasetName):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.incrementUsedCounterSubscription(datasetName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get active datasets
def getActiveDatasets(self,computingSite,prodSourceLabel):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getActiveDatasets(computingSite,prodSourceLabel)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# check status of all sub datasets to trigger Notifier
def checkDatasetStatusForNotifier(self,jobsetID,jobDefinitionID,prodUserName):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.checkDatasetStatusForNotifier(jobsetID,jobDefinitionID,prodUserName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get MoU share for T2 PD2P
def getMouShareForT2PD2P(self):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# get
ret = proxy.getMouShareForT2PD2P()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# insert TaskParams
def insertTaskParamsPanda(self,taskParams,user,prodRole,fqans=[],parent_tid=None,properErrorCode=False):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# check user status
tmpStatus = proxy.checkBanUser(user,None,True)
if tmpStatus == True:
# exec
ret = proxy.insertTaskParamsPanda(taskParams,user,prodRole,fqans,parent_tid,properErrorCode)
elif tmpStatus == 1:
ret = False,"Failed to update DN in PandaDB"
elif tmpStatus == 2:
ret = False,"Failed to insert user info to PandaDB"
else:
ret = False,"The following DN is banned: DN={0}".format(user)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# send command to task
def sendCommandTaskPanda(self,jediTaskID,dn,prodRole,comStr,comComment=None,useCommit=True,properErrorCode=False,
comQualifier=None):
        # get DBproxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.sendCommandTaskPanda(jediTaskID,dn,prodRole,comStr,comComment,useCommit,
properErrorCode,comQualifier)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update unmerged datasets to trigger merging
def updateUnmergedDatasets(self,job,finalStatusDS):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.updateUnmergedDatasets(job,finalStatusDS)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get active JediTasks in a time range
def getJediTasksInTimeRange(self,dn,timeRangeStr):
# check DN
if dn in ['NULL','','None',None]:
return {}
# check timeRange
match = re.match('^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)$',timeRangeStr)
if match == None:
return {}
timeRange = datetime.datetime(year = int(match.group(1)),
month = int(match.group(2)),
day = int(match.group(3)),
hour = int(match.group(4)),
minute = int(match.group(5)),
second = int(match.group(6)))
        # max range is 30 days
maxRange = datetime.datetime.utcnow() - datetime.timedelta(days=30)
if timeRange < maxRange:
timeRange = maxRange
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getJediTasksInTimeRange(dn,timeRange)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get details of JediTask
def getJediTaskDetails(self,jediTaskID,fullFlag,withTaskInfo):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getJediTaskDetails(jediTaskID,fullFlag,withTaskInfo)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # get a list of event ranges for a PandaID
def getEventRanges(self,pandaID,jobsetID,jediTaskID,nRanges):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getEventRanges(pandaID,jobsetID,jediTaskID,nRanges)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # update an event range
def updateEventRange(self,eventRangeID,eventStatus,cpuCore,cpuConsumptionTime):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.updateEventRange(eventRangeID,eventStatus,cpuCore,cpuConsumptionTime)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # update event ranges
def updateEventRanges(self,eventRanges):
# decode json
try:
eventRanges = json.loads(eventRanges)
except:
return json.dumps("ERROR : failed to convert eventRanges with json")
retList = []
for eventRange in eventRanges:
# extract parameters
try:
eventRangeID = eventRange['eventRangeID']
eventStatus = eventRange['eventStatus']
cpuCore = None
if 'cpuCore' in eventRange:
cpuCore = eventRange['cpuCore']
cpuConsumptionTime = None
if 'cpuConsumptionTime' in eventRange:
cpuConsumptionTime = eventRange['cpuConsumptionTime']
except:
retList.append(False)
continue
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.updateEventRange(eventRangeID,eventStatus,cpuCore,cpuConsumptionTime)
# release proxy
self.proxyPool.putProxy(proxy)
retList.append(ret)
# return
return json.dumps(retList)
# get retry history
def getRetryHistoryJEDI(self,jediTaskID):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getRetryHistoryJEDI(jediTaskID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# change task priority
def changeTaskPriorityPanda(self,jediTaskID,newPriority):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.changeTaskPriorityPanda(jediTaskID,newPriority)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get WAN data flow matrix
def getWanDataFlowMaxtrix(self):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getWanDataFlowMaxtrix()
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# throttle job
def throttleJob(self,pandaID):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.throttleJob(pandaID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# unthrottle job
def unThrottleJob(self,pandaID):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.unThrottleJob(pandaID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get the list of jobdefIDs for failed jobs in a task
def getJobdefIDsForFailedJob(self,jediTaskID):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getJobdefIDsForFailedJob(jediTaskID)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# change task attribute
def changeTaskAttributePanda(self,jediTaskID,attrName,attrValue):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.changeTaskAttributePanda(jediTaskID,attrName,attrValue)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# increase attempt number for unprocessed files
def increaseAttemptNrPanda(self,jediTaskID,increasedNr):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.increaseAttemptNrPanda(jediTaskID,increasedNr)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get jediTaskID from taskName
def getTaskIDwithTaskNameJEDI(self,userName,taskName):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getTaskIDwithTaskNameJEDI(userName,taskName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# update error dialog for a jediTaskID
def updateTaskErrorDialogJEDI(self,jediTaskID,msg):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.updateTaskErrorDialogJEDI(jediTaskID,msg)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # update modification time for a jediTaskID to trigger subsequent processing
def updateTaskModTimeJEDI(self,jediTaskID,newStatus=None):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.updateTaskModTimeJEDI(jediTaskID,newStatus)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# check input file status
def checkInputFileStatusInJEDI(self,jobSpec):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.checkInputFileStatusInJEDI(jobSpec)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# increase memory limit
def increaseRamLimitJEDI(self,jediTaskID,jobRamCount):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.increaseRamLimitJEDI(jediTaskID,jobRamCount)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# reset files in JEDI
def resetFileStatusInJEDI(self,dn,prodManager,datasetName,lostFiles,lostInputDatasets):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.resetFileStatusInJEDI(dn,prodManager,datasetName,lostFiles,lostInputDatasets)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# get input datasets for output dataset
def getInputDatasetsForOutputDatasetJEDI(self,datasetName):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getInputDatasetsForOutputDatasetJEDI(datasetName)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# copy file record
def copyFileRecord(self,newLFN,fileSpec):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.copyFileRecord(newLFN,fileSpec)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# throttle jobs for resource shares
def throttleJobsForResourceShare(self,site):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.throttleJobsForResourceShare(site)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# activate jobs for resource shares
def activateJobsForResourceShare(self,site,nJobsPerQueue):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.activateJobsForResourceShare(site,nJobsPerQueue)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
    # get associated sub datasets for a single consumer job
def getDestDBlocksWithSingleConsumer(self,jediTaskID,PandaID,ngDatasets):
# get proxy
proxy = self.proxyPool.getProxy()
# exec
ret = proxy.getDestDBlocksWithSingleConsumer(jediTaskID,PandaID,ngDatasets)
# release proxy
self.proxyPool.putProxy(proxy)
# return
return ret
# Singleton
taskBuffer = TaskBuffer()
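
# ---------------------------------------------------------------------------
# Illustrative sketches (not part of the PanDA server). The two helpers below
# are hypothetical, self-contained examples added for clarity only: the first
# mirrors the get-proxy/run-query/put-proxy cycle that every method of
# TaskBuffer performs against self.proxyPool, using stand-in objects instead
# of real DBProxy instances; the second reproduces the shlex-based quoting of
# "-arg=value" trf parameters done in getScriptOfflineRunning. Neither helper
# touches the database and nothing in the server calls them.

def _demoProxyPoolPattern():
    # minimal stand-in pool; the real pool is thread-safe and hands out
    # DBProxy objects connected to the Panda database
    class _DummyProxy:
        def peekJob(self, pandaID):
            # a real DBProxy would query the DB here
            return 'activated'
    class _DummyPool:
        def __init__(self, nProxies=2):
            self.proxies = [_DummyProxy() for _ in range(nProxies)]
        def getProxy(self):
            return self.proxies.pop()
        def putProxy(self, proxy):
            self.proxies.append(proxy)
    pool = _DummyPool()
    # the same three steps used throughout this class
    proxy = pool.getProxy()        # get proxy
    ret = proxy.peekJob(12345)     # run the query
    pool.putProxy(proxy)           # release proxy
    return ret

def _demoQuoteTrfParams(paramStr='-a=foo bar -b=1'):
    # mirror the quoting loop in getScriptOfflineRunning: wrap the value of
    # every "-arg=value" token in double quotes so that zsh keeps it intact
    import re
    import shlex
    splitter = shlex.shlex(paramStr, posix=True)
    splitter.whitespace = ' '
    splitter.whitespace_split = True
    for item in splitter:
        match = re.search('^(-[^=]+=)(.+)$', item)
        if match is not None:
            argName, argVal = match.group(1), match.group(2)
            argIdx = paramStr.find(argName) + len(argName)
            if paramStr[argIdx] != '"':
                paramStr = paramStr.replace(match.group(0),
                                            argName + '"' + argVal + '"')
    return paramStr  # '-a="foo" bar -b="1"'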
|
{
"content_hash": "a368c2cff610966f86c975f1086bfad6",
"timestamp": "",
"source": "github",
"line_count": 2784,
"max_line_length": 137,
"avg_line_length": 33.26867816091954,
"alnum_prop": 0.561088317857914,
"repo_name": "fbarreir/panda-server",
"id": "a871829151643d2af74a655306e4c5f017e95eb6",
"size": "92620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandaserver/taskbuffer/TaskBuffer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLSQL",
"bytes": "18429"
},
{
"name": "Python",
"bytes": "2406184"
},
{
"name": "Shell",
"bytes": "15098"
}
],
"symlink_target": ""
}
|
import django
from django.contrib import admin
from .models import EmailConfirmation, EmailAddress
from .adapter import get_adapter
class EmailAddressAdmin(admin.ModelAdmin):
list_display = ('email', 'user', 'primary', 'verified')
list_filter = ('primary', 'verified')
search_fields = []
raw_id_fields = ('user',)
def __init__(self, *args, **kwargs):
super(EmailAddressAdmin, self).__init__(*args, **kwargs)
if not self.search_fields and django.VERSION[:2] < (1, 7):
self.search_fields = self.get_search_fields(None)
def get_search_fields(self, request):
base_fields = get_adapter(request).get_user_search_fields()
return ['email'] + list(map(lambda a: 'user__' + a, base_fields))
class EmailConfirmationAdmin(admin.ModelAdmin):
list_display = ('email_address', 'created', 'sent', 'key')
list_filter = ('sent',)
raw_id_fields = ('email_address',)
admin.site.register(EmailConfirmation, EmailConfirmationAdmin)
admin.site.register(EmailAddress, EmailAddressAdmin)
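
# Illustrative sketch (not part of django-allauth): get_search_fields above
# prefixes every user search field with 'user__' so the admin search follows
# the EmailAddress.user foreign key. The stand-alone helper below reproduces
# just that string composition; the field names are hypothetical examples and
# it does not require Django to run.
def _demo_compose_search_fields(user_search_fields=('username', 'first_name')):
    # e.g. ['email', 'user__username', 'user__first_name']
    return ['email'] + ['user__' + f for f in user_search_fields]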
|
{
"content_hash": "2617b8baa2f81ec55892e28aa4621f87",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 33.935483870967744,
"alnum_prop": 0.6739543726235742,
"repo_name": "wli/django-allauth",
"id": "9c2c8a2fd20358b104b6b389b93352e0cc9dace3",
"size": "1052",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "allauth/account/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42101"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "588192"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
# Create a defaultdict {entity:[entity, entity]}
class TripleReaderItems:
def __init__(self, triples_file):
self.baseuri = "http://www.wikidata.org/entity/"
self.d = defaultdict(list)
with open(triples_file) as f:
for l in f:
tmp = l.split("\t")
if len(tmp) < 3:
continue
# check whether object is also an entity
if self.baseuri in tmp[2]:
subj = tmp[0].strip().replace(self.baseuri, "")
obj = tmp[2].strip().replace(self.baseuri, "")
# We want to have the entity whether it's object or subject
self.d[subj].append(obj)
self.d[obj].append(subj)
def get(self, uri):
p = self.d[uri.strip().replace(self.baseuri, "")]
# add the key as a value to keep it as possible entity ID
p.append(uri.strip().replace(self.baseuri, ""))
return set(["%s%s" % (self.baseuri, i) for i in p])
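
# Illustrative usage sketch (not part of the original module): build a tiny
# triples file on the fly and query the reader. The file contents and entity
# IDs below are made up for the example; only lines whose object is another
# entity URI end up in the index, and get() also keeps the queried entity
# itself as a candidate.
def _demo_triple_reader():
    import os
    import tempfile
    base = "http://www.wikidata.org/entity/"
    lines = [
        "%sQ1\t%sP31\t%sQ2\n" % (base, base, base),
        "%sQ1\t%sP569\t\"1879-03-14\"\n" % (base, base),  # literal object, skipped
    ]
    with tempfile.NamedTemporaryFile(mode="w", suffix=".tsv", delete=False) as f:
        f.writelines(lines)
        path = f.name
    reader = TripleReaderItems(path)
    os.remove(path)
    # returns {".../Q1", ".../Q2"}
    return reader.get(base + "Q1")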
|
{
"content_hash": "bc96246bb261d16cedd1483e7b2e5898",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 38.32142857142857,
"alnum_prop": 0.5293569431500466,
"repo_name": "hadyelsahar/RE-NLG-Dataset",
"id": "7e2143eb47197120edb65a09e0715d99f4e7a4d5",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/triplereaderitems.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10162"
},
{
"name": "Python",
"bytes": "106508"
},
{
"name": "Shell",
"bytes": "17297"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
"""Transformation Pipeline"""
calibration_image_src = 'camera_cal/calibration*.jpg'
FONT_SIZE = 200
def calibrate_camera(calibration_image_src=calibration_image_src):
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob(calibration_image_src)
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp) # image
imgpoints.append(corners) #real world 2D
return {'objpoints': objpoints, 'imgpoints': imgpoints}
def undistort_image(img, pts):
objpoints, imgpoints = pts['objpoints'], pts['imgpoints']
    _shape = img.shape if len(img.shape) == 2 else img.shape[:2]  # drop the channel axis for color images
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, _shape,None,None)
dst = cv2.undistort(img, mtx, dist, None, mtx)
return dst
def gaussian_blur(img, kernel_size=7):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def sobel_thresholding(img, kernel_size=5, threshold=(30,255), dim='x'):
"""one dimensional thresholding"""
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    x, y = (1, 0) if dim == 'x' else (0, 1)
sobel = cv2.Sobel(gray, cv2.CV_64F, x, y, ksize = kernel_size)
sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255*sobel/np.max(sobel))
_, binary = cv2.threshold(scaled_sobel.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
return binary
def direction_thresholding(img, kernel_size=15, threshold = (0.9, 1.1)):
"""threshold by angle of the gradient"""
# Grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
_, binary = cv2.threshold(absgraddir.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
return binary
# color channel thresholding
def hls_thresholding(img, channel_ix, threshold=(150,255)):
"""HLS thresholding"""
# channel in HLS
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
channel = hls[:,:,channel_ix]
_, binary = cv2.threshold(channel.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
return binary
# color channel thresholding
def rgb_thresholding(img, channel_ix, threshold=(170,255)):
"""R thresholding"""
# R channel in BGR = cv2.COLOR_BGR2GRAY
channel = img[:,:,channel_ix]
_, binary = cv2.threshold(channel.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
return binary
# laplacian threshold
def laplacian_thresholding(img, kernel=15):
"""Laplacian thresholding"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
laplacian = cv2.Laplacian(gray,cv2.CV_32F,ksize= kernel)
return (laplacian < 0.15 * np.min(laplacian)).astype(np.uint8)
# gray channel threshold
def gray_thresholding(img, threshold=(130,255)):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, gray_binary = cv2.threshold(gray.astype('uint8'), threshold[0], threshold[1], cv2.THRESH_BINARY)
return gray_binary
def define_vertices(img):
imshape = img.shape
vertices = np.array([[(0,imshape[0]), (imshape[1]/2., 3*imshape[0]/5.), (imshape[1],imshape[0])]], dtype=np.int32)
if vertices.shape[1]:
vertices = [vertices]
return vertices
def region_of_interest(img):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
vertices = define_vertices(img)
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
# additional layer of cropping. Take 70% and lower on the y-axis
crop = range(0, int(7 * img.shape[0] / 10),1)
masked_image[crop,:] = 0
return masked_image
def lane_masking(img, threshold = 25):
img = gaussian_blur(img)
s_binary = hls_thresholding(img, 2)
gray_binary = gray_thresholding(img)
laplacian_binary = laplacian_thresholding(img)
# AND following OR gate
combined_binary = cv2.bitwise_and(laplacian_binary, cv2.bitwise_or(s_binary, gray_binary))
# Region of interest
filtered_binary = region_of_interest(combined_binary)
# If one side is not detected apply x AND y on that side
# This happens after histogram filtering and region of interest
_hist = filtered_binary.sum(axis=0)
middlepoint = filtered_binary.shape[1] // 2
_left_pixel_sum, _right_pixel_sum = _hist[:middlepoint].sum(), _hist[middlepoint:].sum()
if _left_pixel_sum < threshold or _right_pixel_sum < threshold:
print("appending additional binary masking")
_second_pass_binary = post_lane_masking(img)
filtered_second_pass = region_of_interest(_second_pass_binary)
filtered_binary = cv2.bitwise_or(filtered_binary, filtered_second_pass)
return filtered_binary
def post_lane_masking(img):
x_binary = sobel_thresholding(img)
y_binary = sobel_thresholding(img,dim='y')
x_y_binary = cv2.bitwise_and(x_binary, y_binary)
return x_y_binary
def histogram_filter(img, offset = 50):
filtered = img.copy()
_hist = filtered.sum(axis=0)
middlepoint = filtered.shape[1] // 2
left_max_ix, right_max_ix = _hist[:middlepoint].argmax(), _hist[middlepoint:].argmax() + middlepoint
left_range, right_range = (left_max_ix - offset, left_max_ix + offset), (right_max_ix - offset, right_max_ix + offset)
filtered[:,:left_range[0]] = 0
filtered[:,left_range[1]:right_range[0]] = 0
filtered[:,right_range[1]:] = 0
return filtered
def fit_lanes(masked_image):
# determine the mid point along x-axis
image_shape = masked_image.shape
mid_point = image_shape[1]/2
# index
ix = masked_image.nonzero()
left_xs = np.arange(0, mid_point, 1).reshape(-1,1)
right_xs = np.arange(mid_point, image_shape[1], 1).reshape(-1,1)
points = [(x,y) for y,x in zip(ix[0],ix[1])]
# linear regression for left and right space
left_points = np.array(list(filter(lambda x: x[0] < mid_point, points )))
right_points = np.array(list(filter(lambda x: x[0] >= mid_point, points )))
lr_left, lr_right = LinearRegression(), LinearRegression()
lr_right.fit(right_points[:,0].reshape(-1,1), right_points[:,1].reshape(-1,1))
lr_left.fit(left_points[:,0].reshape(-1,1), left_points[:,1].reshape(-1,1))
# prediction for left and right space
left_ys = lr_left.predict(left_xs).reshape(-1,)
right_ys = lr_right.predict(right_xs).reshape(-1,)
left_xs = left_xs.reshape(-1,)
right_xs = right_xs.reshape(-1,)
# Mask Y values
points_left = np.array(list(filter(lambda p: p[1] > image_shape[0]//2 and p[1] < image_shape[0] , zip(left_xs,left_ys))))
points_right = np.array(list(filter(lambda p: p[1] > image_shape[0]//2 and p[1] < image_shape[0], zip(right_xs,right_ys))))
return points_left, points_right
def retrieve_src_points(left, right, shape):
y_cutoff = 65 * shape // 100
left_cutoff_ix = (left[:,1] > y_cutoff).nonzero()[0].max()
right_cutoff_ix = (right[:,1] > y_cutoff).nonzero()[0].min()
p1, p2 = left[left_cutoff_ix,], right[right_cutoff_ix,]
    # Retrieve the trapezoid for perspective transformation
# We can use the points for all images
l = {}
l1, l2 = np.array([int(left[:,0].min()), int(left[:,1].max())]), p1
r = {}
r1, r2 = np.array([int(right[:,0].max()), int(right[:,1].max())]), p2
return np.float32([l1, l2, r1, r2])
def setup_transformation_pipeline(offset=10):
"""
Set up the transformation pipeline
Encapsulate the camera distortion and
transformation pipeline that includes warping of the detected lane points
"""
pts = calibrate_camera()
images = glob.glob("test_images/*")
# Pick the image with a straight lane to calibrate the camera
img = cv2.imread(images[0])
# run the same pipeline
dst = undistort_image(img, pts)
masked_img = lane_masking(dst)
_img = region_of_interest(masked_img)
# instead of polynomial fit
# use linear regression to determine the src for perspective transformation
left,right = fit_lanes(_img)
src = retrieve_src_points(left, right, masked_img.shape[0])
dst = np.float32([src[0], (src[0][0], offset), src[2], (src[2][0], offset)])
return TransformationPipeline(pts, src, dst)
class PerspectiveTransformer:
def __init__(self, src, dst):
self.src = src #both src and dst should be mappings that are representative of a straight lane
self.dst = dst
self.M = cv2.getPerspectiveTransform(src, dst)
self.M_inv = cv2.getPerspectiveTransform(dst, src)
def transform(self, img):
return cv2.warpPerspective(img, self.M, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
def inverse_transform(self, img):
return cv2.warpPerspective(img, self.M_inv, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
class TransformationPipeline():
def __init__(self, camera_calibration, src, dst ):
self.camera_calibration = camera_calibration
self.perspective_transformer = PerspectiveTransformer(src, dst)
def transform(self, img):
_img = self.undistort_image(img)
        # mask the lanes, warp to a bird's-eye view, then keep only the columns near the histogram peaks
binary_img = self.lane_masking(_img)
warped_image = self.perspective_transform(binary_img)
filtered_warped_image = self.histogram_filter(warped_image)
return filtered_warped_image
def undistort_image(self, img):
return undistort_image(img, self.camera_calibration)
def lane_masking(self, img):
return lane_masking(img)
    def post_lane_masking(self, img):
        return post_lane_masking(img)
def region_of_interest(self, img):
# Filters the image for the lower trapezoid
return region_of_interest(img)
def perspective_transform(self, img):
return self.perspective_transformer.transform(img)
def inverse_perspective_transform(self, img):
return self.perspective_transformer.inverse_transform(img)
def histogram_filter(self, img):
return histogram_filter(img)
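# Illustrative usage sketch; the image path is a placeholder and only names
# defined above are used:
#   pipeline = setup_transformation_pipeline(offset=10)
#   frame = cv2.imread('test_images/test1.jpg')
#   birdseye_binary = pipeline.transform(frame)               # undistort -> mask -> warp -> filter
#   back_projected = pipeline.inverse_perspective_transform(birdseye_binary)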
|
{
"content_hash": "351bcdad49a0f3f64622c694ed22572e",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 127,
"avg_line_length": 40.18556701030928,
"alnum_prop": 0.6674362921156148,
"repo_name": "dzorlu/sdc",
"id": "560ef18b8eca8341a1c56f24883af1946c09cd96",
"size": "11694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "advanced_lane_detection/image_transformation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "351494"
},
{
"name": "C++",
"bytes": "16386318"
},
{
"name": "CMake",
"bytes": "270470"
},
{
"name": "CSS",
"bytes": "5383"
},
{
"name": "Cuda",
"bytes": "131738"
},
{
"name": "Fortran",
"bytes": "1326303"
},
{
"name": "HTML",
"bytes": "1504171"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Jupyter Notebook",
"bytes": "18788347"
},
{
"name": "Makefile",
"bytes": "224292"
},
{
"name": "Python",
"bytes": "85708"
},
{
"name": "Shell",
"bytes": "23876"
}
],
"symlink_target": ""
}
|
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2511
try:
import unittest2 as unittest
except ImportError:
import unittest
class CertificateReqTestCase(unittest.TestCase):
pem_text = """\
MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
"""
def setUp(self):
self.asn1Spec = rfc2511.CertReqMessages()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert der_encoder.encode(asn1Object) == substrate
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
import sys
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
{
"content_hash": "ee1b64fa2db734b9f8cc15c5ca7a0c89",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 80,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.8044058744993324,
"repo_name": "cloudera/hue",
"id": "494d0172e67cf619cbeffa2c5c489a9f8dc82d0a",
"size": "1663",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pyasn1-modules-0.2.6/tests/test_rfc2511.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
from QUANTAXIS.QAFetch.QAQuery import (QA_fetch_future_day,
QA_fetch_future_min,
QA_fetch_future_tick,
QA_fetch_index_day, QA_fetch_index_min,
QA_fetch_stock_day, QA_fetch_stock_min)
from QUANTAXIS.QAMarket.QAMarket_engine import (market_future_engine,
market_stock_day_engine,
market_stock_engine)
from QUANTAXIS.QAUtil import (QA_util_log_info,
QA_util_to_json_from_pandas)
class QA_Market():
"""
    QUANTAXIS MARKET module
    Backtesting / paper trading
    Stocks / indexes / futures / bonds / ETFs
@yutiansut
"""
def __init__(self, commission_fee_coeff=0.0015):
self.engine = {'stock_day': QA_fetch_stock_day, 'stock_min': QA_fetch_stock_min,
'future_day': QA_fetch_future_day, 'future_min': QA_fetch_future_min, 'future_tick': QA_fetch_future_tick}
self.commission_fee_coeff = commission_fee_coeff
def __repr__(self):
return '< QA_MARKET >'
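    # Order-type codes handled below (inferred from the dispatch in
    # _choice_trading_market): '0x01' stock day, '0x02' stock minute,
    # '0x03' index day, '0x04' index minute, '1x01'/'1x02'/'1x03' futures.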
def _choice_trading_market(self, __order, __data=None):
assert isinstance(__order.type, str)
if __order.type == '0x01':
__data = self.__get_stock_day_data(
__order) if __data is None else __data
return market_stock_day_engine(__order, __data, self.commission_fee_coeff)
elif __order.type == '0x02':
            # fetch minute-level stock data for the stock engine
__data = self.__get_stock_min_data(
__order) if __data is None else __data
return market_stock_engine(__order, __data, self.commission_fee_coeff)
elif __order.type == '0x03':
__data = self.__get_index_day_data(
__order) if __data is None else __data
return market_stock_engine(__order, __data, self.commission_fee_coeff)
elif __order.type == '0x04':
__data = self.__get_index_min_data(
__order) if __data is None else __data
return market_stock_engine(__order, __data, self.commission_fee_coeff)
elif __order.type == '1x01':
return market_future_engine(__order, __data)
elif __order.type == '1x02':
return market_future_engine(__order, __data)
elif __order.type == '1x03':
return market_future_engine(__order, __data)
def __get_stock_min_data(self, __order):
__data = QA_util_to_json_from_pandas(QA_fetch_stock_min(str(
__order.code)[0:6], str(__order.datetime)[0:19], str(__order.datetime)[0:10], 'pd'))
if len(__data) == 0:
pass
else:
__data = __data[0]
return __data
def __get_stock_day_data(self, __order):
__data = QA_util_to_json_from_pandas(QA_fetch_stock_day(str(
__order.code)[0:6], str(__order.datetime)[0:10], str(__order.datetime)[0:10], 'pd'))
if len(__data) == 0:
pass
else:
__data = __data[0]
return __data
def __get_index_day_data(self, __order):
__data = QA_util_to_json_from_pandas(QA_fetch_index_day(str(
__order.code)[0:6], str(__order.datetime)[0:10], str(__order.datetime)[0:10], 'pd'))
if len(__data) == 0:
pass
else:
__data = __data[0]
return __data
def __get_index_min_data(self, __order):
__data = QA_util_to_json_from_pandas(QA_fetch_index_min(str(
__order.code)[0:6], str(__order.datetime)[0:10], str(__order.datetime)[0:10], 'pd'))
if len(__data) == 0:
pass
else:
__data = __data[0]
return __data
def receive_order(self, __order, __data=None):
"""
        get the order and choose which market engine to trade with
"""
def __confirm_order(__order):
if isinstance(__order.price, str):
if __order.price == 'market_price':
return __order
elif __order.price == 'close_price':
return __order
                elif __order.price in ('strict', 'strict_model', 'strict_price'):
__order.price = 'strict_price'
return __order
else:
                    QA_util_log_info('unsupported type: ' + __order.price)
return __order
else:
return __order
return self._choice_trading_market(__confirm_order(__order), __data)
def trading_engine(self):
pass
|
{
"content_hash": "ebf06861dd3429edd5518fe45972bfdb",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 129,
"avg_line_length": 38.831932773109244,
"alnum_prop": 0.5027050421986583,
"repo_name": "EmmaIshta/QUANTAXIS",
"id": "01b5d6b365d374aa51d048e24980c4728b038d84",
"size": "5812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QUANTAXIS/QAMarket/QAMarket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "42437"
},
{
"name": "CSS",
"bytes": "10888"
},
{
"name": "HTML",
"bytes": "528"
},
{
"name": "JavaScript",
"bytes": "126471"
},
{
"name": "Jupyter Notebook",
"bytes": "12926"
},
{
"name": "Python",
"bytes": "659910"
},
{
"name": "Vue",
"bytes": "59840"
}
],
"symlink_target": ""
}
|
from sentry_sdk.hub import Hub, init
from sentry_sdk.scope import Scope
from sentry_sdk.transport import Transport, HttpTransport
from sentry_sdk.client import Client
from sentry_sdk.api import * # noqa
from sentry_sdk.api import __all__ as api_all
from sentry_sdk.consts import VERSION # noqa
__all__ = api_all + [ # noqa
"Hub",
"Scope",
"Client",
"Transport",
"HttpTransport",
"init",
"integrations",
]
# Initialize the debug support after everything is loaded
from sentry_sdk.debug import init_debug_support
init_debug_support()
del init_debug_support
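# Typical application entry point (the DSN below is a placeholder):
#   import sentry_sdk
#   sentry_sdk.init("https://<key>@<host>/<project-id>")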
|
{
"content_hash": "62c601bb8d97a6b6a702342195ef1ca1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 57,
"avg_line_length": 23.64,
"alnum_prop": 0.7140439932318104,
"repo_name": "liszd/whyliam.workflows.youdao",
"id": "b211a6c754ace40753aea57491cbd38a98f674ff",
"size": "591",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sentry_sdk/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196285"
}
],
"symlink_target": ""
}
|
"""Views for Zinnia archives"""
import datetime
from django.utils import timezone
from django.views.generic.dates import BaseArchiveIndexView
from django.views.generic.dates import BaseYearArchiveView
from django.views.generic.dates import BaseMonthArchiveView
from django.views.generic.dates import BaseWeekArchiveView
from django.views.generic.dates import BaseDayArchiveView
from django.views.generic.dates import BaseTodayArchiveView
from zinnia.models.entry import Entry
from zinnia.views.mixins.archives import ArchiveMixin
from zinnia.views.mixins.archives import PreviousNextPublishedMixin
from zinnia.views.mixins.callable_queryset import CallableQuerysetMixin
from zinnia.views.mixins.prefetch_related import PrefetchCategoriesAuthorsMixin
from zinnia.views.mixins.templates import \
EntryQuerysetArchiveTemplateResponseMixin
from zinnia.views.mixins.templates import \
EntryQuerysetArchiveTodayTemplateResponseMixin
class EntryArchiveMixin(ArchiveMixin,
PreviousNextPublishedMixin,
PrefetchCategoriesAuthorsMixin,
CallableQuerysetMixin,
EntryQuerysetArchiveTemplateResponseMixin):
"""
    Mixin combining:
- ArchiveMixin configuration centralizing conf for archive views.
- PrefetchCategoriesAuthorsMixin to prefetch related objects.
- PreviousNextPublishedMixin for returning published archives.
    - CallableQuerysetMixin to force the update of the queryset.
    - EntryQuerysetArchiveTemplateResponseMixin to provide
custom templates for archives.
"""
    queryset = Entry.published.all
class EntryIndex(EntryArchiveMixin,
EntryQuerysetArchiveTodayTemplateResponseMixin,
BaseArchiveIndexView):
"""
View returning the archive index.
"""
template_name = 'zinnia/entry_list.html'
context_object_name = 'entry_list'
class EntryYear(EntryArchiveMixin, BaseYearArchiveView):
"""
View returning the archives for a year.
"""
template_name = 'zinnia/entry_list.html'
make_object_list = True
template_name_suffix = '_archive_year'
class EntryMonth(EntryArchiveMixin, BaseMonthArchiveView):
"""
View returning the archives for a month.
"""
template_name = 'zinnia/entry_list.html'
template_name_suffix = '_archive_month'
class EntryWeek(EntryArchiveMixin, BaseWeekArchiveView):
"""
View returning the archive for a week.
"""
template_name = 'zinnia/entry_list.html'
template_name_suffix = '_archive_week'
def get_dated_items(self):
"""
Override get_dated_items to add a useful 'week_end_day'
variable in the extra context of the view.
"""
self.date_list, self.object_list, extra_context = super(
EntryWeek, self).get_dated_items()
self.date_list = self.get_date_list(self.object_list, 'day')
extra_context['week_end_day'] = extra_context[
'week'] + datetime.timedelta(days=6)
return self.date_list, self.object_list, extra_context
class EntryDay(EntryArchiveMixin, BaseDayArchiveView):
"""
View returning the archive for a day.
"""
template_name = 'zinnia/entry_list.html'
template_name_suffix = '_archive_day'
class EntryToday(EntryArchiveMixin, BaseTodayArchiveView):
"""
View returning the archive for the current day.
"""
template_name = 'zinnia/entry_list.html'
template_name_suffix = '_archive_today'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
And defines self.year/month/day for
EntryQuerysetArchiveTemplateResponseMixin.
"""
today = timezone.now()
if timezone.is_aware(today):
today = timezone.localtime(today)
self.year, self.month, self.day = today.date().isoformat().split('-')
return self._get_dated_items(today)
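# Example: on 2014-07-05 (local server time) EntryToday resolves to the same
# archive as EntryDay with year='2014', month='07', day='05'.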
|
{
"content_hash": "32b2272ed91dd416c64036371813248a",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 34.96491228070175,
"alnum_prop": 0.706472654290015,
"repo_name": "pczhaoyun/obtainfo",
"id": "7f4e637598b43105cea43867472fd890ae526f82",
"size": "3986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zinnia/views/archives.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14"
},
{
"name": "C",
"bytes": "15992"
},
{
"name": "CSS",
"bytes": "308593"
},
{
"name": "HTML",
"bytes": "300477"
},
{
"name": "JavaScript",
"bytes": "1064973"
},
{
"name": "PHP",
"bytes": "4312"
},
{
"name": "Python",
"bytes": "1122394"
},
{
"name": "Ruby",
"bytes": "498"
},
{
"name": "Shell",
"bytes": "1814"
}
],
"symlink_target": ""
}
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Nuno Barreto', 'nbarreto@gmail.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'frozenflower' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'y820@g#+f5&25s+7d!ggj(tz8g(28tx921+g&zwjhk-4*hdrc='
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'frozenflower.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/var/local/django/frozenflower/frontend/templates/',
'/Users/nbarr/Sites/djcode/frozenflower/frontend/templates/',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.admindocs',
'frozenflower.frontend',
)
|
{
"content_hash": "c34e370fc7da4a3018be12dc9404e917",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 106,
"avg_line_length": 36.207317073170735,
"alnum_prop": 0.7096665543954194,
"repo_name": "washimimizuku/frozen-flower",
"id": "279669954a1ef1deee5675eebd4de59a108241e3",
"size": "3014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frozenflower2/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23163"
},
{
"name": "JavaScript",
"bytes": "16113"
},
{
"name": "PHP",
"bytes": "155840"
},
{
"name": "Python",
"bytes": "21768"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import argparse
class CommandLineInterface(object):
def __init__(self):
self.args = None
self.verbose = False
def options_parse(self):
parser = argparse.ArgumentParser(description='Adynaton Network Tool')
parser.add_argument(
'-n',
dest='testingoff',
action='store_true',
help='Turn Unit and Coverage Testing Off')
parser.add_argument(
'-v',
dest='verbose',
action='store_true',
help='Verbose Output')
parser.add_argument(
'--dhcp-client',
action='store_true',
help='DHCP Client')
parser.add_argument(
'--diameter-client',
action='store_true',
help='Diameter Client')
parser.add_argument(
'--dns-client',
action='store_true',
help='DNS Client')
parser.add_argument(
'--ftp-client',
action='store_true',
help='FTP Client')
parser.add_argument(
'--http-client',
action='store_true',
help='HTTP Client')
parser.add_argument(
'--iscsi-client',
action='store_true',
help='ISCSI Client')
parser.add_argument(
'--ldap-client',
action='store_true',
help='LDAP Client')
parser.add_argument(
'--nfs-client',
action='store_true',
help='NFS Client')
parser.add_argument(
'--ntp-client',
action='store_true',
help='NTP Client')
parser.add_argument(
'--radius-client',
action='store_true',
help='RADIUS Client')
parser.add_argument(
'--snmp-client',
action='store_true',
help='SNMP Client')
parser.add_argument(
'--syslog-client',
action='store_true',
help='Syslog Client')
parser.add_argument(
'--tftp-client',
action='store_true',
help='TFTP Client')
parser.add_argument(
'--dhcp-server',
action='store_true',
help='DHCP Server')
parser.add_argument(
'--diameter-server',
action='store_true',
help='Diameter Server')
parser.add_argument(
'--dns-server',
action='store_true',
help='DNS Server')
parser.add_argument(
'--ftp-server',
action='store_true',
help='FTP Server')
parser.add_argument(
'--http-server',
action='store_true',
help='HTTP Server')
parser.add_argument(
'--iscsi-server',
action='store_true',
help='ISCSI Server')
parser.add_argument(
'--ldap-server',
action='store_true',
help='LDAP Server')
parser.add_argument(
'--nfs-server',
action='store_true',
help='NFS Server')
parser.add_argument(
'--ntp-server',
action='store_true',
help='NTP Server')
parser.add_argument(
'--radius-server',
action='store_true',
help='RADIUS Server')
parser.add_argument(
'--snmp-server',
action='store_true',
help='SNMP Server')
parser.add_argument(
'--syslog-server',
action='store_true',
help='Syslog Server')
parser.add_argument(
'--tftp-server',
action='store_true',
help='TFTP Server')
self.args = parser.parse_args()
self.verbose = self.args.verbose
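# Minimal usage sketch (the script name below is a placeholder):
#   cli = CommandLineInterface()
#   cli.options_parse()   # e.g. invoked as `python adynaton_cli.py --http-client -v`
#   if cli.verbose:
#       print('verbose output requested')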
|
{
"content_hash": "e90d1b41f01988235726b99d2de48a07",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 77,
"avg_line_length": 31.806896551724137,
"alnum_prop": 0.5277536860364267,
"repo_name": "lathama/Adynaton",
"id": "36037b198584d2563f0b3ced37ec340d3a14c346",
"size": "4612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adynaton/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168176"
}
],
"symlink_target": ""
}
|
import six
import sahara.plugins.mapr.domain.configuration_file as bcf
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.util.validation_utils as vu
HBASE_MASTER = np.NodeProcess(
name='hbmaster',
ui_name='HBase-Master',
package='mapr-hbase-master',
open_ports=[60000, 60010]
)
HBASE_REGION_SERVER = np.NodeProcess(
name='hbregionserver',
ui_name='HBase-RegionServer',
package='mapr-hbase-regionserver',
open_ports=[60020]
)
HBASE_THRIFT = np.NodeProcess(
name='hbasethrift',
ui_name='HBase-Thrift',
package='mapr-hbasethrift',
open_ports=[9090]
)
class HBase(s.Service):
def __init__(self):
super(HBase, self).__init__()
self._name = 'hbase'
self._ui_name = 'HBase'
self._node_processes = [
HBASE_MASTER,
HBASE_REGION_SERVER,
HBASE_THRIFT,
]
self._cluster_defaults = ['hbase-default.json']
self._validation_rules = [
vu.at_least(1, HBASE_MASTER),
vu.at_least(1, HBASE_REGION_SERVER),
]
def get_config_files(self, cluster_context, configs, instance=None):
hbase_site = bcf.HadoopXML("hbase-site.xml")
hbase_site.remote_path = self.conf_dir(cluster_context)
if instance:
hbase_site.fetch(instance)
hbase_site.load_properties(configs)
return [hbase_site]
@six.add_metaclass(s.Single)
class HBaseV094(HBase):
def __init__(self):
super(HBaseV094, self).__init__()
self._version = '0.94.24'
self._dependencies = [('mapr-hbase', self.version)]
@six.add_metaclass(s.Single)
class HBaseV0987(HBase):
def __init__(self):
super(HBaseV0987, self).__init__()
self._version = '0.98.7'
self._dependencies = [('mapr-hbase', self.version)]
@six.add_metaclass(s.Single)
class HBaseV0989(HBase):
def __init__(self):
super(HBaseV0989, self).__init__()
self._version = '0.98.9'
self._dependencies = [('mapr-hbase', self.version)]
@six.add_metaclass(s.Single)
class HBaseV09812(HBase):
def __init__(self):
super(HBaseV09812, self).__init__()
self._version = "0.98.12"
self._dependencies = [("mapr-hbase", self.version)]
|
{
"content_hash": "d3b2f58d6d8c739d245a49b4c815ed25",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 72,
"avg_line_length": 28.120481927710845,
"alnum_prop": 0.6161096829477292,
"repo_name": "zhangjunli177/sahara",
"id": "4f9aaceb1c52d1a60e4ae007c089d52ac6208401",
"size": "2924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara/plugins/mapr/services/hbase/hbase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "29432"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3131969"
},
{
"name": "Shell",
"bytes": "60900"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
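# flatten2 reshapes its input to 2-D: dimensions before `axis` collapse into
# the first output dimension and the remaining ones into the second (inferred
# from the cases below, e.g. (3, 2, 2, 5) with axis=1 -> (3, 20)); the extra
# XShape output is not value-checked here.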
class TestFlattenOp(OpTest):
def setUp(self):
self.op_type = "flatten2"
self.init_test_case()
self.inputs = {"X": np.random.random(self.in_shape).astype("float32")}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
"XShape": np.random.random(self.in_shape).astype("float32")
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"])
def test_check_grad(self):
self.check_grad(["X"], "Out")
def init_test_case(self):
self.in_shape = (3, 2, 2, 5)
self.axis = 1
self.new_shape = (3, 20)
def init_attrs(self):
self.attrs = {"axis": self.axis}
class TestFlattenOpAxis0(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 2, 3)
self.axis = 0
self.new_shape = (1, 36)
class TestFlattenOpWithDefaultAxis(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 2, 3)
self.new_shape = (3, 12)
def init_attrs(self):
self.attrs = {}
class TestFlattenOpSixDims(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 3, 2, 4, 4)
self.axis = 4
self.new_shape = (36, 16)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "ff09ab597ff94f66322f7d676e492fda",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 24.135593220338983,
"alnum_prop": 0.5723314606741573,
"repo_name": "chengduoZH/Paddle",
"id": "59185855a5f13b82ca26bc26ead73fbe5fb96443",
"size": "2037",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_flatten2_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10146609"
},
{
"name": "CMake",
"bytes": "291349"
},
{
"name": "Cuda",
"bytes": "1192566"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7124331"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
"""
Boolean parameter type testcases.
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Test cases :
------------
- Testing minimum
- Testing maximum
- Testing negative value
- Testing overflow
"""
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Class containing SET/GET tests on a Boolean parameter
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/BOOL"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def testBooleanMaximum(self):
"""
Testing maximum value for boolean parameter
-------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Set a boolean parameter to the max value 1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- Boolean set to 1
"""
log.D(self.testBooleanMaximum.__doc__)
value = "1"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("When setting parameter %s : %s" % (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert out == value, log.F("incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
def testBooleanMinimum(self):
"""
Testing minimum value for boolean parameter
-------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Set a boolean parameter to the min value 0
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- Boolean set to 0
"""
log.D(self.testBooleanMinimum.__doc__)
value = "0"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("When setting parameter %s : %s" % (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("Error when setting parameter %s : %s" % (self.param_name, err))
assert out == value, log.F("Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
def testBooleanNegative(self):
"""
Testing negative value for boolean parameter
--------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Set a boolean parameter to -1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- Error detected, boolean not updated
"""
        log.D(self.testBooleanNegative.__doc__)
value = "-1"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out != "Done", log.F("When setting parameter %s : %s" % (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert out != value, log.F("incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
def testBooleanOverflow(self):
"""
Testing overflowed value for boolean parameter
----------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Set a boolean parameter to 2
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- Error detected, boolean not updated
"""
        log.D(self.testBooleanOverflow.__doc__)
value = "2"
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out != "Done", log.F("When setting parameter %s : %s" % (self.param_name, out))
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert out != value, log.F("incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
|
{
"content_hash": "1c14b52c260063ba37d72d147fc5384f",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 117,
"avg_line_length": 40.29007633587786,
"alnum_prop": 0.4990526714664646,
"repo_name": "krocard/parameter-framework",
"id": "9c95692ef498ffa36c2aed36bec0144f687ecf8b",
"size": "6843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional-tests-legacy/PfwTestCase/Types/tBoolean.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15260"
},
{
"name": "C++",
"bytes": "1319650"
},
{
"name": "CMake",
"bytes": "73231"
},
{
"name": "Python",
"bytes": "666523"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/08/24 12:12:31 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
    underscores, periods. Each Id must begin with a letter or an
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
      of the id_set. Append the collision count at the end of the
      created string. Also take care that the string starts with a
      valid character.
"""
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
if s[0] in '0123456789.':
        s = '_' + s
    id = ''.join(c for c in s if c in charset)
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
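# Example (with a shared id_set dict): convert_to_id('foo bar.txt', ids) yields
# 'foobar.txt'; a later convert_to_id('foo-bar.txt', ids) filters to the same
# characters, collides, and therefore yields 'foobar.txt1'.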
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
        These are not complete 8.3 DOS short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
    fname = ''.join(c for c in fname if c not in forbidden)
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = not None, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
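# Example: gen_dos_short_file_name('longfilename.txt', []) returns
# 'LONGFIL1.TXT' and appends it to the passed-in filename_set list.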
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
    To handle this requirement, the uuid is generated by md5-hashing the
    whole subtree of an xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
    # find all XML nodes matching the key, retrieve their attribute, hash their
    # subtree, convert the hash to a string and add it as an attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
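# The resulting values follow the 8-4-4-4-12 grouping of a GUID; for example,
# an md5 digest of 'd41d8cd98f00b204e9800998ecf8427e' would be written as
# 'd41d8cd9-8f00-b204-e980-0998ecf8427e'.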
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
# write the xml to a file
file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
    except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
    Features are specified with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
    It should be called after the tree has been built completely. We assume
    that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
    Furthermore a top-level Feature with the NAME and VERSION of the software will be created.
    A PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. As one of the design decision is that every
# file is also a component we walk the list of files and create a
# reference.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
    # now the optional tags, for which we avoid the KeyError exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
    # gather the keywords for the specfile compiler. These are the arguments
    # given to the package function and all optional ones stored in kw, minus
    # the source, target and env ones.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "385bf4f00ee38eb344141774e10e5212",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 114,
"avg_line_length": 38.34914611005693,
"alnum_prop": 0.6368134586838199,
"repo_name": "engineer0x47/SCONS",
"id": "5aee71be187624ab5dd11ff0013a5b8303cb117d",
"size": "20210",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "engine/SCons/Tool/packaging/msi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3707391"
},
{
"name": "Shell",
"bytes": "2934"
}
],
"symlink_target": ""
}
|
import requests
import datetime
import re
import unicodedata # for solution that did not work and is commeted out
class event(object):
def __init__(self, title, time, location):
self.title = title
self.time = time
self.location = location
def day(self):
try:
            day = re.findall(r'\w+', self.time)[:3]
day = ' '.join(day)
try:
return datetime.datetime.strptime(day, "%d %b %Y")
except ValueError:
return datetime.datetime.strptime(day, "%d %B %Y")
except ValueError:
return self.time
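    # day() keeps only the first three word tokens of the scraped time string,
    # so a value such as '21 Apr. 2018 - 23 Apr. 2018' becomes '21 Apr 2018'
    # and parses to datetime(2018, 4, 21); strings that fit neither
    # '%d %b %Y' nor '%d %B %Y' are returned unchanged.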
def status(self):
if isinstance(self.day(), datetime.datetime):
now = datetime.datetime.now()
if now < self.day():
return 'Upcoming'
elif now - self.day() < datetime.timedelta(days=1):
return 'Today'
else:
return 'Missed'
else:
return 'Unknown'
def __str__(self):
try:
rtnVal = str(self.status() + ' Event: %s' %self.title)
except Exception as ee:
rtnVal = str_Intl(self.status() + ' Event: %s' %self.title.encode('utf-8'))
return rtnVal
# this function was created instead of modifying __str__ because, in testing, this error cropped up
# both in the use of a print() statement all by itself, and in an event.__str__ call
def str_Intl(strng):
try:
strng2 = strng.encode('utf-8')
rtnVal = str(strng2)
except UnicodeEncodeError as uee:
print("Warning!")
print("%s: %s" %(type(uee), uee))
chrStartIndx = len("'ascii' codec can't encode character ")
chrEndIndx = str(uee).find(" in position ")
replStr = str(uee)[chrStartIndx:chrEndIndx]
startIndx = (chrEndIndx+1) + len("in position ")
endIndx = str(uee).find(": ordinal")
oIndx = int(str(uee)[startIndx:endIndx])
print("Character %d cannot be processed by print() or str() and will be replaced." %(oIndx))
print("---------------------")
rtnVal = (strng[0:oIndx] + ("\"%s\"" %replStr) + strng[(oIndx+1):])
        rtnVal = str_Intl(rtnVal) # recursive function call
except UnicodeDecodeError as ude:
# early testing with this line from stack overflow did not work for us:
# strng.encode('utf-8').strip()
# this solution also strips off the problem characters without outputting what they were
print("Warning!")
print("%s: %s" %(type(ude), ude))
print("Where possible, characters are replaced with their closest ascii equivelence.")
# earlier use of .encode() fixed one issue and bypassed the UnicodeEncodeError handling
# it then triggered this error for one of the other cases, so now we trying other solutions:
strng_u = unicode(strng, "utf-8")
rtnVal = unicodedata.normalize('NFKD', strng_u).encode('ascii', 'ignore')
# this threw an error that 2nd argument must be unicode, not string
# added string_u line as a fix for that
rtnVal = str_Intl(rtnVal)
except Exception as ee:
# when calling this code in a loop, you lose one value and get this error message output instead
# but the loop can continue over the rest of your data
rtnVal = "String data coult not be processed. Error: %s : %s" %(type(ee), ee)
return rtnVal
text = requests.get('https://www.python.org/events/python-user-group/').text
timePattern = r'<time datetime="[\w:+-]+">(.+)<span class="say-no-more">([\d ]+)</span>(.*)</time>'
locationPattern = r'<span class="event-location">(.*)</span>'
titlePattern = r'<h3 class="event-title"><a href=".+">(.*)</a></h3>'
time = re.findall(timePattern, text)
time = [''.join(i) for i in time]
location = re.findall(locationPattern, text)
title = re.findall(titlePattern, text)
events = [event(title[i], time[i], location[i]) for i in range(len(title))]
for i in events:
print (30*'-')
print(i) # bug fix: i is in events, so this calls __str__ in the object
print (' Time : %s' %i.time)
try:
print (' Location: %s' %i.location)
except Exception as ee:
print (str_Intl(' Location: %s' %i.location)) # bug fix: error thrown here too
# str_Intl() will parse out type of error in its try block
|
{
"content_hash": "235f419f6a322b351b1a8d294f4eb68f",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 116,
"avg_line_length": 42.27777777777778,
"alnum_prop": 0.5711782742006132,
"repo_name": "TheMitchWorksPro/DataTech_Playground",
"id": "5f3c78afca3dc36fcf482038ee81b24203f4231b",
"size": "4799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PY_Debug_Investigations/PY27_UEE_UDE/script/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1421732"
}
],
"symlink_target": ""
}
|
""" Sahana Eden Synchronization
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("SyncDataModel",
"sync_rheader",
"sync_now",
"sync_job_reset"
)
import json
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class SyncDataModel(S3Model):
names = ("sync_config",
"sync_status",
"sync_repository",
"sync_task",
"sync_resource_filter",
"sync_job",
"sync_log"
)
def model(self):
T = current.T
db = current.db
request = current.request
s3 = current.response.s3
messages = current.messages
UNKNOWN_OPT = messages.UNKNOWN_OPT
NONE = messages["NONE"]
crud_strings = s3.crud_strings
define_table = self.define_table
add_components = self.add_components
configure = self.configure
set_method = self.set_method
s3_datetime_represent = lambda dt: \
S3DateTime.datetime_represent(dt, utc=True)
# -------------------------------------------------------------------------
# Configuration
# -------------------------------------------------------------------------
tablename = "sync_config"
define_table(tablename,
Field("proxy",
label=T("Proxy Server URL"),
requires=IS_EMPTY_OR(IS_URL(mode="generic"))),
*s3_meta_fields())
# Field configuration
# @todo: make in-line
table = db[tablename]
table.uuid.readable = True
table.uuid.label = "UUID"
table.uuid.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("UUID"),
T("Unique identifier which THIS repository identifies itself with when sending synchronization requests.")))
table.proxy.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Proxy Server URL"),
T("URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.")))
# CRUD Strings
crud_strings[tablename] = Storage(
title_display = T("Synchronization Settings"),
title_update = T("Edit Synchronization Settings"),
msg_record_modified = T("Synchronization settings updated"))
# Resource Configuration
configure(tablename,
insertable=False,
deletable=False,
update_next=URL(c="sync", f="config", args=["1", "update"]))
# -------------------------------------------------------------------------
# Status
# -------------------------------------------------------------------------
tablename = "sync_status"
define_table(tablename,
Field("running", "boolean",
default=False,
readable=False,
writable=False),
Field("manual", "boolean",
default=False,
readable=False,
writable=False),
Field("timestmp", "datetime",
readable=False,
writable=False))
# -------------------------------------------------------------------------
# Repository
# -------------------------------------------------------------------------
sync_repository_types = {
"eden": "Sahana Eden",
"ccrm": "CiviCRM",
"wrike": "Wrike",
"mcb": "Mariner CommandBridge",
}
tablename = "sync_repository"
define_table(tablename,
Field("name", length=64, notnull=True,
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Repository Name"),
T("Name of the repository (for you own reference)"))),
),
Field("apitype",
label=T("Repository Type"),
requires = IS_IN_SET(sync_repository_types),
default = "eden",
represent = lambda opt: \
NONE if not opt else \
sync_repository_types.get(opt, NONE),
),
Field("url",
label="URL",
requires = IS_EMPTY_OR(
IS_NOT_IN_DB(db, "sync_repository.url")),
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Repository Base URL"),
T("Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden"))),
),
Field("username",
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Username"),
T("Username to use for authentication at the remote site."))),
),
Field("password", "password",
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Password"),
T("Password to use for authentication at the remote site."))),
),
Field("client_id",
label = T("Client ID"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Client ID"),
T("The client ID to use for authentication at the remote site (if required for this type of repository)."))),
),
Field("client_secret", "password",
label = T("Client Secret"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Client Secret"),
T("The client secret to use for authentication at the remote site (if required for this type of repository)."))),
),
Field("site_key",
label = T("Site Key"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Site Key"),
T("Site Key which this site uses to authenticate at the remote site (if required for this type of repository)."))),
),
Field("refresh_token",
readable = False,
writable = False,
),
Field("proxy",
label=T("Proxy Server URL"),
requires=IS_EMPTY_OR(IS_URL(mode="generic")),
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Proxy Server URL"),
T("URL of the proxy server to connect to the repository (leave empty for default proxy)"))),
),
Field("last_status",
readable=False,
writable=False,
label=T("Last status"),
),
Field("accept_push", "boolean",
represent = s3_yes_no_represent,
default=False,
label=T("Accept Push"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Accept Push"),
T("Accept unsolicited data transmissions from the repository."))),
),
Field.Method("last_pull_time",
self.sync_repository_last_pull_time),
Field.Method("last_push_time",
self.sync_repository_last_push_time),
*s3_meta_fields())
# CRUD Strings
ADD_REPOSITORY = T("Create Repository")
crud_strings[tablename] = Storage(
label_create = ADD_REPOSITORY,
title_display = T("Repository Configuration"),
title_list = T("Repositories"),
title_update = T("Edit Repository Configuration"),
label_list_button = T("List Repositories"),
msg_record_created = T("Repository configured"),
msg_record_modified = T("Repository configuration updated"),
msg_record_deleted = T("Repository configuration deleted"),
msg_list_empty = T("No repositories configured"))
# Resource Configuration
configure(tablename,
list_fields=["name",
"uuid",
"accept_push",
(T("Last Pull"), "last_pull_time"),
(T("Last Push"), "last_push_time"),
],
onaccept=self.sync_repository_onaccept,
ondelete=self.sync_repository_ondelete,
create_next=URL(c="sync",
f="repository",
args=["[id]", "task"],
),
update_next=URL(c="sync",
f="repository",
args=["[id]"],
)
)
set_method("sync", "repository", method="now", action=sync_now)
# Reusable Fields
repository_id = S3ReusableField("repository_id", "reference %s" % tablename,
requires = IS_ONE_OF(db,
"sync_repository.id",
"%(name)s"),
represent = self.sync_repository_represent,
label = T("Repository"))
# Components
add_components(tablename,
sync_task="repository_id",
sync_log="repository_id",
#sync_conflict="repository_id",
**{# Scheduler Jobs
S3Task.TASK_TABLENAME: {"name": "job",
"joinby": "repository_id",
"link": "sync_job",
"key": "scheduler_task_id",
"actuate": "replace",
},
}
)
# -------------------------------------------------------------------------
# Task
# -------------------------------------------------------------------------
# Synchronization mode
sync_mode = {
1: T("pull"), # pull only
2: T("push"), # push only
3: T("pull and push"), # pull & push
4: T("none") # do not synchronize this resource
}
# Strategy (allowed import methods)
sync_strategy = S3ImportItem.METHOD
sync_strategy_represent = lambda opt: opt and \
", ".join([o for o in sync_strategy.values()
if o in opt]) or NONE
# Update method
sync_update_method = {
1: T("update"), # update the existing record
2: T("replace"), # replace the existing record
}
# Update/conflict resolution policy
sync_policies = S3ImportItem.POLICY
sync_policy = {
sync_policies.OTHER: T("always update"),
sync_policies.NEWER: T("update if newer"),
sync_policies.MASTER: T("update if master"),
sync_policies.THIS: T("never update")
}
sync_policy_represent = lambda opt: \
opt and sync_policy.get(opt, UNKNOWN_OPT) or NONE
tablename = "sync_task"
define_table(tablename,
repository_id(),
Field("resource_name",
notnull=True),
Field("last_pull", "datetime",
readable=True,
writable=False,
label=T("Last pull on")),
Field("last_push", "datetime",
readable=True,
writable=False,
label=T("Last push on")),
Field("mode", "integer",
requires = IS_IN_SET(sync_mode,
zero=None),
default = 3,
label = T("Mode"),
represent = lambda opt: \
sync_mode.get(opt, NONE)),
Field("strategy", "list:string",
requires = IS_IN_SET(sync_strategy.values(),
multiple=True,
zero=None),
default = sync_strategy.values(),
label = T("Strategy"),
represent = sync_strategy_represent,
widget = CheckboxesWidgetS3.widget),
Field("update_method", "integer",
# hide while not implemented
readable=False,
writable=False,
requires = IS_IN_SET(sync_update_method,
zero=None),
default = 1,
label = T("Update Method"),
represent = lambda opt: \
sync_update_method.get(opt,
NONE)),
Field("update_policy",
requires = IS_IN_SET(sync_policies,
zero=None),
default = sync_policies.NEWER,
label = T("Update Policy"),
represent = sync_policy_represent),
Field("conflict_policy",
requires = IS_IN_SET(sync_policies,
zero=None),
default = sync_policies.NEWER,
label = T("Conflict Policy"),
represent = sync_policy_represent),
*s3_meta_fields())
# Field configuration
# @todo: make in-line
table = db[tablename]
table.resource_name.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Resource Name"),
T("Table name of the resource to synchronize")))
table.mode.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Synchronization mode"),
T("How data shall be transferred")))
table.strategy.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Strategy"),
T("Which methods to apply when importing data to the local repository")))
table.update_method.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Update Method"),
T("How local records shall be updated")))
table.update_policy.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Update Policy"),
T("Under which conditions local records shall be updated")))
table.conflict_policy.comment = DIV(_class="tooltip",
_title="%s|%s" % (
T("Conflict Policy"),
T("Under which condition a local record shall be updated if it also has been modified locally since the last synchronization")))
# CRUD Strings
ADD_TASK = T("Create Resource")
crud_strings[tablename] = Storage(
label_create = ADD_TASK,
title_display = T("Resource Configuration"),
title_list = T("Resources"),
title_update = T("Edit Resource Configuration"),
label_list_button = T("List Resources"),
msg_record_created = T("Resource configured"),
msg_record_modified = T("Resource configuration updated"),
msg_record_deleted = T("Resource configuration deleted"),
msg_list_empty = T("No resources configured yet"))
# Resource Configuration
configure(tablename,
create_onvalidation=self.sync_task_onvalidation)
# Reusable Field
task_represent = self.sync_task_represent
task_id = S3ReusableField("task_id", "reference %s" % tablename,
requires = IS_ONE_OF(db,
"sync_task.id",
task_represent),
represent = task_represent,
label = T("Task"))
# Components
add_components(tablename,
sync_resource_filter="task_id",
)
# -------------------------------------------------------------------------
# Filters
# -------------------------------------------------------------------------
tablename = "sync_resource_filter"
define_table(tablename,
task_id(),
Field("tablename",
label = T("Table"),
requires = IS_NOT_EMPTY()),
Field("filter_string",
label = T("Filter"),
requires = IS_NOT_EMPTY()),
*s3_meta_fields())
onaccept = self.sync_resource_filter_onaccept
configure(tablename,
list_fields = ["id",
"task_id$repository_id",
"task_id$resource_name",
"tablename",
"filter_string"],
onaccept = onaccept,
ondelete = onaccept)
# -------------------------------------------------------------------------
# Job
# -------------------------------------------------------------------------
tablename = "sync_job"
define_table(tablename,
repository_id(),
s3.scheduler_task_id(),
*s3_meta_fields())
# CRUD Strings
ADD_JOB = T("Create Job")
crud_strings[tablename] = Storage(
label_create = ADD_JOB,
title_display = T("Synchronization Job"),
title_list = T("Synchronization Schedule"),
title_update = T("Edit Job"),
label_list_button = T("List Jobs"),
msg_record_created = T("Job added"),
msg_record_modified = T("Job updated"),
msg_record_deleted = T("Job deleted"),
msg_list_empty = T("No jobs configured yet"),
msg_no_match = T("No jobs configured"))
# Resource Configuration
set_method("sync", "repository",
component_name="job",
method="reset",
action=sync_job_reset)
# -------------------------------------------------------------------------
# Log
# -------------------------------------------------------------------------
tablename = "sync_log"
define_table(tablename,
Field("timestmp", "datetime",
represent=s3_datetime_represent,
label=T("Date/Time")),
repository_id(),
Field("resource_name"),
# Synchronization mode: PULL/PUSH, IN/OUT
Field("mode"),
Field("action"),
Field("result"),
Field("remote", "boolean",
default=False,
label=T("Remote Error"),
                           represent=lambda opt: T("yes") if opt else T("no")),
Field("message", "text",
represent=s3_strip_markup),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
title_display = T("Log Entry"),
title_list = T("Synchronization Log"),
label_list_button = T("List All Entries"),
msg_record_deleted = T("Log Entry Deleted"),
msg_list_empty = T("No entries found"),
msg_no_match = T("No entries found"))
# Resource Configuration
configure(tablename,
editable=False,
insertable=False,
deletable=True,
orderby="sync_log.timestmp desc")
# ---------------------------------------------------------------------
# Return global names to s3.*
#
return Storage()
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults if module is disabled """
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_represent(rid):
""" Repository representation """
db = current.db
rtable = current.s3db.sync_repository
repository = db(rtable.id == rid).select(rtable.name,
limitby=(0, 1)).first()
try:
return repository.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_last_pull_time(row):
""" Last pull synchronization date/time for this repository """
try:
repository_id = row["sync_repository.id"]
except AttributeError:
return "-"
table = current.s3db.sync_task
query = (table.repository_id == repository_id)
task = current.db(query).select(orderby=~table.last_pull,
limitby=(0,1)).first()
if task and task.last_pull:
return S3DateTime.datetime_represent(task.last_pull, utc=True)
else:
return current.T("never")
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_last_push_time(row):
""" Last push synchronization date/time for this repository """
try:
repository_id = row["sync_repository.id"]
except AttributeError:
return "-"
table = current.s3db.sync_task
query = (table.repository_id == repository_id)
task = current.db(query).select(orderby=~table.last_push,
limitby=(0,1)).first()
if task and task.last_push:
return S3DateTime.datetime_represent(task.last_push, utc=True)
else:
return current.T("never")
# -------------------------------------------------------------------------
@staticmethod
def sync_task_represent(task_id):
""" Task representation """
s3db = current.s3db
ttable = s3db.sync_task
rtable = s3db.sync_repository
query = (ttable.id == task_id) & \
(rtable.id == ttable.repository_id)
db = current.db
task = db(query).select(ttable.resource_name,
rtable.name,
limitby=(0, 1)).first()
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
if task:
repository = task[rtable.name] or UNKNOWN_OPT
resource = task[ttable.resource_name] or UNKNOWN_OPT
return "%s: %s" % (repository, resource)
else:
return UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_ondelete(row):
"""
Cleanup after repository deletion
@todo: use standard delete cascade
"""
db = current.db
s3db = current.s3db
# Remove the URL to allow re-setup of the same repo
rtable = s3db.sync_repository
db(rtable.id == row.id).update(url=None)
# Delete all resources in this repository
ttable = s3db.sync_task
db(ttable.repository_id == row.id).update(deleted=True)
# Delete all jobs for this repository
# @todo: remove scheduler_task entry as well
jtable = s3db.sync_job
db(jtable.repository_id == row.id).update(deleted=True)
# Delete all pending conflicts of this repository
#ctable = s3db.sync_conflict
#db(ctable.repository_id == row.id).delete()
# Delete all log entries for this repository
ltable = s3db.sync_log
db(ltable.repository_id == row.id).delete()
return
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_onaccept(form):
"""
Send registration request to the peer
"""
try:
repository_id = form.vars.id
except:
return
sync = current.sync
if repository_id:
rtable = current.s3db.sync_repository
query = (rtable.id == repository_id)
repository = current.db(query).select(limitby=(0, 1)).first()
if repository and repository.url:
from s3.s3sync import S3SyncRepository
connector = S3SyncRepository(repository)
success = connector.register()
if not success:
current.response.warning = \
current.T("Could not auto-register at the repository, please register manually.")
else:
current.response.confirmation = \
current.T("Successfully registered at the repository.")
return
# -------------------------------------------------------------------------
@staticmethod
def sync_task_onvalidation(form):
"""
Task record validation
"""
repository_id = form.vars.repository_id or \
current.request.post_vars.repository_id
resource_name = form.vars.resource_name
if repository_id and resource_name:
ttable = current.s3db.sync_task
query = (ttable.repository_id == repository_id) & \
(ttable.resource_name == resource_name) & \
(ttable.deleted != True)
row = current.db(query).select(ttable.id,
limitby=(0, 1)).first()
if row:
form.errors.resource_name = \
T("This resource is already configured for this repository")
# -------------------------------------------------------------------------
@staticmethod
def sync_resource_filter_onaccept(form):
"""
Reset last_push when adding/changing a filter
"""
db = current.db
s3db = current.s3db
ttable = s3db.sync_task
ftable = s3db.sync_resource_filter
if isinstance(form, Row):
filter_id = form.id
else:
try:
filter_id = form.vars.id
except:
return
row = db(ftable.id == filter_id).select(ftable.id,
ftable.deleted,
ftable.task_id,
ftable.deleted_fk,
limitby=(0, 1)).first()
if row:
task_id = None
if row.deleted:
try:
deleted_fk = json.loads(row.deleted_fk)
except:
return
if "task_id" in deleted_fk:
task_id = deleted_fk["task_id"]
else:
task_id = row.task_id
if task_id:
db(ttable.id == task_id).update(last_push=None)
return
# =============================================================================
def sync_rheader(r, tabs=[]):
"""
Synchronization resource headers
"""
if r.representation == "html":
if r.tablename == "sync_repository":
T = current.T
repository = r.record
if r.component and r.component_name=="log" and not r.component_id:
purge_log = A(T("Remove all log entries"),
_href=r.url(method="delete"))
else:
purge_log = ""
if repository:
if repository.url:
tabs.append((T("Manual Synchronization"), "now"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(
TR(TH("%s: " % T("Name")),
repository.name,
TH(""),
purge_log),
TR(TH("URL: "),
repository.url,
TH(""),
""),
), rheader_tabs)
return rheader
return None
# =============================================================================
def sync_job_reset(r, **attr):
"""
RESTful method to reset a job status from FAILED to QUEUED,
for "Reset" action button
"""
if r.interactive:
if r.component and r.component.alias == "job":
job_id = r.component_id
if job_id:
S3Task.reset(job_id)
current.session.confirmation = current.T("Job reactivated")
r.component_id = None
redirect(r.url(method=""))
# =============================================================================
def sync_now(r, **attr):
"""
Manual synchronization of a repository
@param r: the S3Request
@param attr: controller options for the request
"""
T = current.T
auth = current.auth
response = current.response
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output = dict(title=T("Manual Synchronization"), rheader=rheader)
s3task = current.s3task
sync = current.sync
if not auth.s3_logged_in():
auth.permission.fail()
if r.interactive:
if r.http in ("GET", "POST"):
repository = r.record
if not repository:
r.error(404, current.ERROR.BAD_RECORD)
form = FORM(TABLE(
TR(TD(T("Click 'Start' to synchronize with this repository now:"))),
TR(TD(INPUT(_type="submit", _value=T("Start"))))))
if form.accepts(r.post_vars, current.session):
task_id = s3task.async("sync_synchronize",
args = [repository.id],
vars = dict(user_id=auth.user.id,
manual=True))
if task_id is False:
response.error = T("Could not initiate manual synchronization.")
elif task_id is None:
response.flash = T("Manual synchronization completed.")
else:
sync.set_status(manual=True)
response.flash = T("Manual synchronization started in the background.")
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(501, current.ERROR.BAD_FORMAT)
status = sync.get_status()
if status.running:
output.update(form=T("Synchronization currently active - refresh page to update status."))
elif not status.manual:
output.update(form=form)
else:
output.update(form=T("Manual synchronization scheduled - refresh page to update status."))
response.view = "update.html"
return output
# END =========================================================================
|
{
"content_hash": "a9d3c3d87bcd70254a67141bae76fb33",
"timestamp": "",
"source": "github",
"line_count": 855,
"max_line_length": 259,
"avg_line_length": 42.31345029239766,
"alnum_prop": 0.4148930289125988,
"repo_name": "gnarula/eden_deployment",
"id": "0053e071044c20683588756357e162452a5a4639",
"size": "36203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/s3db/sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1305178"
},
{
"name": "JavaScript",
"bytes": "16338028"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28218113"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2491556"
}
],
"symlink_target": ""
}
|
from copy import copy
from corehq.util.couch_helpers import paginate_view, MultiKeyViewArgsProvider
from corehq.util.pagination import paginate_function
from pillowtop.dao.couch import CouchDocumentStore
from pillowtop.feed.interface import Change
from pillowtop.reindexer.change_providers.interface import ChangeProvider
class CouchViewChangeProvider(ChangeProvider):
"""
A ChangeProvider on top of a couch view. Lets you parameterize how you query
the view and will then return an iterator over all the results of that view
query.
This is meant to eventually replace the logic in the PtopReindexer subclasses
that currently deal with this.
"""
def __init__(self, couch_db, view_name, chunk_size=100, view_kwargs=None):
self._couch_db = couch_db
self._view_name = view_name
self._chunk_size = chunk_size
self._view_kwargs = view_kwargs or {}
def iter_all_changes(self, start_from=None):
view_kwargs = copy(self._view_kwargs)
view_kwargs['reduce'] = False # required to paginate a view
if start_from is not None:
# todo: should we abstract out how the keys work inside this class?
view_kwargs['startkey'] = start_from
for row in paginate_view(self._couch_db, self._view_name, self._chunk_size, **view_kwargs):
# todo: if include_docs isn't specified then this will make one request to couch per row
# to get the documents. In the future we will likely need to add chunking
yield Change(id=row['id'], sequence_id=None, document=row.get('doc'), deleted=False,
document_store=CouchDocumentStore(self._couch_db))
class CouchDomainDocTypeChangeProvider(ChangeProvider):
def __init__(self, couch_db, domains, doc_types, chunk_size=1000, event_handler=None):
self.domains = domains
self.doc_types = doc_types
self.chunk_size = chunk_size
self.couch_db = couch_db
self.event_handler = event_handler
def iter_all_changes(self, start_from=None):
if not self.domains:
return
def data_function(**view_kwargs):
return self.couch_db.view('by_domain_doc_type_date/view', **view_kwargs)
keys = []
for domain in self.domains:
for doc_type in self.doc_types:
keys.append([domain, doc_type])
args_provider = MultiKeyViewArgsProvider(keys, include_docs=True, chunk_size=self.chunk_size)
for row in paginate_function(data_function, args_provider, event_handler=self.event_handler):
yield Change(
id=row['id'],
sequence_id=None,
document=row.get('doc'),
deleted=False,
document_store=None
)
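# Hypothetical usage sketch: given an already-configured couchdbkit database
# handle, a CouchViewChangeProvider can be walked like any other ChangeProvider.
# The view name and kwargs below are illustrative assumptions only.
def _example_iter_view_changes(couch_db):
    provider = CouchViewChangeProvider(
        couch_db,
        'some_design_doc/some_view',  # placeholder view name
        chunk_size=500,
        view_kwargs={'include_docs': True},  # avoids one doc fetch per row
    )
    for change in provider.iter_all_changes():
        print(change.id)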
|
{
"content_hash": "eebd8346f8b70a5d05348c9e3a1aa3a7",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 101,
"avg_line_length": 42.07462686567164,
"alnum_prop": 0.6520042568286627,
"repo_name": "qedsoftware/commcare-hq",
"id": "398a24e6ae3fecb1719be5df28bf0107d9b39647",
"size": "2819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/pillowtop/reindexer/change_providers/couch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
import ansiblelint.utils
from ansiblelint import AnsibleLintRule
class TaskHasTag(AnsibleLintRule):
id = 'AERISCLOUD0002'
shortdesc = 'Tasks must have tag'
description = 'Tasks must have tag'
tags = ['productivity']
def matchtask(self, file, task):
# The meta files don't have tags
if file['type'] == 'meta':
return False
if isinstance(task, basestring):
return False
if file['type'] == 'playbook':
return False
        # If the task includes another task or makes the playbook fail,
        # don't force it to have a tag
if task['action']['__ansible_module__'] in ['include', 'fail']:
return False
role = ansiblelint.utils.rolename(file['path'])
# Ignore role from galaxy
if role.find('.') > -1:
return False
# Task should have tags
if 'tags' not in task:
return True
if role.find('ansible-') > -1:
role = role[8:]
if role and role not in task['tags']:
return 'The tag "' + role + '" is not present in this block.'
if task['action']['__ansible_module__'] == 'apt' and 'pkgs' not in task['tags']:
return 'The tag "pkgs" must be present'
if task['action']['__ansible_module__'] == 'apt_repository' \
and 'repos' not in task['tags']:
return 'The tag "repos" must be present'
if task['action']['__ansible_module__'] == 'yum' \
and set(task['tags']).isdisjoint(['repos', 'pkgs']):
return 'One of the following tags must be present "repos", "pkgs"'
if task['action']['__ansible_module__'] == 'copy' \
and task['action']['dest'].find('/etc/yum.repos.d') >= 0 \
and 'repos' not in task['tags']:
return 'The tag "repos" must be present'
if task['action']['__ansible_module__'] in ['copy', 'template'] \
and 'files' not in task['tags']:
return 'The tag "files" must be present'
if task['action']['__ansible_module__'] == 'sysctl' \
and 'sysctl' not in task['tags']:
return 'The tag "sysctl" must be present'
if task['action']['__ansible_module__'] == 'aeriscloud_service' \
and 'announce' not in task['tags']:
return 'The tag "announce" must be present'
return False
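# Illustrative example of a task that satisfies this rule (an assumed typical
# case, not taken from a real playbook): inside roles/ansible-nginx/tasks/main.yml
# the role tag ("nginx", with the "ansible-" prefix stripped) and the
# module-specific tag ("pkgs" for apt) must both be present:
#
#   - name: install nginx
#     apt: name=nginx state=present
#     tags: ['nginx', 'pkgs']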
|
{
"content_hash": "548f0f21b01c2542fdd10e1aeea3aa2b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 88,
"avg_line_length": 34.42253521126761,
"alnum_prop": 0.5376432078559739,
"repo_name": "AerisCloud/AerisCloud",
"id": "47a2552b5ed5690c90ad3d04804ef6e23db22c12",
"size": "2444",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ansible/rules/TaskHasTag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2404"
},
{
"name": "Python",
"bytes": "213590"
},
{
"name": "Ruby",
"bytes": "6901"
},
{
"name": "Shell",
"bytes": "15381"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0015_add_home_page_and_email_to_program_page'),
]
operations = [
migrations.RemoveField(
model_name='programpage',
name='contact_us',
),
]
|
{
"content_hash": "611a77d5488f87bd9144ae0aeb6ab341",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 20.352941176470587,
"alnum_prop": 0.5982658959537572,
"repo_name": "mitodl/micromasters",
"id": "7334330bc26f231210849f4683aedf534fb7d88b",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/migrations/0016_remove_programpage_contact_us.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
}
|
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v12.services.types import geo_target_constant_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GeoTargetConstantServiceTransport(abc.ABC):
"""Abstract transport class for GeoTargetConstantService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.suggest_geo_target_constants: gapic_v1.method.wrap_method(
self.suggest_geo_target_constants,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def suggest_geo_target_constants(
self,
) -> Callable[
[geo_target_constant_service.SuggestGeoTargetConstantsRequest],
Union[
geo_target_constant_service.SuggestGeoTargetConstantsResponse,
Awaitable[
geo_target_constant_service.SuggestGeoTargetConstantsResponse
],
],
]:
raise NotImplementedError()
__all__ = ("GeoTargetConstantServiceTransport",)
|
{
"content_hash": "201914661aee857cbed6176b5344e547",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 101,
"avg_line_length": 38.801418439716315,
"alnum_prop": 0.6251142387132151,
"repo_name": "googleads/google-ads-python",
"id": "d4257c763baebe9631f024ce96f42adb58a5f038",
"size": "6071",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/geo_target_constant_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
""" this module handles the rest calls to the roxen server """
def request():
pass
def response():
return "ok"
|
{
"content_hash": "c166afe8488affc64973b1d9674c7f3b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 62,
"avg_line_length": 17.142857142857142,
"alnum_prop": 0.65,
"repo_name": "whojarr/roxentools",
"id": "be13f73ebbcd5ce5f6b1121809cfa073588ddc4f",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roxentools/rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13701"
}
],
"symlink_target": ""
}
|
import abc
import json
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from pygce.models.garmin import utils
class GCDaySection:
"""
Standard section in the Garmin Connect timeline of day.
"""
def __init__(self, raw_html, tag=""):
"""
:param raw_html: str
HTML source snippet with information about section
:param tag: str
Unique str in order not to mistake this GCDaySection with another one
"""
self.tag = tag # unique key in order not to mistake this GCDaySection with another one
self.html = str(raw_html)
self.soup = BeautifulSoup(self.html, "html.parser")
@abc.abstractmethod
def parse(self):
"""
:return: void
Parses raw html source and tries to finds all information
"""
def to_dict(self):
"""
:return: dict
Dictionary with keys (obj fields) and values (obj values)
"""
return {}
def to_json(self):
"""
:return: json object
A json representation of this object
"""
d = self.to_dict()
for k in d.keys():
d[k] = str(d[k]) # convert to string to be json serializable
return json.dumps(d)
def to_csv_dict(self):
"""
:return: {}
        Like self.to_dict() but with a unique tag prefix on each key, so keys from different GCDaySections can be told apart
"""
d = self.to_dict()
csv_d = {}
for k in d.keys():
new_key = str(self.tag) + ":" + k
csv_d[new_key] = str(d[k]) # edit key
return csv_d
class GCDaySummary(GCDaySection):
"""
Standard activity in the Garmin Connect timeline of day.
Common features are likes, comment, kcal
"""
def __init__(self, raw_html):
"""
:param raw_html: str
HTML source snippet with information about section
"""
super().__init__(raw_html, tag="SUMMARY")
self.likes = None
self.comment = None
self.kcal_count = None
def parse(self):
try:
self.parse_likes()
except:
pass
try:
self.parse_comment()
except:
pass
try:
self.parse_kcal_count()
except:
pass
def parse_likes(self):
"""
:return: void
Finds likes count and stores value
"""
container = \
self.soup.find_all("div", {"class": "span4 page-navigation"})[0]
container = \
container.find_all("span", {"class": "like js-like-count"})[0]
likes = container.text.strip().split(" ")[0]
self.likes = utils.parse_num(likes)
def parse_comment(self):
"""
:return: void
Finds comment value and stores value
"""
container = self.soup.find_all("div", {"class": "note-container"})[0]
container = container.find_all("textarea", {"id": "noteTextarea"})[0]
comment = str(container.text).strip()
self.comment = comment
def parse_kcal_count(self):
"""
:return: void
Finds kcal value and stores value
"""
container = self.soup.find_all("div", {
"class": "span8 daily-summary-stats-placeholder"})[0]
container = container.find_all("div", {"class": "row-fluid top-xl"})[0]
kcal_count = container.find_all("div", {"class": "data-bit"})[0].text
self.kcal_count = utils.parse_num(kcal_count)
def to_dict(self):
return {
"likes": self.likes,
"comment": self.comment,
"kcal_count": self.kcal_count
}
class GCDaySteps(GCDaySection):
"""
Standard activity in the Garmin Connect timeline of day.
Common features are total, goal, distance, avg daily
"""
def __init__(self, raw_html):
"""
:param raw_html: str
HTML source snippet with information about section
"""
super().__init__(raw_html, tag="STEPS")
self.total = None
self.goal = None
self.avg = None
self.distance = None
def parse(self):
try:
self.parse_steps_count()
except:
pass
try:
self.parse_steps_stats()
except:
pass
def parse_steps_count(self):
"""
:return: void
Parses HTML source and finds goal and daily steps
"""
container = \
self.soup.find_all("div", {"class": "span4 text-center charts"})[0]
total = container.find_all("div", {"class": "data-bit"})[
0].text # finds total steps
self.total = utils.parse_num(total)
goal = \
container.find_all("div", {"class": "h5"})[0].text.strip().split(
" ")[
-1].strip()
self.goal = utils.parse_num(goal)
def parse_steps_stats(self):
"""
:return: void
Parses HTML source and finds daily distance and avg daily steps
"""
container = self.soup.find_all("div", {
"class": "span8 daily-summary-stats-placeholder"})[0]
container = container.find_all("div", {"class": "row-fluid top-xl"})[0]
container = container.find_all("div", {"class": "data-bit"})
self.distance = utils.parse_num(container[1].text.split("km")[0])
self.avg = utils.parse_num(container[2].text)
def to_dict(self):
return {
"total": self.total,
"goal": self.goal,
"avg": self.avg,
"distance": self.distance
}
class GCDetailsSteps(GCDaySection):
"""Steps divided into 15-minute bins"""
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
OUT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def __init__(self, date_time, raw_html):
super().__init__(raw_html, tag="STEPS DETAILS")
self.date_time = date_time
self.content = json.loads(self.html)
self.bins = []
@staticmethod
def parse_steps_count(raw):
raw = str(raw)
if raw.endswith('.0'): # remove decimal point
raw = raw[:-2]
raw = raw.replace('.', '') # remove thousands point
return int(raw)
def parse(self):
for data in self.content:
            date_time = data['startGMT'][:-2]  # strip the trailing ".0"
date_time = datetime.strptime(date_time, self.DATE_FORMAT)
date_time = date_time.strftime(self.OUT_DATE_FORMAT)
steps_count = int(data['steps'])
self.bins.append({
'time': date_time,
'steps': self.parse_steps_count(steps_count)
})
def to_dict(self):
return {
'15-min bins': self.bins
}
class GCDaySleep(GCDaySection):
"""
Standard activity in the Garmin Connect timeline of day.
Common features are total, deep total, light total, awake total
"""
def __init__(self, raw_html):
"""
:param raw_html: str
HTML source snippet with information about section
"""
super().__init__(raw_html, tag="SLEEP")
self.night_sleep_time = None
self.nap_time = None
self.total_sleep_time = None # typically night_sleep + nap
self.bed_time = None
self.wake_time = None
self.deep_sleep_time = None
self.light_sleep_time = None
self.awake_sleep_time = None # time during night you were awake
def parse(self):
try:
self.parse_sleep_totals()
except:
pass
try:
self.parse_bed_time()
except:
pass
try:
self.parse_sleep_times()
except:
pass
def parse_sleep_totals(self):
"""
:return: void
Finds value of night/nap/total sleep times
"""
container = \
self.soup.find_all("div", {"class": "equation centered"})[0]
times = container.find_all("div", {"class": "data-bit"})
times = [str(t.text).strip() for t in times] # strip texts
self.night_sleep_time = utils.parse_hh_mm(times[0])
self.nap_time = utils.parse_hh_mm(times[1])
self.total_sleep_time = utils.parse_hh_mm(times[2].split(" ")[0])
def parse_bed_time(self):
"""
:return: void
Finds hour start/end sleep
"""
times = self.soup.find_all(
"div", {"class": "time-inline-edit-placeholder"}
)
times = [str(t.text).strip() for t in times] # strip texts
self.bed_time = datetime.strptime(
times[0], "%I:%M %p").time() # account for AM/PM
self.wake_time = datetime.strptime(
times[1], "%I:%M %p").time() # account for AM/PM
def parse_sleep_times(self):
"""
:return: void
Finds deep/light/awake sleep times
"""
base_class = "span4 text-center sleep-chart-secondary"
container = self.soup.find_all("div", {
"class": base_class + " deep-sleep-circle-chart-placeholder"})[
0]
self.deep_sleep_time = utils.parse_hh_mm(
container.find_all("span")[0].text.split("hrs")[0])
container = self.soup.find_all("div", {
"class": base_class + " light-sleep-circle-chart-placeholder"})[
0]
self.light_sleep_time = utils.parse_hh_mm(
container.find_all("span")[0].text.split("hrs")[0])
container = self.soup.find_all("div", {
"class": base_class + " awake-circle-chart-placeholder"})[
0]
self.awake_sleep_time = utils.parse_hh_mm(
container.find_all("span")[0].text.split("hrs")[0])
def to_dict(self):
return {
"night_sleep_time": self.night_sleep_time,
"nap_time": self.nap_time,
"total_sleep_time": self.total_sleep_time,
"bed_time": self.bed_time,
"wake_time": self.wake_time,
"deep_sleep_time": self.deep_sleep_time,
"light_sleep_time": self.light_sleep_time,
"awake_sleep_time": self.awake_sleep_time
}
class GCDayActivities(GCDaySection):
"""
Standard activity in the Garmin Connect timeline of day.
Common features are kcal, time, distance, type, name, link
"""
GPX_DOWNLOAD_URL = "https://connect.garmin.com/modern/proxy/download-service/export/gpx/activity/"
def __init__(self, raw_html):
"""
:param raw_html: str
HTML source snippet with information about section
"""
super().__init__(raw_html, tag="ACTIVITIES")
self.activities = []
def parse(self):
rows = self.soup.find_all("tr")
for r in rows[1:]: # discard header
try:
activity = self.parse_activity(r)
self.activities.append(activity)
except:
pass
@staticmethod
def parse_activity(raw_html):
"""
:param raw_html: str html code
Raw HTML code of row of table containing activity to parse
:return: dict
Dict with values of activity
"""
columns = raw_html.find_all("td")
time_day = columns[0].text.strip() # parse time of the day
try:
time_day = datetime.strptime(columns[0].text.strip(),
"%I:%M %p").time() # account for AM/PM
except:
pass
try:
duration = utils.parse_hh_mm_ss(
columns[2].text.strip()) # in case of multiple hours
except:
duration = utils.parse_hh_mm_ss("00:00")
link = str(columns[5].a["href"]).strip()
id_ref = link.split("/")[-1]
try:
url = utils.GARMIN_CONNECT_URL + link
except:
url = None
return {
"time_day": time_day,
"kcal": utils.parse_num(columns[1].text),
"duration": duration,
"distance": utils.parse_num(columns[3].text.split("km")[0]),
"type": columns[4].text.strip(),
"name": columns[5].text.strip(),
"url": url,
"gpx": GCDayActivities.GPX_DOWNLOAD_URL + id_ref
}
def to_dict(self):
return {
"activities": self.activities
}
def to_json(self):
activities = self.activities
for a in activities:
for k in a.keys():
a[k] = str(a[k]) # convert each field to string
return json.dumps(activities)
def to_csv_dict(self):
"""
:return: {}
Like super.to_csv_dict() but with totals instead
"""
d = self.get_totals_dict()
csv_d = {}
for k in d.keys():
new_key = str(self.tag) + ":" + k
csv_d[new_key] = str(d[k]) # edit key
return csv_d
def get_total_kcal(self):
"""
:return: float
Total kcal of all activities
"""
return sum(a["kcal"] for a in self.activities)
def get_total_duration(self):
"""
:return: timedelta
Total duration of all activities
"""
all_durations = [a["duration"] for a in
self.activities] # fetch duration of all activities
total_duration = timedelta(hours=0, minutes=0, seconds=0)
for duration in all_durations:
total_duration += timedelta(hours=duration.hour,
minutes=duration.minute,
seconds=duration.second) # sum all durations
return total_duration
def get_total_distance(self):
"""
:return: float
Total distance of all activities
"""
return sum(a["distance"] for a in self.activities)
def get_totals_dict(self):
"""
:return: {}
Self dict but with totals instead (total kcal, total distance ...)
"""
return {
"kcal": self.get_total_kcal(),
"duration": str(self.get_total_duration()),
"distance": self.get_total_distance(),
}
class GCDayBreakdown(GCDaySection):
"""
Standard activity in the Garmin Connect timeline of day.
Common features are highly active %, active %, sedentary %, sleep %
"""
def __init__(self, raw_html):
"""
:param raw_html: str
HTML source snippet with information about section
"""
super().__init__(raw_html, tag="BREAKDOWN")
self.highly_active = None
self.active = None
self.sedentary = None
self.sleeping = None
def parse(self):
values = self.soup.find_all("tspan")
values = [str(v.text).strip().replace("%", "") for v in
values] # remove jibberish
try:
self.highly_active = utils.parse_num(values[0])
except:
pass # None
try:
self.active = utils.parse_num(values[1])
except:
pass # None
try:
self.sedentary = utils.parse_num(values[2])
except:
pass # None
try:
self.sleeping = utils.parse_num(values[3])
except:
pass # None
def to_dict(self):
return {
"highly_active": self.highly_active,
"active": self.active,
"sedentary": self.sedentary,
"sleeping": self.sleeping
}
class GCDayTimeline(object):
"""
Standard Garmin Connect timeline of day as in webpage.
Each standard day consists of different sections:
- summary (day, likes, comment, kcal)
- steps (total, goal, distance, avg daily)
- sleep (total, deep total, light total, awake total)
- activities (for each one: kcal, time, distance, type, name, link)
- breakdown (highly active %, active %, sedentary %, sleep %)
"""
def __init__(self, date_time, summary_html,
steps_section_html, steps_details_html,
sleep_section_html, activities_section_html,
breakdown_section_html):
"""
:param date_time: datetime
Datetime of day
:param summary_html: str
HTML source snippet with information about the day
:param steps_section_html: str
HTML source snippet with information about daily steps
:param sleep_section_html: str
HTML source snippet with information about daily sleep
:param activities_section_html: str
HTML source snippet with information about daily activities
:param breakdown_section_html: str
HTML source snippet with information about daily breakdown
"""
object.__init__(self)
self.date = date_time.date()
self.sections = {
"summary": GCDaySummary(summary_html),
"steps": GCDaySteps(steps_section_html),
"steps details": GCDetailsSteps(date_time, steps_details_html),
"sleep": GCDaySleep(sleep_section_html),
"activities": GCDayActivities(activities_section_html),
"breakdown": GCDayBreakdown(breakdown_section_html)
} # list of sections in day
def parse(self):
"""
:return: void
Finds all sections to parse, then builds corresponding objects and parses everything
"""
for section in self.sections.values(): # parse each section
section.parse()
def __getattr__(self, item):
return self.sections[item]
def to_dict(self):
"""
:return: dict
Dictionary with keys (obj fields) and values (obj values)
"""
return self.sections
def to_csv_dict(self):
"""
:return: {}
        Like self.to_dict() but flattened (keys and values are not nested); activities are reduced to their totals only
"""
d = {
"date": str(self.date)
} # resulting dict
for section in self.sections.values():
d.update(section.to_csv_dict()) # add each section keys
return d
def to_json(self):
"""
:return: json object
A json representation of this object
"""
sections_dumps = {} # dict section name -> section json
for s in self.sections.keys():
sections_dumps[s] = json.loads(
self.sections[s].to_json()) # json object
day_dump = {
str(self.date): sections_dumps # add date
}
return json.dumps(day_dump)
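# Minimal, hypothetical smoke test: the section parsers swallow missing markup,
# so trivial placeholder snippets (not real Garmin Connect HTML) are enough to
# exercise parse() and the dict/csv conversions.
if __name__ == "__main__":
    summary = GCDaySummary("<div></div>")
    summary.parse()  # every parse_* step fails quietly, leaving None values
    print(summary.to_csv_dict())  # e.g. {'SUMMARY:likes': 'None', ...}
    activities = GCDayActivities("<table><tr></tr></table>")
    activities.parse()  # header row only -> no activities collected
    print(activities.get_totals_dict())  # {'kcal': 0, 'duration': '0:00:00', 'distance': 0}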
|
{
"content_hash": "dc62310058d20018ad97a6a8cd1de4d5",
"timestamp": "",
"source": "github",
"line_count": 649,
"max_line_length": 121,
"avg_line_length": 29.106317411402156,
"alnum_prop": 0.5344097406034939,
"repo_name": "sirfoga/pygce",
"id": "53debd8adfa3dd9eaefbfc53864c19c1a1469341",
"size": "18940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygce/models/garmin/timeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58267"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django_socketio import events
def home(request, template='index.html'):
context = {"room": "ping",}
return render_to_response(template, context, RequestContext(request))
def msg_test(request, template='msg_test.html'):
context = {"room": "pong",}
return render_to_response(template, context, RequestContext(request))
def review(request, template='review.html'):
context = {"room": "pong",}
return render_to_response(template, context, RequestContext(request))
@events.on_message(channel="ping")
def message(request, socket, context, message):
if message["a"] == "clr":
pos = message["p"]
red = message["r"]
green = message["g"]
blue = message["b"]
msg = {
"p": pos,
"r": red,
"g": green,
"b": blue,
"a": "c",
}
data = {"pid": socket.session.session_id, 'a': 'ack'}
socket.send(data)
socket.broadcast_channel(msg, channel="pong")
@events.on_connect()
def subscribe(request, socket, context):
data = {"pid": socket.session.session_id, 'msg': "Connect"}
socket.send(data)
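# Illustrative payload (field names follow the handler above, values are
# arbitrary): a client on the "ping" channel sends a colour message shaped like
# this; the handler acks it back to the sender and rebroadcasts it on "pong"
# with the action rewritten to "c".
EXAMPLE_COLOUR_MESSAGE = {
    "a": "clr",  # action: set colour
    "p": 3,  # position index
    "r": 255,  # red
    "g": 64,  # green
    "b": 0,  # blue
}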
|
{
"content_hash": "51863350f65ea1852da355e44474df9e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 31.227272727272727,
"alnum_prop": 0.61863173216885,
"repo_name": "ajfisher/sketching-conf-demo",
"id": "fe6a66ca7a21db787c175ff73bcb0522586c5d20",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sketching/sketching/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "19516"
},
{
"name": "C",
"bytes": "2187"
},
{
"name": "C++",
"bytes": "6404"
},
{
"name": "JavaScript",
"bytes": "1312"
},
{
"name": "Objective-C",
"bytes": "4973"
},
{
"name": "Python",
"bytes": "9469"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# lfs imports
from lfs.supplier.models import Supplier
class SupplierAdmin(admin.ModelAdmin):
"""
"""
admin.site.register(Supplier, SupplierAdmin)
|
{
"content_hash": "1da601baa4a49db9b6285d3e12513b2b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 19.1,
"alnum_prop": 0.7539267015706806,
"repo_name": "baffolobill/django-lfs",
"id": "6546d438fd6798225cc21c096332db28c03567fe",
"size": "208",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lfs/supplier/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96502"
},
{
"name": "HTML",
"bytes": "615650"
},
{
"name": "JavaScript",
"bytes": "591493"
},
{
"name": "Python",
"bytes": "1385034"
}
],
"symlink_target": ""
}
|
import re
#apetools
from apetools.commons import errors
from basewificommand import BaseWifiCommand
MAC_UNAVAILABLE = "MAC Unavailable (use `netcfg`)"
CommandError = errors.CommandError
class WifiCommandError(CommandError):
"""
An error to raise if the Wifi Command fails
"""
# end class WifiCommandError
class WifiCommand(BaseWifiCommand):
"""
The Wifi Command interprets `wifi` information
:warning: this was copied from the wl command and needs updating
"""
def __init__(self, *args, **kwargs):
"""
:param:
- `connection`: A connection to the device
- `interface`: The interface to check
- `operating_system` : The operating system on the devices.
"""
super(WifiCommand, self).__init__(*args, **kwargs)
return
@property
def bitrate(self):
return self.get("bitrate").readline()
@property
def interface(self):
"""
:return: the name of the wireless interface
"""
if self._interface is None:
self.logger.warning("wl doesn't use the interface name")
return self._interface
@property
def rssi(self):
"""
This is dynamically generated
:return: The rssi for the interface
"""
output = self.get("rssi")
return output.readline()
@property
def mac_address(self):
"""
:return: MAC Address of the interface
"""
if self._mac_address is None:
output = self.get("mac")
self._mac_address = output.readline()
return self._mac_address
@property
def ssid(self):
"""
:return: the SSID of the currently attched ap
"""
output = self.get('ssid')
return output.readline().split(":")[-1]
@property
def noise(self):
"""
:return: the current noise
"""
return self.get('noise').readline()
@property
def channel(self):
"""
:return: the current channel setting
"""
output = self.get('status')
for line in output:
if "Control channel:" in line:
return line.split(":")[-1].strip()
return
@property
def bssid(self):
"""
:return: the bssid of the attached ap
"""
return self.get('bssid').readline()
def get(self, subcommand):
"""
:param:
- `subcommand`: `wifi` subcommand
:return: stdout for the command
"""
with self.connection.lock:
output, error = self.connection.wifi(subcommand)
err = error.readline()
if "not found" in err:
self.logger.error(err)
raise CommandError("The `wifi` command wasn't found on the device")
if len(err) > 1:
self.logger.error(err)
return output
def _match(self, expression, name, command):
"""
:param:
- `expression`: The regular expression to match
- `name`: The group name to pull the match out of the line
- `command`: The command to send to iw
:return: The named-group that matched or None
"""
expression = re.compile(expression)
with self.connection.lock:
output, error = self.connection.iw(command)
for line in output:
match = expression.search(line)
if match:
return match.group(name)
err = error.read()
if len(err):
self.logger.error(err)
if "No such device" in err:
raise CommandError("Unknown Interface: {0}".format(self.interface))
else:
raise CommandError(err)
return
def __str__(self):
return "({iface}) RSSI: {rssi}".format(iface=self.interface,
rssi=self.rssi)
# end class WifiCommand
if __name__ == "__main__":
from apetools.connections import adbconnection
connection = adbconnection.ADBShellConnection()
    wifi = WifiCommand(connection)
    print(str(wifi))
|
{
"content_hash": "b815283659698c80b1fbe94b6fbd7806",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 83,
"avg_line_length": 27.294117647058822,
"alnum_prop": 0.5538793103448276,
"repo_name": "rsnakamura/oldape",
"id": "022af6d414683eab98e07b0614a8fd32b5c41373",
"size": "4195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apetools/commands/wificommand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5832"
},
{
"name": "Python",
"bytes": "1076570"
},
{
"name": "Shell",
"bytes": "47671"
}
],
"symlink_target": ""
}
|
import os
import sys
import shutil
from setuptools import setup
#------------------------------------------------------------------------
# Optional building with MAVEN
#------------------------------------------------------------------------
JAVA_SRC = "knit_jvm"
jar_file = os.path.join(JAVA_SRC, "target", "knit-1.0-SNAPSHOT.jar")
with open('requirements.txt') as f:
requires = f.read().splitlines()
if 'mvn' in sys.argv:
os.chdir(JAVA_SRC)
build_cmd = "mvn clean install -q"
os.system(build_cmd)
os.chdir("..")
sys.argv.remove("mvn")
java_lib_dir = os.path.join("knit", "java_libs")
if not os.path.exists(java_lib_dir):
os.mkdir(java_lib_dir)
shutil.copy(jar_file, java_lib_dir)
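# Illustrative note (not part of the original script): passing ``mvn`` on the
# command line triggers the optional Maven build above before the jar is copied,
# e.g. ``python setup.py mvn install``; without it, a previously built
# knit-1.0-SNAPSHOT.jar is expected to already exist under knit_jvm/target.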
setup(name='knit',
version="0.2.4",
description='Python wrapper for YARN Applications',
url='http://github.com/dask/knit/',
maintainer='Benjamin Zaitlen',
maintainer_email='bzaitlen@anaconda.com',
license='BSD',
keywords='yarn',
packages=['knit', 'dask_yarn'],
package_data={'knit': ['java_libs/knit-1.0-SNAPSHOT.jar']},
install_requires=requires,
long_description=(open('README.rst').read()
if os.path.exists('README.rst') else ''),
zip_safe=False)
|
{
"content_hash": "af641712ba36722c187ff1f49f6d4747",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 73,
"avg_line_length": 30.476190476190474,
"alnum_prop": 0.553125,
"repo_name": "blaze/knit",
"id": "3ace583c6eaae183d28532c79d67f29aa2ba0563",
"size": "1303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111390"
},
{
"name": "Scala",
"bytes": "22075"
},
{
"name": "Shell",
"bytes": "4274"
}
],
"symlink_target": ""
}
|
from winrm.protocol import Protocol
from st2actions.runners.pythonrunner import Action
__all__ = [
'TryWinRMAction'
]
class TryWinRMAction(Action):
    def run(self, host, password, username='Administrator',
            port=5732, secure=True):
        # NB: WinRM's default listener ports are 5985 (http) / 5986 (https);
        # pass ``port`` explicitly if the target uses the standard listeners.
proto = 'https' if secure else 'http'
p = Protocol(
endpoint='%s://%s:%i/wsman' % (proto, host, port), # RFC 2732?
transport='ntlm',
username=username,
password=password,
server_cert_validation='ignore')
shell_id = p.open_shell()
command_id = p.run_command(shell_id, 'ipconfig', ['/all'])
std_out, std_err, status_code = p.get_command_output(shell_id,
command_id)
p.cleanup_command(shell_id, command_id)
p.close_shell(shell_id)
return {'stdout': std_out, 'stderr': std_err}
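# Illustrative sketch (not part of the original pack): driving the action directly
# from Python. The host, credentials and the empty pack config are placeholders;
# when run inside StackStorm the runner supplies the config and parameters instead.
def _example_try_winrm(host='192.0.2.10', password='ChangeMe'):
    action = TryWinRMAction(config={})
    # use the standard WinRM HTTPS listener port explicitly
    return action.run(host=host, password=password, port=5986, secure=True)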
|
{
"content_hash": "5ae35ef287848e7b5fd9fd9781ec2f31",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 36.11538461538461,
"alnum_prop": 0.5431309904153354,
"repo_name": "tonybaloney/st2contrib",
"id": "9f3f40561ece95a594e7858fce9fb74d3fcd5155",
"size": "939",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "packs/windows/actions/try_winrm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5392"
},
{
"name": "Python",
"bytes": "1285946"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7547"
}
],
"symlink_target": ""
}
|
import sys
import logging
import warnings
import click
import fiona
import rasterio
from shapely.geometry import mapping
import drapery
"""
See
https://github.com/mapbox/rasterio/blob/master/rasterio/rio/sample.py
"""
@click.command(options_metavar='<options>')
@click.argument('source_f', nargs=1, type=click.Path(exists=True), metavar='<source_file>')
@click.argument('raster_f', nargs=1, type=click.Path(exists=True), metavar='<raster_file>')
@click.option('-o', '--output', metavar='<output_file>', type=click.Path(), help="Output file path")
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode')
def cli(source_f, raster_f, output, verbose):
"""
Convert 2D geometries to 3D.
\b
Example:
drape point.shp elevation.tif -o point_z.shp
"""
with fiona.open(source_f, 'r') as source:
source_driver = source.driver
source_crs = source.crs
sink_schema = source.schema.copy()
source_geom = source.schema['geometry']
if source_geom == 'Point':
sink_schema['geometry'] = '3D Point'
elif source_geom == 'LineString':
sink_schema['geometry'] = '3D LineString'
elif source_geom == '3D Point' or source_geom == '3D LineString':
pass
else:
click.BadParameter("Source geometry type {} not implemented".format(source_geom))
with rasterio.open(raster_f) as raster:
if source_crs != raster.crs:
click.BadParameter("Features and raster have different CRS.")
if raster.count > 1:
warnings.warn("Found {0} bands in {1}, expected a single band raster".format(raster.bands, raster_f))
supported = ['int16', 'int32', 'float32', 'float64']
if raster.dtypes[0] not in supported:
warnings.warn("Found {0} type in {1}, expected one of {2}".format(raster.dtypes[0], raster_f, supported))
with fiona.open(
output, 'w',
driver=source_driver,
crs=source_crs,
schema=sink_schema) as sink:
for feature in source:
try:
feature_z = drapery.drape(raster, feature)
sink.write({
'geometry': mapping(feature_z),
'properties': feature['properties'],
})
except Exception:
logging.exception("Error processing feature %s:", feature['id'])
#print(sink.closed)
#print(raster.closed)
#print(source.closed)
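# Illustrative sketch (not part of the original module): invoking the command
# programmatically through click's test runner. The shapefile and raster paths
# are placeholders for files you would supply yourself.
def _example_invoke():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ['points.shp', 'dem.tif', '-o', 'points_z.shp'])
    return result.exit_code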
|
{
"content_hash": "3dc7569db1aa7cda192a08c1c8ab6841",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 121,
"avg_line_length": 37.82857142857143,
"alnum_prop": 0.574773413897281,
"repo_name": "mrahnis/drapery",
"id": "51aadbf5a985e0eaa7442defdfea7f7e1f6cc431",
"size": "2648",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "drapery/cli/drape.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "413"
},
{
"name": "Python",
"bytes": "94966"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.us_holidays as bsu
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'us_holidays',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.us_holidays", ALL))
@pytest.mark.sampledata
def test_us_holidays():
import bokeh.sampledata.us_holidays as bsu
assert isinstance(bsu.us_holidays, list)
# check detail for package data
assert len(bsu.us_holidays) == 305
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
{
"content_hash": "70bfb0f1968d453365a2991524ed2ceb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 86,
"avg_line_length": 32.07843137254902,
"alnum_prop": 0.30378973105134477,
"repo_name": "timsnyder/bokeh",
"id": "c4872a1852dc8f8eecd38bf80e1487c52033b08d",
"size": "2140",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/tests/test_us_holidays.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
}
|
from impacket.dcerpc.v5 import ndr
from impacket.dcerpc.v5.ndr import NDRCALL, NDR, NDRSTRUCT, NDRENUM, NDRUNION, NDRPOINTER, NDRUniConformantArray, NDRUniFixedArray, NDRUniConformantVaryingArray
from impacket.dcerpc.v5.dtypes import *
from impacket import system_errors, nt_errors
from impacket.uuid import uuidtup_to_bin
from impacket.dcerpc.v5.enum import Enum
from impacket.dcerpc.v5.samr import OLD_LARGE_INTEGER
from impacket.dcerpc.v5.lsad import PLSA_FOREST_TRUST_INFORMATION
from impacket.structure import Structure
from impacket import ntlm
from impacket import crypto
import hmac, hashlib
from struct import pack, unpack
try:
from Crypto.Cipher import DES, AES, ARC4
except Exception:
print "Warning: You don't have any crypto installed. You need PyCrypto"
print "See http://www.pycrypto.org/"
MSRPC_UUID_NRPC = uuidtup_to_bin(('12345678-1234-ABCD-EF00-01234567CFFB', '1.0'))
class DCERPCSessionError(Exception):
def __init__( self, packet = None, error_code = None):
Exception.__init__(self)
self.packet = packet
if packet is not None:
self.error_code = packet['ErrorCode']
else:
self.error_code = error_code
def get_error_code( self ):
return self.error_code
def get_packet( self ):
return self.packet
def __str__( self ):
key = self.error_code
if (system_errors.ERROR_MESSAGES.has_key(key)):
error_msg_short = system_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = system_errors.ERROR_MESSAGES[key][1]
return 'NRPC SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
elif (nt_errors.ERROR_MESSAGES.has_key(key)):
error_msg_short = nt_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = nt_errors.ERROR_MESSAGES[key][1]
return 'NRPC SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'NRPC SessionError: unknown error code: 0x%x' % (self.error_code)
################################################################################
# CONSTANTS
################################################################################
# 2.2.1.2.5 NL_DNS_NAME_INFO
# Type
NlDnsLdapAtSite = 22
NlDnsGcAtSite = 25
NlDnsDsaCname = 28
NlDnsKdcAtSite = 30
NlDnsDcAtSite = 32
NlDnsRfc1510KdcAtSite = 34
NlDnsGenericGcAtSite = 36
# DnsDomainInfoType
NlDnsDomainName = 1
NlDnsDomainNameAlias = 2
NlDnsForestName = 3
NlDnsForestNameAlias = 4
NlDnsNdncDomainName = 5
NlDnsRecordName = 6
# 2.2.1.3.15 NL_OSVERSIONINFO_V1
# wSuiteMask
VER_SUITE_BACKOFFICE = 0x00000004
VER_SUITE_BLADE = 0x00000400
VER_SUITE_COMPUTE_SERVER = 0x00004000
VER_SUITE_DATACENTER = 0x00000080
VER_SUITE_ENTERPRISE = 0x00000002
VER_SUITE_EMBEDDEDNT = 0x00000040
VER_SUITE_PERSONAL = 0x00000200
VER_SUITE_SINGLEUSERTS = 0x00000100
VER_SUITE_SMALLBUSINESS = 0x00000001
VER_SUITE_SMALLBUSINESS_RESTRICTED = 0x00000020
VER_SUITE_STORAGE_SERVER = 0x00002000
VER_SUITE_TERMINAL = 0x00000010
# wProductType
VER_NT_DOMAIN_CONTROLLER = 0x00000002
VER_NT_SERVER = 0x00000003
VER_NT_WORKSTATION = 0x00000001
# 2.2.1.4.18 NETLOGON Specific Access Masks
NETLOGON_UAS_LOGON_ACCESS = 0x0001
NETLOGON_UAS_LOGOFF_ACCESS = 0x0002
NETLOGON_CONTROL_ACCESS = 0x0004
NETLOGON_QUERY_ACCESS = 0x0008
NETLOGON_SERVICE_ACCESS = 0x0010
NETLOGON_FTINFO_ACCESS = 0x0020
NETLOGON_WKSTA_RPC_ACCESS = 0x0040
# 3.5.4.9.1 NetrLogonControl2Ex (Opnum 18)
# FunctionCode
NETLOGON_CONTROL_QUERY = 0x00000001
NETLOGON_CONTROL_REPLICATE = 0x00000002
NETLOGON_CONTROL_SYNCHRONIZE = 0x00000003
NETLOGON_CONTROL_PDC_REPLICATE = 0x00000004
NETLOGON_CONTROL_REDISCOVER = 0x00000005
NETLOGON_CONTROL_TC_QUERY = 0x00000006
NETLOGON_CONTROL_TRANSPORT_NOTIFY = 0x00000007
NETLOGON_CONTROL_FIND_USER = 0x00000008
NETLOGON_CONTROL_CHANGE_PASSWORD = 0x00000009
NETLOGON_CONTROL_TC_VERIFY = 0x0000000A
NETLOGON_CONTROL_FORCE_DNS_REG = 0x0000000B
NETLOGON_CONTROL_QUERY_DNS_REG = 0x0000000C
NETLOGON_CONTROL_BACKUP_CHANGE_LOG = 0x0000FFFC
NETLOGON_CONTROL_TRUNCATE_LOG = 0x0000FFFD
NETLOGON_CONTROL_SET_DBFLAG = 0x0000FFFE
NETLOGON_CONTROL_BREAKPOINT = 0x0000FFFF
################################################################################
# STRUCTURES
################################################################################
# 3.5.4.1 RPC Binding Handles for Netlogon Methods
LOGONSRV_HANDLE = WSTR
PLOGONSRV_HANDLE = LPWSTR
# 2.2.1.1.1 CYPHER_BLOCK
class CYPHER_BLOCK(NDRSTRUCT):
structure = (
('Data', '8s=""'),
)
def getAlignment(self):
return 1
NET_API_STATUS = DWORD
# 2.2.1.1.2 STRING
from impacket.dcerpc.v5.lsad import STRING
# 2.2.1.1.3 LM_OWF_PASSWORD
class CYPHER_BLOCK_ARRAY(NDRUniFixedArray):
def getDataLen(self, data):
return len(CYPHER_BLOCK())*2
class LM_OWF_PASSWORD(NDRSTRUCT):
structure = (
('Data', CYPHER_BLOCK_ARRAY),
)
# 2.2.1.1.4 NT_OWF_PASSWORD
NT_OWF_PASSWORD = LM_OWF_PASSWORD
ENCRYPTED_NT_OWF_PASSWORD = NT_OWF_PASSWORD
# 2.2.1.3.4 NETLOGON_CREDENTIAL
class UCHAR_FIXED_ARRAY(NDRUniFixedArray):
align = 1
def getDataLen(self, data):
return len(CYPHER_BLOCK())
class NETLOGON_CREDENTIAL(NDRSTRUCT):
structure = (
('Data',UCHAR_FIXED_ARRAY),
)
def getAlignment(self):
return 1
# 2.2.1.1.5 NETLOGON_AUTHENTICATOR
class NETLOGON_AUTHENTICATOR(NDRSTRUCT):
structure = (
('Credential', NETLOGON_CREDENTIAL),
('Timestamp', DWORD),
)
class PNETLOGON_AUTHENTICATOR(NDRPOINTER):
referent = (
('Data', NETLOGON_AUTHENTICATOR),
)
# 2.2.1.2.1 DOMAIN_CONTROLLER_INFOW
class DOMAIN_CONTROLLER_INFOW(NDRSTRUCT):
structure = (
('DomainControllerName', LPWSTR),
('DomainControllerAddress', LPWSTR),
('DomainControllerAddressType', ULONG),
('DomainGuid', GUID),
('DomainName', LPWSTR),
('DnsForestName', LPWSTR),
('Flags', ULONG),
('DcSiteName', LPWSTR),
('ClientSiteName', LPWSTR),
)
class PDOMAIN_CONTROLLER_INFOW(NDRPOINTER):
referent = (
('Data', DOMAIN_CONTROLLER_INFOW),
)
# 2.2.1.2.2 NL_SITE_NAME_ARRAY
class RPC_UNICODE_STRING_ARRAY(NDRUniConformantArray):
item = RPC_UNICODE_STRING
class PRPC_UNICODE_STRING_ARRAY(NDRPOINTER):
referent = (
('Data', RPC_UNICODE_STRING_ARRAY),
)
class NL_SITE_NAME_ARRAY(NDRSTRUCT):
structure = (
('EntryCount', ULONG),
('SiteNames', PRPC_UNICODE_STRING_ARRAY),
)
class PNL_SITE_NAME_ARRAY(NDRPOINTER):
referent = (
('Data', NL_SITE_NAME_ARRAY),
)
# 2.2.1.2.3 NL_SITE_NAME_EX_ARRAY
class RPC_UNICODE_STRING_ARRAY(NDRUniConformantArray):
item = RPC_UNICODE_STRING
class PRPC_UNICODE_STRING_ARRAY(NDRPOINTER):
referent = (
('Data', RPC_UNICODE_STRING_ARRAY),
)
class NL_SITE_NAME_EX_ARRAY(NDRSTRUCT):
structure = (
('EntryCount', ULONG),
('SiteNames', PRPC_UNICODE_STRING_ARRAY),
('SubnetNames', PRPC_UNICODE_STRING_ARRAY),
)
class PNL_SITE_NAME_EX_ARRAY(NDRPOINTER):
referent = (
('Data', NL_SITE_NAME_EX_ARRAY),
)
# 2.2.1.2.4 NL_SOCKET_ADDRESS
# 2.2.1.2.4.1 IPv4 Address Structure
class IPv4Address(Structure):
structure = (
('AddressFamily', '<H=0'),
('Port', '<H=0'),
('Address', '<L=0'),
('Padding', '<L=0'),
)
class UCHAR_ARRAY(NDRUniConformantArray):
item = 'c'
class PUCHAR_ARRAY(NDRPOINTER):
referent = (
('Data', UCHAR_ARRAY),
)
class NL_SOCKET_ADDRESS(NDRSTRUCT):
structure = (
('lpSockaddr', PUCHAR_ARRAY),
('iSockaddrLength', ULONG),
)
class NL_SOCKET_ADDRESS_ARRAY(NDRUniConformantArray):
item = NL_SOCKET_ADDRESS
# 2.2.1.2.5 NL_DNS_NAME_INFO
class NL_DNS_NAME_INFO(NDRSTRUCT):
structure = (
('Type', ULONG),
('DnsDomainInfoType', WSTR),
('Priority', ULONG),
('Weight', ULONG),
('Port', ULONG),
('Register', UCHAR),
('Status', ULONG),
)
# 2.2.1.2.6 NL_DNS_NAME_INFO_ARRAY
class NL_DNS_NAME_INFO_ARRAY(NDRUniConformantArray):
item = NL_DNS_NAME_INFO
class PNL_DNS_NAME_INFO_ARRAY(NDRPOINTER):
referent = (
('Data', NL_DNS_NAME_INFO_ARRAY),
)
class NL_DNS_NAME_INFO_ARRAY(NDRSTRUCT):
structure = (
('EntryCount', ULONG),
('DnsNamesInfo', PNL_DNS_NAME_INFO_ARRAY),
)
# 2.2.1.3 Secure Channel Establishment and Maintenance Structures
# ToDo
# 2.2.1.3.5 NETLOGON_LSA_POLICY_INFO
class NETLOGON_LSA_POLICY_INFO(NDRSTRUCT):
structure = (
('LsaPolicySize', ULONG),
('LsaPolicy', PUCHAR_ARRAY),
)
class PNETLOGON_LSA_POLICY_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_LSA_POLICY_INFO),
)
# 2.2.1.3.6 NETLOGON_WORKSTATION_INFO
class NETLOGON_WORKSTATION_INFO(NDRSTRUCT):
structure = (
('LsaPolicy', NETLOGON_LSA_POLICY_INFO),
('DnsHostName', LPWSTR),
('SiteName', LPWSTR),
('Dummy1', LPWSTR),
('Dummy2', LPWSTR),
('Dummy3', LPWSTR),
('Dummy4', LPWSTR),
('OsVersion', RPC_UNICODE_STRING),
('OsName', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('WorkstationFlags', ULONG),
('KerberosSupportedEncryptionTypes', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_WORKSTATION_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_WORKSTATION_INFO),
)
# 2.2.1.3.7 NL_TRUST_PASSWORD
class WCHAR_ARRAY(NDRUniFixedArray):
def getDataLen(self, data):
return 512
class NL_TRUST_PASSWORD(NDRSTRUCT):
structure = (
('Buffer', WCHAR_ARRAY),
('Length', LPWSTR),
)
# 2.2.1.3.8 NL_PASSWORD_VERSION
class NL_PASSWORD_VERSION(NDRSTRUCT):
structure = (
('ReservedField', ULONG),
('PasswordVersionNumber', ULONG),
('PasswordVersionPresent', ULONG),
)
# 2.2.1.3.9 NETLOGON_WORKSTATION_INFORMATION
class NETLOGON_WORKSTATION_INFORMATION(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('WorkstationInfo', PNETLOGON_WORKSTATION_INFO),
2 : ('LsaPolicyInfo', PNETLOGON_LSA_POLICY_INFO),
}
# 2.2.1.3.10 NETLOGON_ONE_DOMAIN_INFO
class NETLOGON_ONE_DOMAIN_INFO(NDRSTRUCT):
structure = (
('DomainName', RPC_UNICODE_STRING),
('DnsDomainName', RPC_UNICODE_STRING),
('DnsForestName', RPC_UNICODE_STRING),
('DomainGuid', GUID),
('DomainSid', PRPC_SID),
('TrustExtension', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class NETLOGON_ONE_DOMAIN_INFO_ARRAY(NDRUniConformantArray):
item = NETLOGON_ONE_DOMAIN_INFO
class PNETLOGON_ONE_DOMAIN_INFO_ARRAY(NDRPOINTER):
referent = (
('Data', NETLOGON_ONE_DOMAIN_INFO_ARRAY),
)
# 2.2.1.3.11 NETLOGON_DOMAIN_INFO
class NETLOGON_DOMAIN_INFO(NDRSTRUCT):
structure = (
('PrimaryDomain', NETLOGON_ONE_DOMAIN_INFO),
('TrustedDomainCount', ULONG),
('TrustedDomains', PNETLOGON_ONE_DOMAIN_INFO_ARRAY),
('LsaPolicy', NETLOGON_LSA_POLICY_INFO),
('DnsHostNameInDs', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('WorkstationFlags', ULONG),
('SupportedEncTypes', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DOMAIN_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_DOMAIN_INFO),
)
# 2.2.1.3.12 NETLOGON_DOMAIN_INFORMATION
class NETLOGON_DOMAIN_INFORMATION(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('DomainInfo', PNETLOGON_DOMAIN_INFO),
2 : ('LsaPolicyInfo', PNETLOGON_LSA_POLICY_INFO),
}
# 2.2.1.3.13 NETLOGON_SECURE_CHANNEL_TYPE
class NETLOGON_SECURE_CHANNEL_TYPE(NDRENUM):
class enumItems(Enum):
NullSecureChannel = 0
MsvApSecureChannel = 1
WorkstationSecureChannel = 2
TrustedDnsDomainSecureChannel = 3
TrustedDomainSecureChannel = 4
UasServerSecureChannel = 5
ServerSecureChannel = 6
CdcServerSecureChannel = 7
# 2.2.1.3.14 NETLOGON_CAPABILITIES
class NETLOGON_CAPABILITIES(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('ServerCapabilities', ULONG),
}
# 2.2.1.3.15 NL_OSVERSIONINFO_V1
class UCHAR_FIXED_ARRAY(NDRUniFixedArray):
def getDataLen(self, data):
return 128
class NL_OSVERSIONINFO_V1(NDRSTRUCT):
structure = (
('dwOSVersionInfoSize', DWORD),
('dwMajorVersion', DWORD),
('dwMinorVersion', DWORD),
('dwBuildNumber', DWORD),
('dwPlatformId', DWORD),
('szCSDVersion', UCHAR_FIXED_ARRAY),
('wServicePackMajor', USHORT),
('wServicePackMinor', USHORT),
('wSuiteMask', USHORT),
('wProductType', UCHAR),
('wReserved', UCHAR),
)
class PNL_OSVERSIONINFO_V1(NDRPOINTER):
referent = (
('Data', NL_OSVERSIONINFO_V1),
)
# 2.2.1.3.16 NL_IN_CHAIN_SET_CLIENT_ATTRIBUTES_V1
class PLPWSTR(NDRPOINTER):
referent = (
('Data', LPWSTR),
)
class NL_IN_CHAIN_SET_CLIENT_ATTRIBUTES_V1(NDRSTRUCT):
structure = (
('ClientDnsHostName', PLPWSTR),
('OsVersionInfo', PNL_OSVERSIONINFO_V1),
('OsName', PLPWSTR),
)
# 2.2.1.3.17 NL_IN_CHAIN_SET_CLIENT_ATTRIBUTES
class NL_IN_CHAIN_SET_CLIENT_ATTRIBUTES(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', NL_IN_CHAIN_SET_CLIENT_ATTRIBUTES_V1),
}
# 2.2.1.3.18 NL_OUT_CHAIN_SET_CLIENT_ATTRIBUTES_V1
class NL_OUT_CHAIN_SET_CLIENT_ATTRIBUTES_V1(NDRSTRUCT):
structure = (
('HubName', PLPWSTR),
('OldDnsHostName', PLPWSTR),
('SupportedEncTypes', LPULONG),
)
# 2.2.1.3.19 NL_OUT_CHAIN_SET_CLIENT_ATTRIBUTES
class NL_OUT_CHAIN_SET_CLIENT_ATTRIBUTES(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', NL_OUT_CHAIN_SET_CLIENT_ATTRIBUTES_V1),
}
# 2.2.1.4.1 LM_CHALLENGE
class CHAR_FIXED_8_ARRAY(NDRUniFixedArray):
def getDataLen(self, data):
return 8
class LM_CHALLENGE(NDRSTRUCT):
structure = (
('Data', CHAR_FIXED_8_ARRAY),
)
# 2.2.1.4.15 NETLOGON_LOGON_IDENTITY_INFO
class NETLOGON_LOGON_IDENTITY_INFO(NDRSTRUCT):
structure = (
('LogonDomainName', RPC_UNICODE_STRING),
('ParameterControl', ULONG),
('Reserved', OLD_LARGE_INTEGER),
('UserName', RPC_UNICODE_STRING),
('Workstation', RPC_UNICODE_STRING),
)
class PNETLOGON_LOGON_IDENTITY_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_LOGON_IDENTITY_INFO),
)
# 2.2.1.4.2 NETLOGON_GENERIC_INFO
class NETLOGON_GENERIC_INFO(NDRSTRUCT):
structure = (
('Identity', NETLOGON_LOGON_IDENTITY_INFO),
('PackageName', RPC_UNICODE_STRING),
('DataLength', ULONG),
('LogonData', PUCHAR_ARRAY),
)
class PNETLOGON_GENERIC_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_GENERIC_INFO),
)
# 2.2.1.4.3 NETLOGON_INTERACTIVE_INFO
class NETLOGON_INTERACTIVE_INFO(NDRSTRUCT):
structure = (
('Identity', NETLOGON_LOGON_IDENTITY_INFO),
('LmOwfPassword', LM_OWF_PASSWORD),
('NtOwfPassword', NT_OWF_PASSWORD),
)
class PNETLOGON_INTERACTIVE_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_INTERACTIVE_INFO),
)
# 2.2.1.4.4 NETLOGON_SERVICE_INFO
class NETLOGON_SERVICE_INFO(NDRSTRUCT):
structure = (
('Identity', NETLOGON_LOGON_IDENTITY_INFO),
('LmOwfPassword', LM_OWF_PASSWORD),
('NtOwfPassword', NT_OWF_PASSWORD),
)
class PNETLOGON_SERVICE_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_SERVICE_INFO),
)
# 2.2.1.4.5 NETLOGON_NETWORK_INFO
class NETLOGON_NETWORK_INFO(NDRSTRUCT):
structure = (
('Identity', NETLOGON_LOGON_IDENTITY_INFO),
('LmChallenge', LM_CHALLENGE),
('NtChallengeResponse', STRING),
('LmChallengeResponse', STRING),
)
class PNETLOGON_NETWORK_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_NETWORK_INFO),
)
# 2.2.1.4.16 NETLOGON_LOGON_INFO_CLASS
class NETLOGON_LOGON_INFO_CLASS(NDRENUM):
class enumItems(Enum):
NetlogonInteractiveInformation = 1
NetlogonNetworkInformation = 2
NetlogonServiceInformation = 3
NetlogonGenericInformation = 4
NetlogonInteractiveTransitiveInformation = 5
NetlogonNetworkTransitiveInformation = 6
NetlogonServiceTransitiveInformation = 7
# 2.2.1.4.6 NETLOGON_LEVEL
class NETLOGON_LEVEL(NDRUNION):
union = {
NETLOGON_LOGON_INFO_CLASS.NetlogonInteractiveInformation : ('LogonInteractive', PNETLOGON_INTERACTIVE_INFO),
NETLOGON_LOGON_INFO_CLASS.NetlogonInteractiveTransitiveInformation : ('LogonInteractiveTransitive', PNETLOGON_INTERACTIVE_INFO),
NETLOGON_LOGON_INFO_CLASS.NetlogonServiceInformation : ('LogonService', PNETLOGON_SERVICE_INFO),
NETLOGON_LOGON_INFO_CLASS.NetlogonServiceTransitiveInformation : ('LogonServiceTransitive', PNETLOGON_SERVICE_INFO),
NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkInformation : ('LogonNetwork', PNETLOGON_NETWORK_INFO),
NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation : ('LogonNetworkTransitive', PNETLOGON_NETWORK_INFO),
NETLOGON_LOGON_INFO_CLASS.NetlogonGenericInformation : ('LogonGeneric', PNETLOGON_GENERIC_INFO),
}
# 2.2.1.4.7 NETLOGON_SID_AND_ATTRIBUTES
class NETLOGON_SID_AND_ATTRIBUTES(NDRSTRUCT):
structure = (
('Sid', PRPC_SID),
('Attributes', ULONG),
)
# 2.2.1.4.8 NETLOGON_VALIDATION_GENERIC_INFO2
class NETLOGON_VALIDATION_GENERIC_INFO2(NDRSTRUCT):
structure = (
('DataLength', ULONG),
('ValidationData', PUCHAR_ARRAY),
)
class PNETLOGON_VALIDATION_GENERIC_INFO2(NDRPOINTER):
referent = (
('Data', NETLOGON_VALIDATION_GENERIC_INFO2),
)
# 2.2.1.4.9 USER_SESSION_KEY
USER_SESSION_KEY = LM_OWF_PASSWORD
# 2.2.1.4.10 GROUP_MEMBERSHIP
class GROUP_MEMBERSHIP(NDRSTRUCT):
structure = (
('RelativeId', ULONG),
('Attributes', ULONG),
)
class GROUP_MEMBERSHIP_ARRAY(NDRUniConformantArray):
item = GROUP_MEMBERSHIP
class PGROUP_MEMBERSHIP_ARRAY(NDRPOINTER):
referent = (
('Data', GROUP_MEMBERSHIP_ARRAY),
)
# 2.2.1.4.11 NETLOGON_VALIDATION_SAM_INFO
class LONG_ARRAY(NDRUniFixedArray):
def getDataLen(self, data):
return 4*10
class NETLOGON_VALIDATION_SAM_INFO(NDRSTRUCT):
structure = (
('LogonTime', OLD_LARGE_INTEGER),
('LogoffTime', OLD_LARGE_INTEGER),
('KickOffTime', OLD_LARGE_INTEGER),
('PasswordLastSet', OLD_LARGE_INTEGER),
('PasswordCanChange', OLD_LARGE_INTEGER),
('PasswordMustChange', OLD_LARGE_INTEGER),
('EffectiveName', RPC_UNICODE_STRING),
('FullName', RPC_UNICODE_STRING),
('LogonScript', RPC_UNICODE_STRING),
('ProfilePath', RPC_UNICODE_STRING),
('HomeDirectory', RPC_UNICODE_STRING),
('HomeDirectoryDrive', RPC_UNICODE_STRING),
('LogonCount', USHORT),
('BadPasswordCount', USHORT),
('UserId', ULONG),
('PrimaryGroupId', ULONG),
('GroupCount', ULONG),
('GroupIds', PGROUP_MEMBERSHIP_ARRAY),
('UserFlags', ULONG),
('UserSessionKey', USER_SESSION_KEY),
('LogonServer', RPC_UNICODE_STRING),
('LogonDomainName', RPC_UNICODE_STRING),
('LogonDomainId', PRPC_SID),
('ExpansionRoom', LONG_ARRAY),
)
class PNETLOGON_VALIDATION_SAM_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_VALIDATION_SAM_INFO),
)
# 2.2.1.4.12 NETLOGON_VALIDATION_SAM_INFO2
class NETLOGON_SID_AND_ATTRIBUTES_ARRAY(NDRUniConformantArray):
item = NETLOGON_SID_AND_ATTRIBUTES
class PNETLOGON_SID_AND_ATTRIBUTES_ARRAY(NDRPOINTER):
referent = (
('Data', NETLOGON_SID_AND_ATTRIBUTES_ARRAY),
)
class NETLOGON_VALIDATION_SAM_INFO2(NDRSTRUCT):
structure = (
('LogonTime', OLD_LARGE_INTEGER),
('LogoffTime', OLD_LARGE_INTEGER),
('KickOffTime', OLD_LARGE_INTEGER),
('PasswordLastSet', OLD_LARGE_INTEGER),
('PasswordCanChange', OLD_LARGE_INTEGER),
('PasswordMustChange', OLD_LARGE_INTEGER),
('EffectiveName', RPC_UNICODE_STRING),
('FullName', RPC_UNICODE_STRING),
('LogonScript', RPC_UNICODE_STRING),
('ProfilePath', RPC_UNICODE_STRING),
('HomeDirectory', RPC_UNICODE_STRING),
('HomeDirectoryDrive', RPC_UNICODE_STRING),
('LogonCount', USHORT),
('BadPasswordCount', USHORT),
('UserId', ULONG),
('PrimaryGroupId', ULONG),
('GroupCount', ULONG),
('GroupIds', PGROUP_MEMBERSHIP_ARRAY),
('UserFlags', ULONG),
('UserSessionKey', USER_SESSION_KEY),
('LogonServer', RPC_UNICODE_STRING),
('LogonDomainName', RPC_UNICODE_STRING),
('LogonDomainId', PRPC_SID),
('ExpansionRoom', LONG_ARRAY),
('SidCount', ULONG),
('ExtraSids', PNETLOGON_SID_AND_ATTRIBUTES_ARRAY),
)
class PNETLOGON_VALIDATION_SAM_INFO2(NDRPOINTER):
referent = (
('Data', NETLOGON_VALIDATION_SAM_INFO2),
)
# 2.2.1.4.13 NETLOGON_VALIDATION_SAM_INFO4
class NETLOGON_VALIDATION_SAM_INFO4(NDRSTRUCT):
structure = (
('LogonTime', OLD_LARGE_INTEGER),
('LogoffTime', OLD_LARGE_INTEGER),
('KickOffTime', OLD_LARGE_INTEGER),
('PasswordLastSet', OLD_LARGE_INTEGER),
('PasswordCanChange', OLD_LARGE_INTEGER),
('PasswordMustChange', OLD_LARGE_INTEGER),
('EffectiveName', RPC_UNICODE_STRING),
('FullName', RPC_UNICODE_STRING),
('LogonScript', RPC_UNICODE_STRING),
('ProfilePath', RPC_UNICODE_STRING),
('HomeDirectory', RPC_UNICODE_STRING),
('HomeDirectoryDrive', RPC_UNICODE_STRING),
('LogonCount', USHORT),
('BadPasswordCount', USHORT),
('UserId', ULONG),
('PrimaryGroupId', ULONG),
('GroupCount', ULONG),
('GroupIds', PGROUP_MEMBERSHIP_ARRAY),
('UserFlags', ULONG),
('UserSessionKey', USER_SESSION_KEY),
('LogonServer', RPC_UNICODE_STRING),
('LogonDomainName', RPC_UNICODE_STRING),
('LogonDomainId', PRPC_SID),
('LMKey', CHAR_FIXED_8_ARRAY),
('UserAccountControl', ULONG),
('SubAuthStatus', ULONG),
('LastSuccessfulILogon', OLD_LARGE_INTEGER),
('LastFailedILogon', OLD_LARGE_INTEGER),
('FailedILogonCount', ULONG),
('Reserved4', ULONG),
('SidCount', ULONG),
('ExtraSids', PNETLOGON_SID_AND_ATTRIBUTES_ARRAY),
('DnsLogonDomainName', RPC_UNICODE_STRING),
('Upn', RPC_UNICODE_STRING),
('ExpansionString1', RPC_UNICODE_STRING),
('ExpansionString2', RPC_UNICODE_STRING),
('ExpansionString3', RPC_UNICODE_STRING),
('ExpansionString4', RPC_UNICODE_STRING),
('ExpansionString5', RPC_UNICODE_STRING),
('ExpansionString6', RPC_UNICODE_STRING),
('ExpansionString7', RPC_UNICODE_STRING),
('ExpansionString8', RPC_UNICODE_STRING),
('ExpansionString9', RPC_UNICODE_STRING),
('ExpansionString10', RPC_UNICODE_STRING),
)
class PNETLOGON_VALIDATION_SAM_INFO4(NDRPOINTER):
referent = (
('Data', NETLOGON_VALIDATION_SAM_INFO4),
)
# 2.2.1.4.17 NETLOGON_VALIDATION_INFO_CLASS
class NETLOGON_VALIDATION_INFO_CLASS(NDRENUM):
class enumItems(Enum):
NetlogonValidationUasInfo = 1
NetlogonValidationSamInfo = 2
NetlogonValidationSamInfo2 = 3
NetlogonValidationGenericInfo = 4
NetlogonValidationGenericInfo2 = 5
NetlogonValidationSamInfo4 = 6
# 2.2.1.4.14 NETLOGON_VALIDATION
class NETLOGON_VALIDATION(NDRUNION):
union = {
NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo : ('ValidationSam', PNETLOGON_VALIDATION_SAM_INFO),
NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo2 : ('ValidationSam2', PNETLOGON_VALIDATION_SAM_INFO2),
NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationGenericInfo2: ('ValidationGeneric2', PNETLOGON_VALIDATION_GENERIC_INFO2),
NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo4 : ('ValidationSam4', PNETLOGON_VALIDATION_SAM_INFO4),
}
# 2.2.1.5.2 NLPR_QUOTA_LIMITS
class NLPR_QUOTA_LIMITS(NDRSTRUCT):
structure = (
('PagedPoolLimit', ULONG),
('NonPagedPoolLimit', ULONG),
('MinimumWorkingSetSize', ULONG),
('MaximumWorkingSetSize', ULONG),
('PagefileLimit', ULONG),
('Reserved', OLD_LARGE_INTEGER),
)
# 2.2.1.5.3 NETLOGON_DELTA_ACCOUNTS
class ULONG_ARRAY(NDRUniConformantArray):
item = ULONG
class PULONG_ARRAY(NDRPOINTER):
referent = (
('Data', ULONG_ARRAY),
)
class NETLOGON_DELTA_ACCOUNTS(NDRSTRUCT):
structure = (
('PrivilegeEntries', ULONG),
('PrivilegeControl', ULONG),
('PrivilegeAttributes', PULONG_ARRAY),
('PrivilegeNames', PRPC_UNICODE_STRING_ARRAY),
('QuotaLimits', NLPR_QUOTA_LIMITS),
('SystemAccessFlags', ULONG),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_ACCOUNTS(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_ACCOUNTS),
)
# 2.2.1.5.5 NLPR_SID_INFORMATION
class NLPR_SID_INFORMATION(NDRSTRUCT):
structure = (
('SidPointer', PRPC_SID),
)
# 2.2.1.5.6 NLPR_SID_ARRAY
class NLPR_SID_INFORMATION_ARRAY(NDRUniConformantArray):
item = NLPR_SID_INFORMATION
class PNLPR_SID_INFORMATION_ARRAY(NDRPOINTER):
referent = (
('Data', NLPR_SID_INFORMATION_ARRAY),
)
class NLPR_SID_ARRAY(NDRSTRUCT):
    structure = (
('Count', ULONG),
('Sids', PNLPR_SID_INFORMATION_ARRAY),
)
# 2.2.1.5.7 NETLOGON_DELTA_ALIAS_MEMBER
class NETLOGON_DELTA_ALIAS_MEMBER(NDRSTRUCT):
structure = (
('Members', NLPR_SID_ARRAY),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_ALIAS_MEMBER(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_ALIAS_MEMBER),
)
# 2.2.1.5.8 NETLOGON_DELTA_DELETE_GROUP
class NETLOGON_DELTA_DELETE_GROUP(NDRSTRUCT):
structure = (
('AccountName', LPWSTR),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_DELETE_GROUP(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_DELETE_GROUP),
)
# 2.2.1.5.9 NETLOGON_DELTA_DELETE_USER
class NETLOGON_DELTA_DELETE_USER(NDRSTRUCT):
structure = (
('AccountName', LPWSTR),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_DELETE_USER(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_DELETE_USER),
)
# 2.2.1.5.10 NETLOGON_DELTA_DOMAIN
class NETLOGON_DELTA_DOMAIN(NDRSTRUCT):
structure = (
('DomainName', RPC_UNICODE_STRING),
('OemInformation', RPC_UNICODE_STRING),
('ForceLogoff', OLD_LARGE_INTEGER),
('MinPasswordLength', USHORT),
('PasswordHistoryLength', USHORT),
('MaxPasswordAge', OLD_LARGE_INTEGER),
('MinPasswordAge', OLD_LARGE_INTEGER),
('DomainModifiedCount', OLD_LARGE_INTEGER),
('DomainCreationTime', OLD_LARGE_INTEGER),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('DomainLockoutInformation', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('PasswordProperties', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_DOMAIN(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_DOMAIN),
)
# 2.2.1.5.13 NETLOGON_DELTA_GROUP
class NETLOGON_DELTA_GROUP(NDRSTRUCT):
structure = (
('Name', RPC_UNICODE_STRING),
('RelativeId', ULONG),
('Attributes', ULONG),
('AdminComment', RPC_UNICODE_STRING),
        ('SecurityInformation', SECURITY_INFORMATION),
        ('SecuritySize', ULONG),
        ('SecurityDescriptor', PUCHAR_ARRAY),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_GROUP(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_GROUP),
)
# 2.2.1.5.24 NETLOGON_RENAME_GROUP
class NETLOGON_RENAME_GROUP(NDRSTRUCT):
structure = (
('OldName', RPC_UNICODE_STRING),
('NewName', RPC_UNICODE_STRING),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_RENAME_GROUP(NDRPOINTER):
referent = (
('Data', NETLOGON_RENAME_GROUP),
)
# 2.2.1.5.14 NLPR_LOGON_HOURS
from impacket.dcerpc.v5.samr import SAMPR_LOGON_HOURS
NLPR_LOGON_HOURS = SAMPR_LOGON_HOURS
# 2.2.1.5.15 NLPR_USER_PRIVATE_INFO
class NLPR_USER_PRIVATE_INFO(NDRSTRUCT):
structure = (
('SensitiveData', UCHAR),
('DataLength', ULONG),
('Data', PUCHAR_ARRAY),
)
# 2.2.1.5.16 NETLOGON_DELTA_USER
class NETLOGON_DELTA_USER(NDRSTRUCT):
structure = (
('UserName', RPC_UNICODE_STRING),
('FullName', RPC_UNICODE_STRING),
('UserId', ULONG),
('PrimaryGroupId', ULONG),
('HomeDirectory', RPC_UNICODE_STRING),
('HomeDirectoryDrive', RPC_UNICODE_STRING),
('ScriptPath', RPC_UNICODE_STRING),
('AdminComment', RPC_UNICODE_STRING),
('WorkStations', RPC_UNICODE_STRING),
('LastLogon', OLD_LARGE_INTEGER),
('LastLogoff', OLD_LARGE_INTEGER),
('LogonHours', NLPR_LOGON_HOURS),
('BadPasswordCount', USHORT),
('LogonCount', USHORT),
('PasswordLastSet', OLD_LARGE_INTEGER),
('AccountExpires', OLD_LARGE_INTEGER),
('UserAccountControl', ULONG),
('EncryptedNtOwfPassword', PUCHAR_ARRAY),
('EncryptedLmOwfPassword', PUCHAR_ARRAY),
('NtPasswordPresent', UCHAR),
('LmPasswordPresent', UCHAR),
('PasswordExpired', UCHAR),
('UserComment', RPC_UNICODE_STRING),
('Parameters', RPC_UNICODE_STRING),
('CountryCode', USHORT),
('CodePage', USHORT),
('PrivateData', NLPR_USER_PRIVATE_INFO),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('ProfilePath', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_USER(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_USER),
)
# 2.2.1.5.25 NETLOGON_RENAME_USER
class NETLOGON_RENAME_USER(NDRSTRUCT):
structure = (
('OldName', RPC_UNICODE_STRING),
('NewName', RPC_UNICODE_STRING),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_RENAME_USER(NDRPOINTER):
referent = (
('Data', NETLOGON_RENAME_USER),
)
# 2.2.1.5.17 NETLOGON_DELTA_GROUP_MEMBER
class NETLOGON_DELTA_GROUP_MEMBER(NDRSTRUCT):
structure = (
('Members', PULONG_ARRAY),
('Attributes', PULONG_ARRAY),
('MemberCount', ULONG),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_GROUP_MEMBER(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_GROUP_MEMBER),
)
# 2.2.1.5.4 NETLOGON_DELTA_ALIAS
class NETLOGON_DELTA_ALIAS(NDRSTRUCT):
structure = (
('Name', RPC_UNICODE_STRING),
('RelativeId', ULONG),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('Comment', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_ALIAS(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_ALIAS),
)
# 2.2.1.5.23 NETLOGON_RENAME_ALIAS
class NETLOGON_RENAME_ALIAS(NDRSTRUCT):
structure = (
('OldName', RPC_UNICODE_STRING),
('NewName', RPC_UNICODE_STRING),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_RENAME_ALIAS(NDRPOINTER):
referent = (
('Data', NETLOGON_RENAME_ALIAS),
)
# 2.2.1.5.19 NETLOGON_DELTA_POLICY
class NETLOGON_DELTA_POLICY(NDRSTRUCT):
structure = (
('MaximumLogSize', ULONG),
('AuditRetentionPeriod', OLD_LARGE_INTEGER),
('AuditingMode', UCHAR),
('MaximumAuditEventCount', ULONG),
('EventAuditingOptions', PULONG_ARRAY),
('PrimaryDomainName', RPC_UNICODE_STRING),
('PrimaryDomainSid', PRPC_SID),
('QuotaLimits', NLPR_QUOTA_LIMITS),
('ModifiedId', OLD_LARGE_INTEGER),
('DatabaseCreationTime', OLD_LARGE_INTEGER),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_POLICY(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_POLICY),
)
# 2.2.1.5.22 NETLOGON_DELTA_TRUSTED_DOMAINS
class NETLOGON_DELTA_TRUSTED_DOMAINS(NDRSTRUCT):
structure = (
('DomainName', RPC_UNICODE_STRING),
('NumControllerEntries', ULONG),
('ControllerNames', PRPC_UNICODE_STRING_ARRAY),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_TRUSTED_DOMAINS(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_TRUSTED_DOMAINS),
)
# 2.2.1.5.20 NLPR_CR_CIPHER_VALUE
class UCHAR_ARRAY2(NDRUniConformantVaryingArray):
item = UCHAR
class PUCHAR_ARRAY2(NDRPOINTER):
referent = (
('Data', UCHAR_ARRAY2),
)
class NLPR_CR_CIPHER_VALUE(NDRSTRUCT):
structure = (
('Length', ULONG),
('MaximumLength', ULONG),
('Buffer', PUCHAR_ARRAY2),
)
# 2.2.1.5.21 NETLOGON_DELTA_SECRET
class NETLOGON_DELTA_SECRET(NDRSTRUCT):
structure = (
('CurrentValue', NLPR_CR_CIPHER_VALUE),
('CurrentValueSetTime', OLD_LARGE_INTEGER),
('OldValue', NLPR_CR_CIPHER_VALUE),
('OldValueSetTime', OLD_LARGE_INTEGER),
('SecurityInformation', SECURITY_INFORMATION),
('SecuritySize', ULONG),
('SecurityDescriptor', PUCHAR_ARRAY),
('DummyString1', RPC_UNICODE_STRING),
('DummyString2', RPC_UNICODE_STRING),
('DummyString3', RPC_UNICODE_STRING),
('DummyString4', RPC_UNICODE_STRING),
('DummyLong1', ULONG),
('DummyLong2', ULONG),
('DummyLong3', ULONG),
('DummyLong4', ULONG),
)
class PNETLOGON_DELTA_SECRET(NDRPOINTER):
referent = (
('Data', NETLOGON_DELTA_SECRET),
)
# 2.2.1.5.26 NLPR_MODIFIED_COUNT
class NLPR_MODIFIED_COUNT(NDRSTRUCT):
structure = (
('ModifiedCount', OLD_LARGE_INTEGER),
)
class PNLPR_MODIFIED_COUNT(NDRPOINTER):
referent = (
('Data', NLPR_MODIFIED_COUNT),
)
# 2.2.1.5.28 NETLOGON_DELTA_TYPE
class NETLOGON_DELTA_TYPE(NDRENUM):
class enumItems(Enum):
AddOrChangeDomain = 1
AddOrChangeGroup = 2
DeleteGroup = 3
RenameGroup = 4
AddOrChangeUser = 5
DeleteUser = 6
RenameUser = 7
ChangeGroupMembership = 8
AddOrChangeAlias = 9
DeleteAlias = 10
RenameAlias = 11
ChangeAliasMembership = 12
AddOrChangeLsaPolicy = 13
AddOrChangeLsaTDomain = 14
DeleteLsaTDomain = 15
AddOrChangeLsaAccount = 16
DeleteLsaAccount = 17
AddOrChangeLsaSecret = 18
DeleteLsaSecret = 19
DeleteGroupByName = 20
DeleteUserByName = 21
SerialNumberSkip = 22
# 2.2.1.5.27 NETLOGON_DELTA_UNION
class NETLOGON_DELTA_UNION(NDRUNION):
union = {
NETLOGON_DELTA_TYPE.AddOrChangeDomain : ('DeltaDomain', PNETLOGON_DELTA_DOMAIN),
NETLOGON_DELTA_TYPE.AddOrChangeGroup : ('DeltaGroup', PNETLOGON_DELTA_GROUP),
NETLOGON_DELTA_TYPE.RenameGroup : ('DeltaRenameGroup', PNETLOGON_DELTA_RENAME_GROUP),
NETLOGON_DELTA_TYPE.AddOrChangeUser : ('DeltaUser', PNETLOGON_DELTA_USER),
NETLOGON_DELTA_TYPE.RenameUser : ('DeltaRenameUser', PNETLOGON_DELTA_RENAME_USER),
NETLOGON_DELTA_TYPE.ChangeGroupMembership : ('DeltaGroupMember', PNETLOGON_DELTA_GROUP_MEMBER),
NETLOGON_DELTA_TYPE.AddOrChangeAlias : ('DeltaAlias', PNETLOGON_DELTA_ALIAS),
NETLOGON_DELTA_TYPE.RenameAlias : ('DeltaRenameAlias', PNETLOGON_DELTA_RENAME_ALIAS),
NETLOGON_DELTA_TYPE.ChangeAliasMembership : ('DeltaAliasMember', PNETLOGON_DELTA_ALIAS_MEMBER),
NETLOGON_DELTA_TYPE.AddOrChangeLsaPolicy : ('DeltaPolicy', PNETLOGON_DELTA_POLICY),
NETLOGON_DELTA_TYPE.AddOrChangeLsaTDomain : ('DeltaTDomains', PNETLOGON_DELTA_TRUSTED_DOMAINS),
NETLOGON_DELTA_TYPE.AddOrChangeLsaAccount : ('DeltaAccounts', PNETLOGON_DELTA_ACCOUNTS),
NETLOGON_DELTA_TYPE.AddOrChangeLsaSecret : ('DeltaSecret', PNETLOGON_DELTA_SECRET),
NETLOGON_DELTA_TYPE.DeleteGroupByName : ('DeltaDeleteGroup', PNETLOGON_DELTA_DELETE_GROUP),
NETLOGON_DELTA_TYPE.DeleteUserByName : ('DeltaDeleteUser', PNETLOGON_DELTA_DELETE_USER),
NETLOGON_DELTA_TYPE.SerialNumberSkip : ('DeltaSerialNumberSkip', PNLPR_MODIFIED_COUNT),
}
# 2.2.1.5.18 NETLOGON_DELTA_ID_UNION
class NETLOGON_DELTA_ID_UNION(NDRUNION):
union = {
NETLOGON_DELTA_TYPE.AddOrChangeDomain : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.AddOrChangeGroup : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.DeleteGroup : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.RenameGroup : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.AddOrChangeUser : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.DeleteUser : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.RenameUser : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.ChangeGroupMembership : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.AddOrChangeAlias : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.DeleteAlias : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.RenameAlias : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.ChangeAliasMembership : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.DeleteGroupByName : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.DeleteUserByName : ('Rid', ULONG),
NETLOGON_DELTA_TYPE.AddOrChangeLsaPolicy : ('Sid', PRPC_SID),
NETLOGON_DELTA_TYPE.AddOrChangeLsaTDomain : ('Sid', PRPC_SID),
NETLOGON_DELTA_TYPE.DeleteLsaTDomain : ('Sid', PRPC_SID),
NETLOGON_DELTA_TYPE.AddOrChangeLsaAccount : ('Sid', PRPC_SID),
NETLOGON_DELTA_TYPE.DeleteLsaAccount : ('Sid', PRPC_SID),
NETLOGON_DELTA_TYPE.AddOrChangeLsaSecret : ('Name', LPWSTR),
NETLOGON_DELTA_TYPE.DeleteLsaSecret : ('Name', LPWSTR),
}
# 2.2.1.5.11 NETLOGON_DELTA_ENUM
class NETLOGON_DELTA_ENUM(NDRSTRUCT):
structure = (
('DeltaType', NETLOGON_DELTA_TYPE),
('DeltaID', NETLOGON_DELTA_ID_UNION),
('DeltaUnion', NETLOGON_DELTA_UNION),
)
# 2.2.1.5.12 NETLOGON_DELTA_ENUM_ARRAY
class NETLOGON_DELTA_ENUM_ARRAY_ARRAY(NDRUniConformantArray):
item = NETLOGON_DELTA_ENUM
class PNETLOGON_DELTA_ENUM_ARRAY_ARRAY(NDRPOINTER):
    referent = (
        ('Data', NETLOGON_DELTA_ENUM_ARRAY_ARRAY),
    )
class NETLOGON_DELTA_ENUM_ARRAY(NDRSTRUCT):
    structure = (
        ('CountReturned', DWORD),
        ('Deltas', PNETLOGON_DELTA_ENUM_ARRAY_ARRAY),
    )
class PNETLOGON_DELTA_ENUM_ARRAY(NDRPOINTER):
    referent = (
        ('Data', NETLOGON_DELTA_ENUM_ARRAY),
    )
# 2.2.1.5.29 SYNC_STATE
class SYNC_STATE(NDRENUM):
class enumItems(Enum):
NormalState = 0
DomainState = 1
GroupState = 2
UasBuiltInGroupState = 3
UserState = 4
GroupMemberState = 5
AliasState = 6
AliasMemberState = 7
SamDoneState = 8
# 2.2.1.6.1 DOMAIN_NAME_BUFFER
class DOMAIN_NAME_BUFFER(NDRSTRUCT):
structure = (
('DomainNameByteCount', ULONG),
('DomainNames', PUCHAR_ARRAY),
)
# 2.2.1.6.2 DS_DOMAIN_TRUSTSW
class DS_DOMAIN_TRUSTSW(NDRSTRUCT):
structure = (
('NetbiosDomainName', LPWSTR),
('DnsDomainName', LPWSTR),
('Flags', ULONG),
('ParentIndex', ULONG),
('TrustType', ULONG),
('TrustAttributes', ULONG),
('DomainSid', PRPC_SID),
('DomainGuid', GUID),
)
# 2.2.1.6.3 NETLOGON_TRUSTED_DOMAIN_ARRAY
class DS_DOMAIN_TRUSTSW_ARRAY(NDRUniConformantArray):
item = DS_DOMAIN_TRUSTSW
class PDS_DOMAIN_TRUSTSW_ARRAY(NDRPOINTER):
referent = (
('Data', DS_DOMAIN_TRUSTSW_ARRAY),
)
class NETLOGON_TRUSTED_DOMAIN_ARRAY(NDRSTRUCT):
structure = (
('DomainCount', DWORD),
('Domains', PDS_DOMAIN_TRUSTSW_ARRAY),
)
# 2.2.1.6.4 NL_GENERIC_RPC_DATA
class NL_GENERIC_RPC_DATA(NDRSTRUCT):
structure = (
('UlongEntryCount', ULONG),
('UlongData', PULONG_ARRAY),
('UnicodeStringEntryCount', ULONG),
('UnicodeStringData', PRPC_UNICODE_STRING_ARRAY),
)
class PNL_GENERIC_RPC_DATA(NDRPOINTER):
referent = (
('Data', NL_GENERIC_RPC_DATA),
)
# 2.2.1.7.1 NETLOGON_CONTROL_DATA_INFORMATION
class NETLOGON_CONTROL_DATA_INFORMATION(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
5 : ('TrustedDomainName', LPWSTR),
6 : ('TrustedDomainName', LPWSTR),
9 : ('TrustedDomainName', LPWSTR),
10 : ('TrustedDomainName', LPWSTR),
65534 : ('DebugFlag', DWORD),
8: ('UserName', LPWSTR),
}
# 2.2.1.7.2 NETLOGON_INFO_1
class NETLOGON_INFO_1(NDRSTRUCT):
structure = (
('netlog1_flags', DWORD),
('netlog1_pdc_connection_status', NET_API_STATUS),
)
class PNETLOGON_INFO_1(NDRPOINTER):
referent = (
('Data', NETLOGON_INFO_1),
)
# 2.2.1.7.3 NETLOGON_INFO_2
class NETLOGON_INFO_2(NDRSTRUCT):
structure = (
('netlog2_flags', DWORD),
('netlog2_pdc_connection_status', NET_API_STATUS),
('netlog2_trusted_dc_name', LPWSTR),
('netlog2_tc_connection_status', NET_API_STATUS),
)
class PNETLOGON_INFO_2(NDRPOINTER):
referent = (
('Data', NETLOGON_INFO_2),
)
# 2.2.1.7.4 NETLOGON_INFO_3
class NETLOGON_INFO_3(NDRSTRUCT):
structure = (
('netlog3_flags', DWORD),
('netlog3_logon_attempts', DWORD),
('netlog3_reserved1', DWORD),
('netlog3_reserved2', DWORD),
('netlog3_reserved3', DWORD),
('netlog3_reserved4', DWORD),
('netlog3_reserved5', DWORD),
)
class PNETLOGON_INFO_3(NDRPOINTER):
referent = (
('Data', NETLOGON_INFO_3),
)
# 2.2.1.7.5 NETLOGON_INFO_4
class NETLOGON_INFO_4(NDRSTRUCT):
structure = (
('netlog4_trusted_dc_name', LPWSTR),
('netlog4_trusted_domain_name', LPWSTR),
)
class PNETLOGON_INFO_4(NDRPOINTER):
referent = (
('Data', NETLOGON_INFO_4),
)
# 2.2.1.7.6 NETLOGON_CONTROL_QUERY_INFORMATION
class NETLOGON_CONTROL_QUERY_INFORMATION(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('NetlogonInfo1', PNETLOGON_INFO_1),
2 : ('NetlogonInfo2', PNETLOGON_INFO_2),
3 : ('NetlogonInfo3', PNETLOGON_INFO_3),
4 : ('NetlogonInfo4', PNETLOGON_INFO_4),
}
# 2.2.1.8.1 NETLOGON_VALIDATION_UAS_INFO
class NETLOGON_VALIDATION_UAS_INFO(NDRSTRUCT):
structure = (
('usrlog1_eff_name', DWORD),
('usrlog1_priv', DWORD),
('usrlog1_auth_flags', DWORD),
('usrlog1_num_logons', DWORD),
('usrlog1_bad_pw_count', DWORD),
('usrlog1_last_logon', DWORD),
('usrlog1_last_logoff', DWORD),
('usrlog1_logoff_time', DWORD),
('usrlog1_kickoff_time', DWORD),
('usrlog1_password_age', DWORD),
('usrlog1_pw_can_change', DWORD),
('usrlog1_pw_must_change', DWORD),
('usrlog1_computer', LPWSTR),
('usrlog1_domain', LPWSTR),
('usrlog1_script_path', LPWSTR),
('usrlog1_reserved1', DWORD),
)
class PNETLOGON_VALIDATION_UAS_INFO(NDRPOINTER):
referent = (
('Data', NETLOGON_VALIDATION_UAS_INFO),
)
# 2.2.1.8.2 NETLOGON_LOGOFF_UAS_INFO
class NETLOGON_LOGOFF_UAS_INFO(NDRSTRUCT):
structure = (
('Duration', DWORD),
('LogonCount', USHORT),
)
# 2.2.1.8.3 UAS_INFO_0
class UAS_INFO_0(NDRSTRUCT):
structure = (
('ComputerName', '16s=""'),
('TimeCreated', ULONG),
('SerialNumber', ULONG),
)
def getAlignment(self):
return 4
# 2.2.1.8.4 NETLOGON_DUMMY1
class NETLOGON_DUMMY1(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('Dummy', ULONG),
}
# 3.5.4.8.2 NetrLogonComputeServerDigest (Opnum 24)
class CHAR_FIXED_16_ARRAY(NDRUniFixedArray):
def getDataLen(self, data):
return 16
################################################################################
# SSPI
################################################################################
# Constants
NL_AUTH_MESSAGE_NETBIOS_DOMAIN = 0x1
NL_AUTH_MESSAGE_NETBIOS_HOST = 0x2
NL_AUTH_MESSAGE_DNS_DOMAIN = 0x4
NL_AUTH_MESSAGE_DNS_HOST = 0x8
NL_AUTH_MESSAGE_NETBIOS_HOST_UTF8 = 0x10
NL_AUTH_MESSAGE_REQUEST = 0x0
NL_AUTH_MESSAGE_RESPONSE = 0x1
NL_SIGNATURE_HMAC_MD5 = 0x77
NL_SIGNATURE_HMAC_SHA256 = 0x13
NL_SEAL_NOT_ENCRYPTED = 0xffff
NL_SEAL_RC4 = 0x7A
NL_SEAL_AES128 = 0x1A
# Structures
class NL_AUTH_MESSAGE(Structure):
structure = (
('MessageType','<L=0'),
('Flags','<L=0'),
('Buffer',':'),
)
def __init__(self, data = None, alignment = 0):
Structure.__init__(self, data, alignment)
if data is None:
self['Buffer'] = '\x00'*4
class NL_AUTH_SIGNATURE(Structure):
structure = (
('SignatureAlgorithm','<H=0'),
('SealAlgorithm','<H=0'),
('Pad','<H=0xffff'),
('Flags','<H=0'),
('SequenceNumber','8s=""'),
('Checksum','8s=""'),
('_Confounder','_-Confounder','8'),
('Confounder',':'),
)
def __init__(self, data = None, alignment = 0):
Structure.__init__(self, data, alignment)
if data is None:
self['Confounder'] = ''
class NL_AUTH_SHA2_SIGNATURE(Structure):
structure = (
('SignatureAlgorithm','<H=0'),
('SealAlgorithm','<H=0'),
('Pad','<H=0xffff'),
('Flags','<H=0'),
('SequenceNumber','8s=""'),
('Checksum','32s=""'),
('_Confounder','_-Confounder','8'),
('Confounder',':'),
)
def __init__(self, data = None, alignment = 0):
Structure.__init__(self, data, alignment)
if data is None:
self['Confounder'] = ''
# Section 3.1.4.4.2
def ComputeNetlogonCredential(inputData, Sk):
k1 = Sk[:7]
k3 = crypto.transformKey(k1)
k2 = Sk[7:14]
k4 = crypto.transformKey(k2)
Crypt1 = DES.new(k3, DES.MODE_ECB)
Crypt2 = DES.new(k4, DES.MODE_ECB)
cipherText = Crypt1.encrypt(inputData)
return Crypt2.encrypt(cipherText)
# Section 3.1.4.4.1
def ComputeNetlogonCredentialAES(inputData, Sk):
IV='\x00'*16
Crypt1 = AES.new(Sk, AES.MODE_CFB, IV)
return Crypt1.encrypt(inputData)
# Section 3.1.4.3.1
def ComputeSessionKeyAES(sharedSecret, clientChallenge, serverChallenge, sharedSecretHash = None):
# added the ability to receive hashes already
if sharedSecretHash is None:
M4SS = ntlm.NTOWFv1(sharedSecret)
else:
M4SS = sharedSecretHash
hm = hmac.new(key=M4SS, digestmod=hashlib.sha256)
hm.update(clientChallenge)
hm.update(serverChallenge)
sessionKey = hm.digest()
return sessionKey[:16]
# 3.1.4.3.2 Strong-key Session-Key
def ComputeSessionKeyStrongKey(sharedSecret, clientChallenge, serverChallenge, sharedSecretHash = None):
# added the ability to receive hashes already
if sharedSecretHash is None:
M4SS = ntlm.NTOWFv1(sharedSecret)
else:
M4SS = sharedSecretHash
md5 = hashlib.new('md5')
md5.update('\x00'*4)
md5.update(clientChallenge)
md5.update(serverChallenge)
finalMD5 = md5.digest()
hm = hmac.new(M4SS)
hm.update(finalMD5)
return hm.digest()
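# Illustrative sketch (not part of the original module): combining the helpers above
# during session establishment. The challenges and the machine password hash are
# made-up placeholders; a real client receives the server challenge from the DC.
def _example_compute_credentials():
    clientChallenge = '12345678'            # 8-byte client nonce (placeholder)
    serverChallenge = '87654321'            # 8-byte nonce from the DC (placeholder)
    machineHash = '\x11' * 16               # NTOWFv1 hash of the machine password (placeholder)
    sessionKey = ComputeSessionKeyStrongKey('', clientChallenge, serverChallenge, machineHash)
    # prove knowledge of the session key by encrypting the client challenge
    clientCredential = ComputeNetlogonCredential(clientChallenge, sessionKey)
    return sessionKey, clientCredential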
def deriveSequenceNumber(sequenceNum):
res = ''
sequenceLow = sequenceNum & 0xffffffff
sequenceHigh = (sequenceNum >> 32) & 0xffffffff
sequenceHigh |= 0x80000000
res = pack('>L', sequenceLow)
res += pack('>L', sequenceHigh)
return res
def ComputeNetlogonSignatureAES(authSignature, message, confounder, sessionKey):
# [MS-NRPC] Section 3.3.4.2.1, point 7
hm = hmac.new(key=sessionKey, digestmod=hashlib.sha256)
hm.update(str(authSignature)[:8])
# If no confidentiality requested, it should be ''
hm.update(confounder)
hm.update(str(message))
return hm.digest()[:8]+'\x00'*24
def ComputeNetlogonSignatureMD5(authSignature, message, confounder, sessionKey):
# [MS-NRPC] Section 3.3.4.2.1, point 7
md5 = hashlib.new('md5')
md5.update('\x00'*4)
md5.update(str(authSignature)[:8])
# If no confidentiality requested, it should be ''
md5.update(confounder)
md5.update(str(message))
finalMD5 = md5.digest()
hm = hmac.new(sessionKey)
hm.update(finalMD5)
return hm.digest()[:8]
def encryptSequenceNumberRC4(sequenceNum, checkSum, sessionKey):
# [MS-NRPC] Section 3.3.4.2.1, point 9
hm = hmac.new(sessionKey)
hm.update('\x00'*4)
hm2 = hmac.new(hm.digest())
hm2.update(checkSum)
encryptionKey = hm2.digest()
cipher = ARC4.new(encryptionKey)
return cipher.encrypt(sequenceNum)
def decryptSequenceNumberRC4(sequenceNum, checkSum, sessionKey):
# [MS-NRPC] Section 3.3.4.2.2, point 5
return encryptSequenceNumberRC4(sequenceNum, checkSum, sessionKey)
def encryptSequenceNumberAES(sequenceNum, checkSum, sessionKey):
# [MS-NRPC] Section 3.3.4.2.1, point 9
IV = checkSum[:8] + checkSum[:8]
Cipher = AES.new(sessionKey, AES.MODE_CFB, IV)
return Cipher.encrypt(sequenceNum)
def decryptSequenceNumberAES(sequenceNum, checkSum, sessionKey):
# [MS-NRPC] Section 3.3.4.2.1, point 9
IV = checkSum[:8] + checkSum[:8]
Cipher = AES.new(sessionKey, AES.MODE_CFB, IV)
return Cipher.decrypt(sequenceNum)
def SIGN(data, confounder, sequenceNum, key, aes = False):
if aes is False:
signature = NL_AUTH_SIGNATURE()
signature['SignatureAlgorithm'] = NL_SIGNATURE_HMAC_MD5
if confounder == '':
signature['SealAlgorithm'] = NL_SEAL_NOT_ENCRYPTED
else:
signature['SealAlgorithm'] = NL_SEAL_RC4
signature['Checksum'] = ComputeNetlogonSignatureMD5(signature, data, confounder, key)
signature['SequenceNumber'] = encryptSequenceNumberRC4(deriveSequenceNumber(sequenceNum), signature['Checksum'], key)
return signature
else:
signature = NL_AUTH_SIGNATURE()
signature['SignatureAlgorithm'] = NL_SIGNATURE_HMAC_SHA256
if confounder == '':
signature['SealAlgorithm'] = NL_SEAL_NOT_ENCRYPTED
else:
signature['SealAlgorithm'] = NL_SEAL_AES128
signature['Checksum'] = ComputeNetlogonSignatureAES(signature, data, confounder, key)
signature['SequenceNumber'] = encryptSequenceNumberAES(deriveSequenceNumber(sequenceNum), signature['Checksum'], key)
return signature
def SEAL(data, confounder, sequenceNum, key, aes = False):
signature = SIGN(data, confounder, sequenceNum, key, aes)
sequenceNum = deriveSequenceNumber(sequenceNum)
XorKey = []
for i in key:
XorKey.append(chr(ord(i) ^ 0xf0))
XorKey = ''.join(XorKey)
if aes is False:
hm = hmac.new(XorKey)
hm.update('\x00'*4)
hm2 = hmac.new(hm.digest())
hm2.update(sequenceNum)
encryptionKey = hm2.digest()
cipher = ARC4.new(encryptionKey)
cfounder = cipher.encrypt(confounder)
cipher = ARC4.new(encryptionKey)
encrypted = cipher.encrypt(data)
signature['Confounder'] = cfounder
return encrypted, signature
else:
IV = sequenceNum + sequenceNum
cipher = AES.new(XorKey, AES.MODE_CFB, IV)
cfounder = cipher.encrypt(confounder)
encrypted = cipher.encrypt(data)
signature['Confounder'] = cfounder
return encrypted, signature
def UNSEAL(data, auth_data, key, aes = False):
auth_data = NL_AUTH_SIGNATURE(auth_data)
XorKey = []
for i in key:
XorKey.append(chr(ord(i) ^ 0xf0))
XorKey = ''.join(XorKey)
if aes is False:
sequenceNum = decryptSequenceNumberRC4(auth_data['SequenceNumber'], auth_data['Checksum'], key)
hm = hmac.new(XorKey)
hm.update('\x00'*4)
hm2 = hmac.new(hm.digest())
hm2.update(sequenceNum)
encryptionKey = hm2.digest()
cipher = ARC4.new(encryptionKey)
cfounder = cipher.encrypt(auth_data['Confounder'])
cipher = ARC4.new(encryptionKey)
plain = cipher.encrypt(data)
return plain, cfounder
else:
sequenceNum = decryptSequenceNumberAES(auth_data['SequenceNumber'], auth_data['Checksum'], key)
IV = sequenceNum + sequenceNum
cipher = AES.new(XorKey, AES.MODE_CFB, IV)
cfounder = cipher.decrypt(auth_data['Confounder'])
plain = cipher.decrypt(data)
return plain, cfounder
def getSSPType1(workstation='', domain='', signingRequired=False):
auth = NL_AUTH_MESSAGE()
auth['Flags'] = 0
auth['Buffer'] = ''
auth['Flags'] |= NL_AUTH_MESSAGE_NETBIOS_DOMAIN
if domain != '':
auth['Buffer'] = auth['Buffer'] + domain + '\x00'
else:
auth['Buffer'] = auth['Buffer'] + 'WORKGROUP\x00'
auth['Flags'] |= NL_AUTH_MESSAGE_NETBIOS_HOST
if workstation != '':
auth['Buffer'] = auth['Buffer'] + workstation + '\x00'
else:
auth['Buffer'] = auth['Buffer'] + 'MYHOST\x00'
auth['Flags'] |= NL_AUTH_MESSAGE_NETBIOS_HOST_UTF8
if workstation != '':
auth['Buffer'] += pack('<B',len(workstation)) + workstation + '\x00'
else:
auth['Buffer'] += '\x06MYHOST\x00'
return auth
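# Illustrative sketch (not part of the original module): once a 16-byte session
# key has been negotiated, the SEAL/UNSEAL helpers above protect a packet roughly
# like this. The key, confounder and sequence number shown are made-up placeholders.
#
#   sessionKey = '\x00' * 16                  # placeholder 16-byte session key
#   sealed, signature = SEAL('packet data', '12345678', 0, sessionKey, aes=False)
#   # 'sealed' plus str(signature) travel as the stub data and auth trailer;
#   # the peer recovers the plaintext with UNSEAL(sealed, str(signature), sessionKey).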
################################################################################
# RPC CALLS
################################################################################
# 3.5.4.3.1 DsrGetDcNameEx2 (Opnum 34)
class DsrGetDcNameEx2(NDRCALL):
opnum = 34
structure = (
('ComputerName',PLOGONSRV_HANDLE),
('AccountName', LPWSTR),
('AllowableAccountControlBits', ULONG),
('DomainName',LPWSTR),
('DomainGuid',PGUID),
('SiteName',LPWSTR),
('Flags',ULONG),
)
class DsrGetDcNameEx2Response(NDRCALL):
structure = (
('DomainControllerInfo',PDOMAIN_CONTROLLER_INFOW),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.2 DsrGetDcNameEx (Opnum 27)
class DsrGetDcNameEx(NDRCALL):
opnum = 27
structure = (
('ComputerName',PLOGONSRV_HANDLE),
('DomainName',LPWSTR),
('DomainGuid',PGUID),
('SiteName',LPWSTR),
('Flags',ULONG),
)
class DsrGetDcNameExResponse(NDRCALL):
structure = (
('DomainControllerInfo',PDOMAIN_CONTROLLER_INFOW),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.3 DsrGetDcName (Opnum 20)
class DsrGetDcName(NDRCALL):
opnum = 20
structure = (
('ComputerName',PLOGONSRV_HANDLE),
('DomainName',LPWSTR),
('DomainGuid',PGUID),
('SiteGuid',PGUID),
('Flags',ULONG),
)
class DsrGetDcNameResponse(NDRCALL):
structure = (
('DomainControllerInfo',PDOMAIN_CONTROLLER_INFOW),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.4 NetrGetDCName (Opnum 11)
class NetrGetDCName(NDRCALL):
opnum = 11
structure = (
('ServerName',LOGONSRV_HANDLE),
('DomainName',LPWSTR),
)
class NetrGetDCNameResponse(NDRCALL):
structure = (
('Buffer',LPWSTR),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.5 NetrGetAnyDCName (Opnum 13)
class NetrGetAnyDCName(NDRCALL):
opnum = 13
structure = (
('ServerName',PLOGONSRV_HANDLE),
('DomainName',LPWSTR),
)
class NetrGetAnyDCNameResponse(NDRCALL):
structure = (
('Buffer',LPWSTR),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.6 DsrGetSiteName (Opnum 28)
class DsrGetSiteName(NDRCALL):
opnum = 28
structure = (
('ComputerName',PLOGONSRV_HANDLE),
)
class DsrGetSiteNameResponse(NDRCALL):
structure = (
('SiteName',LPWSTR),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.7 DsrGetDcSiteCoverageW (Opnum 38)
class DsrGetDcSiteCoverageW(NDRCALL):
opnum = 38
structure = (
('ServerName',PLOGONSRV_HANDLE),
)
class DsrGetDcSiteCoverageWResponse(NDRCALL):
structure = (
('SiteNames',PNL_SITE_NAME_ARRAY),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.8 DsrAddressToSiteNamesW (Opnum 33)
class DsrAddressToSiteNamesW(NDRCALL):
opnum = 33
structure = (
('ComputerName',PLOGONSRV_HANDLE),
('EntryCount',ULONG),
('SocketAddresses',NL_SOCKET_ADDRESS_ARRAY),
)
class DsrAddressToSiteNamesWResponse(NDRCALL):
structure = (
('SiteNames',PNL_SITE_NAME_ARRAY),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.9 DsrAddressToSiteNamesExW (Opnum 37)
class DsrAddressToSiteNamesExW(NDRCALL):
opnum = 37
structure = (
('ComputerName',PLOGONSRV_HANDLE),
('EntryCount',ULONG),
('SocketAddresses',NL_SOCKET_ADDRESS_ARRAY),
)
class DsrAddressToSiteNamesExWResponse(NDRCALL):
structure = (
('SiteNames',PNL_SITE_NAME_EX_ARRAY),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.10 DsrDeregisterDnsHostRecords (Opnum 41)
class DsrDeregisterDnsHostRecords(NDRCALL):
opnum = 41
structure = (
('ServerName',PLOGONSRV_HANDLE),
('DnsDomainName',LPWSTR),
('DomainGuid',PGUID),
('DsaGuid',PGUID),
('DnsHostName',WSTR),
)
class DsrDeregisterDnsHostRecordsResponse(NDRCALL):
structure = (
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.3.11 DSRUpdateReadOnlyServerDnsRecords (Opnum 48)
class DSRUpdateReadOnlyServerDnsRecords(NDRCALL):
opnum = 48
structure = (
('ServerName',PLOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('SiteName',LPWSTR),
('DnsTtl',ULONG),
('DnsNames',NL_DNS_NAME_INFO_ARRAY),
)
class DSRUpdateReadOnlyServerDnsRecordsResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DnsNames',NL_DNS_NAME_INFO_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.1 NetrServerReqChallenge (Opnum 4)
class NetrServerReqChallenge(NDRCALL):
opnum = 4
structure = (
('PrimaryName',PLOGONSRV_HANDLE),
('ComputerName',WSTR),
('ClientChallenge',NETLOGON_CREDENTIAL),
)
class NetrServerReqChallengeResponse(NDRCALL):
structure = (
('ServerChallenge',NETLOGON_CREDENTIAL),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.2 NetrServerAuthenticate3 (Opnum 26)
class NetrServerAuthenticate3(NDRCALL):
opnum = 26
structure = (
('PrimaryName',PLOGONSRV_HANDLE),
('AccountName',WSTR),
('SecureChannelType',NETLOGON_SECURE_CHANNEL_TYPE),
('ComputerName',WSTR),
('ClientCredential',NETLOGON_CREDENTIAL),
('NegotiateFlags',ULONG),
)
class NetrServerAuthenticate3Response(NDRCALL):
structure = (
('ServerCredential',NETLOGON_CREDENTIAL),
('NegotiateFlags',ULONG),
('AccountRid',ULONG),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.3 NetrServerAuthenticate2 (Opnum 15)
class NetrServerAuthenticate2(NDRCALL):
opnum = 15
structure = (
('PrimaryName',PLOGONSRV_HANDLE),
('AccountName',WSTR),
('SecureChannelType',NETLOGON_SECURE_CHANNEL_TYPE),
('ComputerName',WSTR),
('ClientCredential',NETLOGON_CREDENTIAL),
('NegotiateFlags',ULONG),
)
class NetrServerAuthenticate2Response(NDRCALL):
structure = (
('ServerCredential',NETLOGON_CREDENTIAL),
('NegotiateFlags',ULONG),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.4 NetrServerAuthenticate (Opnum 5)
class NetrServerAuthenticate(NDRCALL):
opnum = 5
structure = (
('PrimaryName',PLOGONSRV_HANDLE),
('AccountName',WSTR),
('SecureChannelType',NETLOGON_SECURE_CHANNEL_TYPE),
('ComputerName',WSTR),
('ClientCredential',NETLOGON_CREDENTIAL),
)
class NetrServerAuthenticateResponse(NDRCALL):
structure = (
('ServerCredential',NETLOGON_CREDENTIAL),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.5 NetrServerPasswordSet2 (Opnum 30)
# 3.5.4.4.6 NetrServerPasswordSet (Opnum 6)
# 3.5.4.4.7 NetrServerPasswordGet (Opnum 31)
class NetrServerPasswordGet(NDRCALL):
opnum = 31
structure = (
('PrimaryName',PLOGONSRV_HANDLE),
('AccountName',WSTR),
('AccountType',NETLOGON_SECURE_CHANNEL_TYPE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
)
class NetrServerPasswordGetResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('EncryptedNtOwfPassword',ENCRYPTED_NT_OWF_PASSWORD),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.8 NetrServerTrustPasswordsGet (Opnum 42)
class NetrServerTrustPasswordsGet(NDRCALL):
opnum = 42
structure = (
('TrustedDcName',PLOGONSRV_HANDLE),
('AccountName',WSTR),
('SecureChannelType',NETLOGON_SECURE_CHANNEL_TYPE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
)
class NetrServerTrustPasswordsGetResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('EncryptedNewOwfPassword',ENCRYPTED_NT_OWF_PASSWORD),
('EncryptedOldOwfPassword',ENCRYPTED_NT_OWF_PASSWORD),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.9 NetrLogonGetDomainInfo (Opnum 29)
class NetrLogonGetDomainInfo(NDRCALL):
opnum = 29
structure = (
('ServerName',LOGONSRV_HANDLE),
('ComputerName',LPWSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('Level',DWORD),
('WkstaBuffer',NETLOGON_WORKSTATION_INFORMATION),
)
class NetrLogonGetDomainInfoResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DomBuffer',NETLOGON_DOMAIN_INFORMATION),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.10 NetrLogonGetCapabilities (Opnum 21)
class NetrLogonGetCapabilities(NDRCALL):
opnum = 21
structure = (
('ServerName',LOGONSRV_HANDLE),
('ComputerName',LPWSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('QueryLevel',DWORD),
)
class NetrLogonGetCapabilitiesResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('ServerCapabilities',NETLOGON_CAPABILITIES),
('ErrorCode',NTSTATUS),
)
# 3.5.4.4.11 NetrChainSetClientAttributes (Opnum 49)
# 3.5.4.5.1 NetrLogonSamLogonEx (Opnum 39)
class NetrLogonSamLogonEx(NDRCALL):
opnum = 39
structure = (
('LogonServer',LPWSTR),
('ComputerName',LPWSTR),
('LogonLevel',NETLOGON_LOGON_INFO_CLASS),
('LogonInformation',NETLOGON_LEVEL),
('ValidationLevel',NETLOGON_VALIDATION_INFO_CLASS),
('ExtraFlags',ULONG),
)
class NetrLogonSamLogonExResponse(NDRCALL):
structure = (
('ValidationInformation',NETLOGON_VALIDATION),
('Authoritative',UCHAR),
('ExtraFlags',ULONG),
('ErrorCode',NTSTATUS),
)
# 3.5.4.5.2 NetrLogonSamLogonWithFlags (Opnum 45)
class NetrLogonSamLogonWithFlags(NDRCALL):
opnum = 45
structure = (
('LogonServer',LPWSTR),
('ComputerName',LPWSTR),
('Authenticator',PNETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',PNETLOGON_AUTHENTICATOR),
('LogonLevel',NETLOGON_LOGON_INFO_CLASS),
('LogonInformation',NETLOGON_LEVEL),
('ValidationLevel',NETLOGON_VALIDATION_INFO_CLASS),
('ExtraFlags',ULONG),
)
class NetrLogonSamLogonWithFlagsResponse(NDRCALL):
structure = (
('ReturnAuthenticator',PNETLOGON_AUTHENTICATOR),
('ValidationInformation',NETLOGON_VALIDATION),
('Authoritative',UCHAR),
('ExtraFlags',ULONG),
('ErrorCode',NTSTATUS),
)
# 3.5.4.5.3 NetrLogonSamLogon (Opnum 2)
class NetrLogonSamLogon(NDRCALL):
opnum = 2
structure = (
('LogonServer',LPWSTR),
('ComputerName',LPWSTR),
('Authenticator',PNETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',PNETLOGON_AUTHENTICATOR),
('LogonLevel',NETLOGON_LOGON_INFO_CLASS),
('LogonInformation',NETLOGON_LEVEL),
('ValidationLevel',NETLOGON_VALIDATION_INFO_CLASS),
)
class NetrLogonSamLogonResponse(NDRCALL):
structure = (
('ReturnAuthenticator',PNETLOGON_AUTHENTICATOR),
('ValidationInformation',NETLOGON_VALIDATION),
('Authoritative',UCHAR),
('ErrorCode',NTSTATUS),
)
# 3.5.4.5.4 NetrLogonSamLogoff (Opnum 3)
class NetrLogonSamLogoff(NDRCALL):
opnum = 3
structure = (
('LogonServer',LPWSTR),
('ComputerName',LPWSTR),
('Authenticator',PNETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',PNETLOGON_AUTHENTICATOR),
('LogonLevel',NETLOGON_LOGON_INFO_CLASS),
('LogonInformation',NETLOGON_LEVEL),
)
class NetrLogonSamLogoffResponse(NDRCALL):
structure = (
('ReturnAuthenticator',PNETLOGON_AUTHENTICATOR),
('ErrorCode',NTSTATUS),
)
# 3.5.4.6.1 NetrDatabaseDeltas (Opnum 7)
class NetrDatabaseDeltas(NDRCALL):
opnum = 7
structure = (
('PrimaryName',LOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DatabaseID',DWORD),
('DomainModifiedCount',NLPR_MODIFIED_COUNT),
('PreferredMaximumLength',DWORD),
)
class NetrDatabaseDeltasResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DomainModifiedCount',NLPR_MODIFIED_COUNT),
('DeltaArray',PNETLOGON_DELTA_ENUM_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.6.2 NetrDatabaseSync2 (Opnum 16)
class NetrDatabaseSync2(NDRCALL):
opnum = 16
structure = (
('PrimaryName',LOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DatabaseID',DWORD),
('RestartState',SYNC_STATE),
('SyncContext',ULONG),
('PreferredMaximumLength',DWORD),
)
class NetrDatabaseSync2Response(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('SyncContext',ULONG),
('DeltaArray',PNETLOGON_DELTA_ENUM_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.6.3 NetrDatabaseSync (Opnum 8)
class NetrDatabaseSync(NDRCALL):
opnum = 8
structure = (
('PrimaryName',LOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DatabaseID',DWORD),
('SyncContext',ULONG),
('PreferredMaximumLength',DWORD),
)
class NetrDatabaseSyncResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('SyncContext',ULONG),
('DeltaArray',PNETLOGON_DELTA_ENUM_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.6.4 NetrDatabaseRedo (Opnum 17)
class NetrDatabaseRedo(NDRCALL):
opnum = 17
structure = (
('PrimaryName',LOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('ChangeLogEntry',PUCHAR_ARRAY),
('ChangeLogEntrySize',DWORD),
)
class NetrDatabaseRedoResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('DeltaArray',PNETLOGON_DELTA_ENUM_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.7.1 DsrEnumerateDomainTrusts (Opnum 40)
class DsrEnumerateDomainTrusts(NDRCALL):
opnum = 40
structure = (
('ServerName',PLOGONSRV_HANDLE),
('Flags',ULONG),
)
class DsrEnumerateDomainTrustsResponse(NDRCALL):
structure = (
('Domains',NETLOGON_TRUSTED_DOMAIN_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.7.2 NetrEnumerateTrustedDomainsEx (Opnum 36)
class NetrEnumerateTrustedDomainsEx(NDRCALL):
opnum = 36
structure = (
('ServerName',PLOGONSRV_HANDLE),
)
class NetrEnumerateTrustedDomainsExResponse(NDRCALL):
structure = (
('Domains',NETLOGON_TRUSTED_DOMAIN_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.7.3 NetrEnumerateTrustedDomains (Opnum 19)
class NetrEnumerateTrustedDomains(NDRCALL):
opnum = 19
structure = (
('ServerName',PLOGONSRV_HANDLE),
)
class NetrEnumerateTrustedDomainsResponse(NDRCALL):
structure = (
('DomainNameBuffer',DOMAIN_NAME_BUFFER),
('ErrorCode',NTSTATUS),
)
# 3.5.4.7.4 NetrGetForestTrustInformation (Opnum 44)
class NetrGetForestTrustInformation(NDRCALL):
opnum = 44
structure = (
('ServerName',PLOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('Flags',DWORD),
)
class NetrGetForestTrustInformationResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('ForestTrustInfo',PLSA_FOREST_TRUST_INFORMATION),
('ErrorCode',NTSTATUS),
)
# 3.5.4.7.5 DsrGetForestTrustInformation (Opnum 43)
class DsrGetForestTrustInformation(NDRCALL):
opnum = 43
structure = (
('ServerName',PLOGONSRV_HANDLE),
('TrustedDomainName',LPWSTR),
('Flags',DWORD),
)
class DsrGetForestTrustInformationResponse(NDRCALL):
structure = (
('ForestTrustInfo',PLSA_FOREST_TRUST_INFORMATION),
('ErrorCode',NTSTATUS),
)
# 3.5.4.7.6 NetrServerGetTrustInfo (Opnum 46)
class NetrServerGetTrustInfo(NDRCALL):
opnum = 46
structure = (
('TrustedDcName',PLOGONSRV_HANDLE),
('AccountName',WSTR),
('SecureChannelType',NETLOGON_SECURE_CHANNEL_TYPE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
)
class NetrServerGetTrustInfoResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('EncryptedNewOwfPassword',ENCRYPTED_NT_OWF_PASSWORD),
('EncryptedOldOwfPassword',ENCRYPTED_NT_OWF_PASSWORD),
('TrustInfo',PNL_GENERIC_RPC_DATA),
('ErrorCode',NTSTATUS),
)
# 3.5.4.8.1 NetrLogonGetTrustRid (Opnum 23)
class NetrLogonGetTrustRid(NDRCALL):
opnum = 23
structure = (
('ServerName',PLOGONSRV_HANDLE),
('DomainName',LPWSTR),
)
class NetrLogonGetTrustRidResponse(NDRCALL):
structure = (
('Rid',ULONG),
('ErrorCode',NTSTATUS),
)
# 3.5.4.8.2 NetrLogonComputeServerDigest (Opnum 24)
class NetrLogonComputeServerDigest(NDRCALL):
opnum = 24
structure = (
('ServerName',PLOGONSRV_HANDLE),
('Rid',ULONG),
('Message',UCHAR_ARRAY),
('MessageSize',ULONG),
)
class NetrLogonComputeServerDigestResponse(NDRCALL):
structure = (
('NewMessageDigest',CHAR_FIXED_16_ARRAY),
('OldMessageDigest',CHAR_FIXED_16_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.8.3 NetrLogonComputeClientDigest (Opnum 25)
class NetrLogonComputeClientDigest(NDRCALL):
opnum = 25
structure = (
('ServerName',PLOGONSRV_HANDLE),
('DomainName',LPWSTR),
('Message',UCHAR_ARRAY),
('MessageSize',ULONG),
)
class NetrLogonComputeClientDigestResponse(NDRCALL):
structure = (
('NewMessageDigest',CHAR_FIXED_16_ARRAY),
('OldMessageDigest',CHAR_FIXED_16_ARRAY),
('ErrorCode',NTSTATUS),
)
# 3.5.4.8.4 NetrLogonSendToSam (Opnum 32)
class NetrLogonSendToSam(NDRCALL):
opnum = 32
structure = (
('PrimaryName',PLOGONSRV_HANDLE),
('ComputerName',WSTR),
('Authenticator',NETLOGON_AUTHENTICATOR),
('OpaqueBuffer',UCHAR_ARRAY),
('OpaqueBufferSize',ULONG),
)
class NetrLogonSendToSamResponse(NDRCALL):
structure = (
('ReturnAuthenticator',NETLOGON_AUTHENTICATOR),
('ErrorCode',NTSTATUS),
)
# 3.5.4.8.5 NetrLogonSetServiceBits (Opnum 22)
class NetrLogonSetServiceBits(NDRCALL):
opnum = 22
structure = (
('ServerName',PLOGONSRV_HANDLE),
('ServiceBitsOfInterest',DWORD),
('ServiceBits',DWORD),
)
class NetrLogonSetServiceBitsResponse(NDRCALL):
structure = (
('ErrorCode',NTSTATUS),
)
# 3.5.4.8.6 NetrLogonGetTimeServiceParentDomain (Opnum 35)
class NetrLogonGetTimeServiceParentDomain(NDRCALL):
opnum = 35
structure = (
('ServerName',PLOGONSRV_HANDLE),
)
class NetrLogonGetTimeServiceParentDomainResponse(NDRCALL):
structure = (
('DomainName',LPWSTR),
('PdcSameSite',LONG),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.9.1 NetrLogonControl2Ex (Opnum 18)
class NetrLogonControl2Ex(NDRCALL):
opnum = 18
structure = (
('ServerName',PLOGONSRV_HANDLE),
('FunctionCode',DWORD),
('QueryLevel',DWORD),
('Data',NETLOGON_CONTROL_DATA_INFORMATION),
)
class NetrLogonControl2ExResponse(NDRCALL):
structure = (
('Buffer',NETLOGON_CONTROL_DATA_INFORMATION),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.9.2 NetrLogonControl2 (Opnum 14)
class NetrLogonControl2(NDRCALL):
opnum = 14
structure = (
('ServerName',PLOGONSRV_HANDLE),
('FunctionCode',DWORD),
('QueryLevel',DWORD),
('Data',NETLOGON_CONTROL_DATA_INFORMATION),
)
class NetrLogonControl2Response(NDRCALL):
structure = (
('Buffer',NETLOGON_CONTROL_DATA_INFORMATION),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.9.3 NetrLogonControl (Opnum 12)
class NetrLogonControl(NDRCALL):
opnum = 12
structure = (
('ServerName',PLOGONSRV_HANDLE),
('FunctionCode',DWORD),
('QueryLevel',DWORD),
('Data',NETLOGON_CONTROL_DATA_INFORMATION),
)
class NetrLogonControlResponse(NDRCALL):
structure = (
('Buffer',NETLOGON_CONTROL_DATA_INFORMATION),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.10.1 NetrLogonUasLogon (Opnum 0)
class NetrLogonUasLogon(NDRCALL):
opnum = 0
structure = (
('ServerName',PLOGONSRV_HANDLE),
('UserName',WSTR),
('Workstation',WSTR),
)
class NetrLogonUasLogonResponse(NDRCALL):
structure = (
('ValidationInformation',PNETLOGON_VALIDATION_UAS_INFO),
('ErrorCode',NET_API_STATUS),
)
# 3.5.4.10.2 NetrLogonUasLogoff (Opnum 1)
class NetrLogonUasLogoff(NDRCALL):
opnum = 1
structure = (
('ServerName',PLOGONSRV_HANDLE),
('UserName',WSTR),
('Workstation',WSTR),
)
class NetrLogonUasLogoffResponse(NDRCALL):
structure = (
('LogoffInformation',NETLOGON_LOGOFF_UAS_INFO),
('ErrorCode',NET_API_STATUS),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (NetrLogonUasLogon, NetrLogonUasLogonResponse),
1 : (NetrLogonUasLogoff, NetrLogonUasLogoffResponse),
2 : (NetrLogonSamLogon, NetrLogonSamLogonResponse),
3 : (NetrLogonSamLogoff, NetrLogonSamLogoffResponse),
4 : (NetrServerReqChallenge, NetrServerReqChallengeResponse),
5 : (NetrServerAuthenticate, NetrServerAuthenticateResponse),
# 6 : (NetrServerPasswordSet, NetrServerPasswordSetResponse),
7 : (NetrDatabaseDeltas, NetrDatabaseDeltasResponse),
8 : (NetrDatabaseSync, NetrDatabaseSyncResponse),
# 9 : (NetrAccountDeltas, NetrAccountDeltasResponse),
# 10 : (NetrAccountSync, NetrAccountSyncResponse),
11 : (NetrGetDCName, NetrGetDCNameResponse),
12 : (NetrLogonControl, NetrLogonControlResponse),
13 : (NetrGetAnyDCName, NetrGetAnyDCNameResponse),
14 : (NetrLogonControl2, NetrLogonControl2Response),
15 : (NetrServerAuthenticate2, NetrServerAuthenticate2Response),
16 : (NetrDatabaseSync2, NetrDatabaseSync2Response),
17 : (NetrDatabaseRedo, NetrDatabaseRedoResponse),
18 : (NetrLogonControl2Ex, NetrLogonControl2ExResponse),
19 : (NetrEnumerateTrustedDomains, NetrEnumerateTrustedDomainsResponse),
20 : (DsrGetDcName, DsrGetDcNameResponse),
21 : (NetrLogonGetCapabilities, NetrLogonGetCapabilitiesResponse),
22 : (NetrLogonSetServiceBits, NetrLogonSetServiceBitsResponse),
23 : (NetrLogonGetTrustRid, NetrLogonGetTrustRidResponse),
24 : (NetrLogonComputeServerDigest, NetrLogonComputeServerDigestResponse),
25 : (NetrLogonComputeClientDigest, NetrLogonComputeClientDigestResponse),
26 : (NetrServerAuthenticate3, NetrServerAuthenticate3Response),
27 : (DsrGetDcNameEx, DsrGetDcNameExResponse),
28 : (DsrGetSiteName, DsrGetSiteNameResponse),
29 : (NetrLogonGetDomainInfo, NetrLogonGetDomainInfoResponse),
# 30 : (NetrServerPasswordSet2, NetrServerPasswordSet2Response),
31 : (NetrServerPasswordGet, NetrServerPasswordGetResponse),
32 : (NetrLogonSendToSam, NetrLogonSendToSamResponse),
33 : (DsrAddressToSiteNamesW, DsrAddressToSiteNamesWResponse),
34 : (DsrGetDcNameEx2, DsrGetDcNameEx2Response),
35 : (NetrLogonGetTimeServiceParentDomain, NetrLogonGetTimeServiceParentDomainResponse),
36 : (NetrEnumerateTrustedDomainsEx, NetrEnumerateTrustedDomainsExResponse),
37 : (DsrAddressToSiteNamesExW, DsrAddressToSiteNamesExWResponse),
38 : (DsrGetDcSiteCoverageW, DsrGetDcSiteCoverageWResponse),
39 : (NetrLogonSamLogonEx, NetrLogonSamLogonExResponse),
40 : (DsrEnumerateDomainTrusts, DsrEnumerateDomainTrustsResponse),
41 : (DsrDeregisterDnsHostRecords, DsrDeregisterDnsHostRecordsResponse),
42 : (NetrServerTrustPasswordsGet, NetrServerTrustPasswordsGetResponse),
43 : (DsrGetForestTrustInformation, DsrGetForestTrustInformationResponse),
44 : (NetrGetForestTrustInformation, NetrGetForestTrustInformationResponse),
45 : (NetrLogonSamLogonWithFlags, NetrLogonSamLogonWithFlagsResponse),
46 : (NetrServerGetTrustInfo, NetrServerGetTrustInfoResponse),
# 48 : (DsrUpdateReadOnlyServerDnsRecords, DsrUpdateReadOnlyServerDnsRecordsResponse),
# 49 : (NetrChainSetClientAttributes, NetrChainSetClientAttributesResponse),
}
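# Note (illustrative, not in the original file): OPNUMS maps each operation
# number to its (request, response) NDR classes, e.g. OPNUMS[4] resolves to
# (NetrServerReqChallenge, NetrServerReqChallengeResponse).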
################################################################################
# HELPER FUNCTIONS
################################################################################
def checkNullString(string):
if string == NULL:
return string
if string[-1:] != '\x00':
return string + '\x00'
else:
return string
def hNetrServerReqChallenge(dce, primaryName, computerName, clientChallenge):
request = NetrServerReqChallenge()
request['PrimaryName'] = checkNullString(primaryName)
request['ComputerName'] = checkNullString(computerName)
request['ClientChallenge'] = clientChallenge
return dce.request(request)
def hNetrServerAuthenticate3(dce, primaryName, accountName, secureChannelType, computerName, clientCredential, negotiateFlags):
request = NetrServerAuthenticate3()
request['PrimaryName'] = checkNullString(primaryName)
request['AccountName'] = checkNullString(accountName)
request['SecureChannelType'] = secureChannelType
request['ClientCredential'] = clientCredential
request['ComputerName'] = checkNullString(computerName)
request['NegotiateFlags'] = negotiateFlags
return dce.request(request)
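# Illustrative sketch (not part of the original module): the usual secure-channel
# handshake chains the two helpers above. The host names, machine account,
# challenge bytes, channel type and negotiate flags below are placeholders, and
# ComputeSessionKeyStrongKey / ComputeNetlogonCredential are assumed to be the
# credential helpers defined earlier in this module.
#
#   resp = hNetrServerReqChallenge(dce, NULL, 'WKSTA\x00', '12345678')
#   serverChallenge = resp['ServerChallenge']
#   sessionKey = ComputeSessionKeyStrongKey('', '12345678', serverChallenge, ntHash)
#   clientCredential = ComputeNetlogonCredential('12345678', sessionKey)
#   hNetrServerAuthenticate3(dce, NULL, 'WKSTA$\x00', secureChannelType,
#                            'WKSTA\x00', clientCredential, negotiateFlags)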
def hDsrGetDcNameEx2(dce, computerName, accountName, allowableAccountControlBits, domainName, domainGuid, siteName, flags):
request = DsrGetDcNameEx2()
request['ComputerName'] = checkNullString(computerName)
request['AccountName'] = checkNullString(accountName)
request['AllowableAccountControlBits'] = allowableAccountControlBits
request['DomainName'] = checkNullString(domainName)
request['DomainGuid'] = domainGuid
request['SiteName'] = checkNullString(siteName)
request['Flags'] = flags
return dce.request(request)
def hDsrGetDcNameEx(dce, computerName, domainName, domainGuid, siteName, flags):
request = DsrGetDcNameEx()
request['ComputerName'] = checkNullString(computerName)
request['DomainName'] = checkNullString(domainName)
request['DomainGuid'] = domainGuid
request['SiteName'] = siteName
request['Flags'] = flags
return dce.request(request)
def hDsrGetDcName(dce, computerName, domainName, domainGuid, siteGuid, flags):
request = DsrGetDcName()
request['ComputerName'] = checkNullString(computerName)
request['DomainName'] = checkNullString(domainName)
request['DomainGuid'] = domainGuid
request['SiteGuid'] = siteGuid
request['Flags'] = flags
return dce.request(request)
def hNetrGetAnyDCName(dce, serverName, domainName):
request = NetrGetAnyDCName()
request['ServerName'] = checkNullString(serverName)
request['DomainName'] = checkNullString(domainName)
return dce.request(request)
def hNetrGetDCName(dce, serverName, domainName):
request = NetrGetDCName()
request['ServerName'] = checkNullString(serverName)
request['DomainName'] = checkNullString(domainName)
return dce.request(request)
def hDsrGetSiteName(dce, computerName):
request = DsrGetSiteName()
request['ComputerName'] = checkNullString(computerName)
return dce.request(request)
def hDsrGetDcSiteCoverageW(dce, serverName):
request = DsrGetDcSiteCoverageW()
request['ServerName'] = checkNullString(serverName)
return dce.request(request)
def hNetrServerAuthenticate2(dce, primaryName, accountName, secureChannelType, computerName, clientCredential, negotiateFlags):
request = NetrServerAuthenticate2()
request['PrimaryName'] = checkNullString(primaryName)
request['AccountName'] = checkNullString(accountName)
request['SecureChannelType'] = secureChannelType
request['ClientCredential'] = clientCredential
request['ComputerName'] = checkNullString(computerName)
request['NegotiateFlags'] = negotiateFlags
return dce.request(request)
def hNetrServerAuthenticate(dce, primaryName, accountName, secureChannelType, computerName, clientCredential):
request = NetrServerAuthenticate()
request['PrimaryName'] = checkNullString(primaryName)
request['AccountName'] = checkNullString(accountName)
request['SecureChannelType'] = secureChannelType
request['ClientCredential'] = clientCredential
request['ComputerName'] = checkNullString(computerName)
return dce.request(request)
def hNetrServerPasswordGet(dce, primaryName, accountName, accountType, computerName, authenticator):
request = NetrServerPasswordGet()
request['PrimaryName'] = checkNullString(primaryName)
request['AccountName'] = checkNullString(accountName)
request['AccountType'] = accountType
request['ComputerName'] = checkNullString(computerName)
request['Authenticator'] = authenticator
return dce.request(request)
def hNetrServerTrustPasswordsGet(dce, trustedDcName, accountName, secureChannelType, computerName, authenticator):
request = NetrServerTrustPasswordsGet()
request['TrustedDcName'] = checkNullString(trustedDcName)
request['AccountName'] = checkNullString(accountName)
request['SecureChannelType'] = secureChannelType
request['ComputerName'] = checkNullString(computerName)
request['Authenticator'] = authenticator
return dce.request(request)
def hNetrLogonGetDomainInfo(dce, serverName, computerName, authenticator, returnAuthenticator=0, level=1):
request = NetrLogonGetDomainInfo()
request['ServerName'] = checkNullString(serverName)
request['ComputerName'] = checkNullString(computerName)
request['Authenticator'] = authenticator
if returnAuthenticator == 0:
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
else:
request['ReturnAuthenticator'] = returnAuthenticator
request['Level'] = 1
if level == 1:
request['WkstaBuffer']['tag'] = 1
request['WkstaBuffer']['WorkstationInfo']['DnsHostName'] = NULL
request['WkstaBuffer']['WorkstationInfo']['SiteName'] = NULL
request['WkstaBuffer']['WorkstationInfo']['OsName'] = ''
request['WkstaBuffer']['WorkstationInfo']['Dummy1'] = NULL
request['WkstaBuffer']['WorkstationInfo']['Dummy2'] = NULL
request['WkstaBuffer']['WorkstationInfo']['Dummy3'] = NULL
request['WkstaBuffer']['WorkstationInfo']['Dummy4'] = NULL
else:
request['WkstaBuffer']['tag'] = 2
request['WkstaBuffer']['LsaPolicyInfo']['LsaPolicy'] = NULL
return dce.request(request)
def hNetrLogonGetCapabilities(dce, serverName, computerName, authenticator, returnAuthenticator=0, queryLevel=1):
request = NetrLogonGetCapabilities()
request['ServerName'] = checkNullString(serverName)
request['ComputerName'] = checkNullString(computerName)
request['Authenticator'] = authenticator
if returnAuthenticator == 0:
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
else:
request['ReturnAuthenticator'] = returnAuthenticator
request['QueryLevel'] = queryLevel
return dce.request(request)
def hNetrServerGetTrustInfo(dce, trustedDcName, accountName, secureChannelType, computerName, authenticator):
request = NetrServerGetTrustInfo()
request['TrustedDcName'] = checkNullString(trustedDcName)
request['AccountName'] = checkNullString(accountName)
request['SecureChannelType'] = secureChannelType
request['ComputerName'] = checkNullString(computerName)
request['Authenticator'] = authenticator
return dce.request(request)
|
{
"content_hash": "a9becbd825a168ff685a987500d64a76",
"timestamp": "",
"source": "github",
"line_count": 2806,
"max_line_length": 160,
"avg_line_length": 32.23699215965787,
"alnum_prop": 0.6373304442995015,
"repo_name": "hecchi777/S3-SlaacSecuritySolution",
"id": "df7aba1b95be0a6766e349b84b2515aeb979d8e2",
"size": "91296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impacket-0.9.11/impacket/dcerpc/v5/nrpc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1234"
},
{
"name": "C++",
"bytes": "23499"
},
{
"name": "Perl",
"bytes": "6245"
},
{
"name": "Python",
"bytes": "3644642"
},
{
"name": "Shell",
"bytes": "839"
}
],
"symlink_target": ""
}
|
from openprocurement.api.traversal import Root as BaseRoot
from pyramid.security import ALL_PERMISSIONS, Allow # , Everyone
class Root(BaseRoot):
__acl__ = [
# (Allow, Everyone, 'view_transfer'),
(Allow, 'g:brokers', 'view_transfer'),
(Allow, 'g:brokers', 'create_transfer'),
(Allow, 'g:admins', ALL_PERMISSIONS),
]
def factory(request):
root = Root(request)
if not request.matchdict or not request.matchdict.get('transfer_id'):
return root
request.validated['transfer_id'] = request.matchdict['transfer_id']
transfer = request.transfer
transfer.__parent__ = root
request.validated['transfer'] = transfer
request.validated['id'] = request.matchdict['transfer_id']
return transfer
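# Note (added for clarity, not in the original file): factory() acts as the
# traversal root factory. Requests without a transfer_id resolve to Root and its
# ACL; otherwise the Transfer object is returned with Root as its __parent__, so
# the same ACL still governs access to it.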
|
{
"content_hash": "c690e43dce935b45ebe7192614f1196c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 30.68,
"alnum_prop": 0.6597131681877445,
"repo_name": "Leits/openprocurement.relocation.api",
"id": "4fa2a2626645200b3c038a4f60688b91258f6995",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openprocurement/relocation/api/traversal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "115274"
}
],
"symlink_target": ""
}
|
import os
from collections import OrderedDict
import numpy as np
from bokeh.embed import components
from bokeh.io import output_notebook
from bokeh.plotting import show
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.bokeh_routines import array_to_bokeh_table
from cave.utils.hpbandster_helpers import format_budgets
class PimpComparisonTable(BaseAnalyzer):
"""
    Parameters are initially sorted by pimp_sort_table_by. Only parameters with an importance greater than the
    threshold (5% by default) in at least one of the methods are shown. Note that the values produced by the
    different methods are not directly comparable. For more information on the metrics, see the respective tooltips."""
def __init__(self,
runscontainer,
sort_table_by,
threshold=0.05):
"""Create a html-table over all evaluated parameter-importance-methods.
Parameters are sorted after their average importance."""
super().__init__(runscontainer)
self.sort_table_by = sort_table_by
self.threshold = threshold
def get_name(self):
return "Importance Table"
def run(self):
formatted_budgets = list(format_budgets(self.runscontainer.get_budgets(), allow_whitespace=True).values())
for budget, run in zip(formatted_budgets,
self.runscontainer.get_aggregated(keep_budgets=True, keep_folders=False)):
self.result[budget] = self.plot(
pimp=run.pimp,
evaluators=list(run.share_information['evaluators'].values()),
cs=self.runscontainer.scenario.cs,
out_fn=os.path.join(run.output_dir, 'pimp.tex'),
)
def plot(self,
pimp,
evaluators,
cs,
out_fn,
):
pimp.table_for_comparison(evaluators, out_fn, style='latex')
self.logger.info('Creating pimp latex table at %s' % out_fn)
parameters = [p.name for p in cs.get_hyperparameters()]
index, values, columns = [], [], []
columns = [e.name for e in evaluators]
columns_lower = [c.lower() for c in columns]
# SORT
self.logger.debug("Sort pimp-table by %s" % self.sort_table_by)
if self.sort_table_by == "average":
            # Sort parameters by average importance
p_avg = {}
for p in parameters:
imps = [e.evaluated_parameter_importance[p] for e in evaluators
if p in e.evaluated_parameter_importance]
p_avg[p] = np.mean(imps) if imps else 0
p_order = sorted(parameters, key=lambda p: p_avg[p], reverse=True)
elif self.sort_table_by in columns_lower:
def __get_key(p):
imp = evaluators[columns_lower.index(self.sort_table_by)].evaluated_parameter_importance
return imp[p] if p in imp else 0
p_order = sorted(parameters, key=__get_key, reverse=True)
else:
raise ValueError("Trying to sort importance table after {}, which "
"was not evaluated.".format(self.sort_table_by))
# PREPROCESS
for p in p_order:
values_for_p = [p]
add_parameter = False # Only add parameters where at least one evaluator shows importance > threshold
for e in evaluators:
if p in e.evaluated_parameter_importance:
# Check for threshold
value_to_add = e.evaluated_parameter_importance[p]
if value_to_add > self.threshold:
add_parameter = True
# All but forward-selection use values between 0 and 1
if e.name != 'Forward-Selection':
value_to_add = value_to_add * 100
# Create string and add uncertainty, if available
value_to_add = format(value_to_add, '05.2f') # (leading zeros for sorting!)
if (hasattr(e, 'evaluated_parameter_importance_uncertainty') and
p in e.evaluated_parameter_importance_uncertainty):
value_to_add += ' +/- ' + format(e.evaluated_parameter_importance_uncertainty[p] * 100, '.2f')
values_for_p.append(value_to_add)
else:
values_for_p.append('-')
if add_parameter:
values.append(values_for_p)
# CREATE TABLE
comp_table = DataFrame(values, columns=['Parameters'] + columns)
sortable = {c: True for c in columns}
width = {**{'Parameters': 150}, **{c: 100 for c in columns}}
bokeh_table = array_to_bokeh_table(comp_table, sortable=sortable, width=width, logger=self.logger)
return {'bokeh': bokeh_table}
def get_html(self, d=None, tooltip=None):
self.run()
if len(self.result) == 1 and None in self.result:
self.logger.debug("Detected None-key, abstracting away...")
self.result = self.result[None]
if d is not None:
d[self.name] = OrderedDict()
script, div = "", ""
for b, t in self.result.items():
s_, d_ = components(t) if b == 'bokeh' else components(t['bokeh'])
script += s_
div += d_
if d is not None:
if b == 'bokeh':
d[self.name] = {
"bokeh": (s_, d_),
"tooltip": self.__doc__,
}
else:
d[self.name][b] = {
"bokeh": (s_, d_),
"tooltip": self.__doc__,
}
return script, div
def get_jupyter(self):
self.run()
output_notebook()
for b, t in self.result.items():
show(t['bokeh'])
|
{
"content_hash": "83a0beb043ba61daa2b565883356b4ef",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 118,
"avg_line_length": 43.17857142857143,
"alnum_prop": 0.544251447477254,
"repo_name": "automl/SpySMAC",
"id": "d6c8ad45d5bd3c2326b87fa937fa16326f396321",
"size": "6045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cave/analyzer/parameter_importance/pimp_comparison_table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7251"
},
{
"name": "CSS",
"bytes": "7272"
},
{
"name": "JavaScript",
"bytes": "715"
},
{
"name": "Python",
"bytes": "162357"
},
{
"name": "Shell",
"bytes": "1841"
}
],
"symlink_target": ""
}
|
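# Replaces 'X' with '0' in one CSV column, reading from stdin and writing to
# stdout. Illustrative invocation (not part of the original script):
#   python remove_x.py 2 < input.csv > output.csv   # cleans the third column (index 2)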
if __name__ == "__main__":
import sys
import csv
idx = sys.argv[1]
reader = csv.reader(sys.stdin)
rows = []
for row in reader:
row[int(idx)] = row[int(idx)].replace('X', '0')
rows.append(row)
writer = csv.writer(sys.stdout)
writer.writerows(rows)
|
{
"content_hash": "69caed533ff0afd4d57a7ff03533464c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.5525423728813559,
"repo_name": "math4youbyusgroupillinois/large-lots",
"id": "a277c023abf091a275bd40b7681f6cf69057b17a",
"size": "295",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "data/processors/remove_x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3407"
},
{
"name": "JavaScript",
"bytes": "72259"
},
{
"name": "Makefile",
"bytes": "27870"
},
{
"name": "Python",
"bytes": "30641"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.info.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
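# Illustrative invocations (not part of the original script):
#   python player.py animation.gif          # step through the frames of one file
#   python player.py frame1.png frame2.png  # play a list of separate images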
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
{
"content_hash": "842151ab397d99aceb9ea296b184557d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 22.24175824175824,
"alnum_prop": 0.4743083003952569,
"repo_name": "Grantern73/AiSopp",
"id": "9d302c70e9e732d9bc7e01e3d64252dc1dfdee5b",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AiSopp/AiSopp/env/Scripts/player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2176"
},
{
"name": "HTML",
"bytes": "25325"
},
{
"name": "JavaScript",
"bytes": "10714"
},
{
"name": "Python",
"bytes": "17328"
}
],
"symlink_target": ""
}
|
def whileLoop(start):
a = start
while a < 1.0:
a = a+0.3
return a
class LL(object):
__slots__ = 'next'
def __init__(self, next=None):
self.next = next
# HACK, inheritance should automatically find this.
def __bool__(self):
return True
class Pair(object):
__slots__ = 'a', 'b'
def __init__(self, a, b):
self.a = a
self.b = b
def buildList(size):
head = None
while size > 0:
head = LL(head)
size = size-1
return head
def buildListBackwards(size):
head = None
current = None
while size > 0:
head = LL()
if current:
current.next = head
current = head
size = size-1
return head
def buildListSwitch(size):
a = None
b = None
while size > 0:
a, b = LL(b), LL(a)
size = size-1
# TODO make tuple?
return Pair(a, b)
### Can't handle asymmetric returns? ###
##def isPrime(num):
## if num%2==0: return False
## test = 3
## while test < num:
## if num%test == 0:
## return False
## else:
## test = test + 2
## return True
def isPrime(num):
if num == 2: return True
if num%2 == 0: return False
test = 3
prime = True
while test < num:
if num%test == 0:
prime = False
test = test + 2
return prime
### Requires datastore support ###
def findPrimesWhile(limit):
primes = [2]
current = 3
while current < limit:
if isPrime(current):
primes.append(current)
current = current + 2
return primes
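# Illustrative note (not in the original file): findPrimesWhile(20) walks the odd
# numbers below the limit and should yield [2, 3, 5, 7, 11, 13, 17, 19].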
|
{
"content_hash": "70587360c9e07f4b6f14613222ba85bf",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 52,
"avg_line_length": 15.382022471910112,
"alnum_prop": 0.6121256391526662,
"repo_name": "ncbray/pystream",
"id": "4e9591c10f3da9abc1fd6ca09bcc48bafb34aa21",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/tests/full/loops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2982"
},
{
"name": "C++",
"bytes": "23809"
},
{
"name": "Python",
"bytes": "2232087"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
}
|
import ast
import eventlet
import json
import re
import requests
from oslo_config import cfg
from mistralclient.api import client as mistral
from mistralclient.api.v2 import action_executions
from st2common.constants import action as action_constants
from st2common import log as logging
from st2actions import handlers
LOG = logging.getLogger(__name__)
STATUS_MAP = dict()
STATUS_MAP[action_constants.LIVEACTION_STATUS_REQUESTED] = 'RUNNING'
STATUS_MAP[action_constants.LIVEACTION_STATUS_SCHEDULED] = 'RUNNING'
STATUS_MAP[action_constants.LIVEACTION_STATUS_RUNNING] = 'RUNNING'
STATUS_MAP[action_constants.LIVEACTION_STATUS_SUCCEEDED] = 'SUCCESS'
STATUS_MAP[action_constants.LIVEACTION_STATUS_FAILED] = 'ERROR'
def get_handler():
return MistralCallbackHandler
def get_action_execution_id_from_url(url):
match = re.search('(.+)/action_executions/(.+)', url)
if not match or len(match.groups()) != 2:
raise ValueError('Unable to extract the action execution ID '
'from the callback URL (%s).' % (url))
return match.group(2)
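# Illustrative example (not in the original file):
#   get_action_execution_id_from_url('http://mistral:8989/v2/action_executions/abc123')
#   returns 'abc123'.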
class MistralCallbackHandler(handlers.ActionExecutionCallbackHandler):
@staticmethod
def callback(url, context, status, result):
if status not in [action_constants.LIVEACTION_STATUS_SUCCEEDED,
action_constants.LIVEACTION_STATUS_FAILED]:
return
try:
if isinstance(result, basestring) and len(result) > 0 and result[0] in ['{', '[']:
value = ast.literal_eval(result)
if type(value) in [dict, list]:
result = value
action_execution_id = get_action_execution_id_from_url(url)
output = json.dumps(result) if type(result) in [dict, list] else str(result)
data = {'state': STATUS_MAP[status], 'output': output}
client = mistral.client(
mistral_url=cfg.CONF.mistral.v2_base_url,
username=cfg.CONF.mistral.keystone_username,
api_key=cfg.CONF.mistral.keystone_password,
project_name=cfg.CONF.mistral.keystone_project_name,
auth_url=cfg.CONF.mistral.keystone_auth_url)
manager = action_executions.ActionExecutionManager(client)
for i in range(cfg.CONF.mistral.max_attempts):
try:
LOG.info('Sending callback to %s with data %s.', url, data)
manager.update(action_execution_id, **data)
break
except requests.exceptions.ConnectionError as conn_exc:
if i == cfg.CONF.mistral.max_attempts - 1:
raise conn_exc
else:
LOG.warn(conn_exc)
eventlet.sleep(cfg.CONF.mistral.retry_wait)
except Exception as e:
LOG.exception(e)
|
{
"content_hash": "cc63ff74d4d5571514bf6ea4423ff6f3",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 94,
"avg_line_length": 36.2,
"alnum_prop": 0.6256906077348067,
"repo_name": "alfasin/st2",
"id": "ce2b41e1a9268a8f34d0fc68c7663c435e6ce9bd",
"size": "3676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2actions/st2actions/handlers/mistral.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "36110"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2907491"
},
{
"name": "Shell",
"bytes": "16363"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
import pytest
from flask import request
from werkzeug.exceptions import Forbidden
from app.utils.user import user_has_permissions
@pytest.mark.parametrize(
"permissions",
(
pytest.param(
[
# Route has a permission which the user doesn’t have
"send_messages"
],
marks=pytest.mark.xfail(raises=Forbidden),
),
[
# Route has one of the permissions which the user has
"manage_service"
],
[
# Route has more than one of the permissions which the user has
"manage_templates",
"manage_service",
],
[
# Route has one of the permissions which the user has, and one they do not
"manage_service",
"send_messages",
],
[
# Route has no specific permissions required
],
),
)
def test_permissions(
client_request,
permissions,
api_user_active,
):
request.view_args.update({"service_id": "foo"})
api_user_active["permissions"] = {"foo": ["manage_users", "manage_templates", "manage_settings"]}
api_user_active["services"] = ["foo", "bar"]
client_request.login(api_user_active)
@user_has_permissions(*permissions)
def index():
pass
index()
def test_restrict_admin_usage(
client_request,
platform_admin_user,
):
request.view_args.update({"service_id": "foo"})
client_request.login(platform_admin_user)
@user_has_permissions(restrict_admin_usage=True)
def index():
pass
with pytest.raises(Forbidden):
index()
def test_no_user_returns_redirect_to_sign_in(client_request):
client_request.logout()
@user_has_permissions()
def index():
pass
response = index()
assert response.status_code == 302
assert response.location.startswith("/sign-in?next=")
def test_user_has_permissions_for_organisation(
client_request,
api_user_active,
):
api_user_active["organisations"] = ["org_1", "org_2"]
client_request.login(api_user_active)
request.view_args = {"org_id": "org_2"}
@user_has_permissions()
def index():
pass
index()
def test_platform_admin_can_see_orgs_they_dont_have(
client_request,
platform_admin_user,
):
platform_admin_user["organisations"] = []
client_request.login(platform_admin_user)
request.view_args = {"org_id": "org_2"}
@user_has_permissions()
def index():
pass
index()
def test_cant_use_decorator_without_view_args(
client_request,
platform_admin_user,
):
client_request.login(platform_admin_user)
request.view_args = {}
@user_has_permissions()
def index():
pass
with pytest.raises(NotImplementedError):
index()
def test_user_doesnt_have_permissions_for_organisation(
client_request,
api_user_active,
):
api_user_active["organisations"] = ["org_1", "org_2"]
client_request.login(api_user_active)
request.view_args = {"org_id": "org_3"}
@user_has_permissions()
def index():
pass
with pytest.raises(Forbidden):
index()
def test_user_with_no_permissions_to_service_goes_to_templates(
client_request,
api_user_active,
):
api_user_active["permissions"] = {"foo": ["manage_users", "manage_templates", "manage_settings"]}
api_user_active["services"] = ["foo", "bar"]
client_request.login(api_user_active)
request.view_args = {"service_id": "bar"}
@user_has_permissions()
def index():
pass
index()
|
{
"content_hash": "85a68d54d2cf05cb936bd779f29a2b4a",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 101,
"avg_line_length": 22.52173913043478,
"alnum_prop": 0.6064533921676779,
"repo_name": "alphagov/notifications-admin",
"id": "37f085c58206b0724fded701fab646d3f73f459e",
"size": "3628",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/app/utils/test_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "691367"
},
{
"name": "JavaScript",
"bytes": "435783"
},
{
"name": "Jinja",
"bytes": "1979"
},
{
"name": "Makefile",
"bytes": "6501"
},
{
"name": "Procfile",
"bytes": "117"
},
{
"name": "Python",
"bytes": "2762624"
},
{
"name": "SCSS",
"bytes": "117758"
},
{
"name": "Shell",
"bytes": "1890"
}
],
"symlink_target": ""
}
|
"""
originally from http://www.djangosnippets.org/snippets/828/ by dnordberg
"""
from django.conf import settings
from django.core.management.base import CommandError, BaseCommand
from django.db import connection
import django
import logging
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false',
dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--no-utf8', action='store_true',
dest='no_utf8_support', default=False,
help='Tells Django to not create a UTF-8 charset database'),
make_option('-U', '--user', action='store',
dest='user', default=None,
help='Use another user for the database then defined in settings.py'),
make_option('-P', '--password', action='store',
dest='password', default=None,
help='Use another password for the database then defined in settings.py'),
make_option('-D', '--dbname', action='store',
dest='dbname', default=None,
help='Use another database name then defined in settings.py (For PostgreSQL this defaults to "template1")'),
)
help = "Resets the database for this project."
def handle(self, *args, **options):
"""
Resets the database for this project.
        Note: the transaction wrappers are in reverse order as a workaround for
        autocommit; does anybody know how to do this the right way?
"""
if django.get_version()>="1.2":
raise CommandError, "reset_db is currently not compatible with Django 1.2 or higher"
if options.get('interactive'):
confirm = raw_input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (settings.DATABASE_NAME,))
else:
confirm = 'yes'
if confirm != 'yes':
print "Reset cancelled."
return
engine = settings.DATABASE_ENGINE
user = options.get('user', settings.DATABASE_USER)
if user==None:
user = settings.DATABASE_USER
password = options.get('password', settings.DATABASE_PASSWORD)
if password==None:
password = settings.DATABASE_PASSWORD
if engine == 'sqlite3':
import os
try:
logging.info("Unlinking sqlite3 database")
os.unlink(settings.DATABASE_NAME)
except OSError:
pass
elif engine == 'mysql':
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if settings.DATABASE_HOST.startswith('/'):
kwargs['unix_socket'] = settings.DATABASE_HOST
else:
kwargs['host'] = settings.DATABASE_HOST
if settings.DATABASE_PORT:
kwargs['port'] = int(settings.DATABASE_PORT)
connection = Database.connect(**kwargs)
drop_query = 'DROP DATABASE IF EXISTS %s' % settings.DATABASE_NAME
utf8_support = options.get('no_utf8_support', False) and '' or 'CHARACTER SET utf8'
create_query = 'CREATE DATABASE %s %s' % (settings.DATABASE_NAME, utf8_support)
logging.info('Executing... "' + drop_query + '"')
connection.query(drop_query)
logging.info('Executing... "' + create_query + '"')
connection.query(create_query)
elif engine == 'postgresql' or engine == 'postgresql_psycopg2':
if engine == 'postgresql':
import psycopg as Database
elif engine == 'postgresql_psycopg2':
import psycopg2 as Database
if settings.DATABASE_NAME == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured, "You need to specify DATABASE_NAME in your Django settings file."
database_name = options.get('dbname', 'template1')
conn_string = "dbname=%s" % database_name
if settings.DATABASE_USER:
conn_string += " user=%s" % user
if settings.DATABASE_PASSWORD:
conn_string += " password='%s'" % password
if settings.DATABASE_HOST:
conn_string += " host=%s" % settings.DATABASE_HOST
if settings.DATABASE_PORT:
conn_string += " port=%s" % settings.DATABASE_PORT
connection = Database.connect(conn_string)
connection.set_isolation_level(0) #autocommit false
cursor = connection.cursor()
drop_query = 'DROP DATABASE %s' % settings.DATABASE_NAME
logging.info('Executing... "' + drop_query + '"')
try:
cursor.execute(drop_query)
except Database.ProgrammingError, e:
logging.info("Error: "+str(e))
            # Encoding should be SQL_ASCII (7-bit postgres default) or preferred UTF8 (8-bit)
create_query = ("""
CREATE DATABASE %s
WITH OWNER = %s
ENCODING = 'UTF8'
TABLESPACE = %s;
""" % (settings.DATABASE_NAME, settings.DATABASE_USER, settings.DEFAULT_TABLESPACE))
logging.info('Executing... "' + create_query + '"')
cursor.execute(create_query)
else:
raise CommandError, "Unknown database engine %s" % engine
print "Reset successful."
|
{
"content_hash": "fe10e2f0ac73053b14084773e08c495b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 128,
"avg_line_length": 41.73381294964029,
"alnum_prop": 0.5699017410791243,
"repo_name": "2013Commons/HUE-SHARK",
"id": "768f6dc841ec339c9c9a881111705c8aaadac5a7",
"size": "5801",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "build/env/lib/python2.7/site-packages/django_extensions-0.5-py2.7.egg/django_extensions/management/commands/reset_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9992379"
},
{
"name": "C++",
"bytes": "199612"
},
{
"name": "CSS",
"bytes": "419753"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3683071"
},
{
"name": "JavaScript",
"bytes": "1076553"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "40522057"
},
{
"name": "SQL",
"bytes": "522"
},
{
"name": "Shell",
"bytes": "27739"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "XSLT",
"bytes": "190688"
}
],
"symlink_target": ""
}
|
import time
import sys
import numpy as np
# Encapsulate our neural network in a class
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
"""Create a SentimenNetwork with the given settings
Args:
reviews(list) - List of reviews used for training
labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
hidden_nodes(int) - Number of nodes to create in the hidden layer
learning_rate(float) - Learning rate to use while training
"""
# Assign a seed to our random number generator to ensure we get
        # reproducible results during development
np.random.seed(1)
# process the reviews and their associated labels so that everything
# is ready for training
self.pre_process_data(reviews, labels)
# Build the network to have the number of hidden nodes and the learning rate that
# were passed into this initializer. Make the same number of input nodes as
# there are vocabulary words and create a single output node.
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
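    # Illustrative construction (not part of the original code); assumes
    # `reviews` and `labels` lists have been loaded elsewhere:
    #   mlp = SentimentNetwork(reviews[:-1000], labels[:-1000], learning_rate=0.1)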
def pre_process_data(self, reviews, labels):
# populate review_vocab with all of the words in the given reviews
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
# Convert the vocabulary set to a list so we can access words via indices
self.review_vocab = list(review_vocab)
# populate label_vocab with all of the words in the given labels.
label_vocab = set()
for label in labels:
label_vocab.add(label)
# Convert the label vocabulary set to a list so we can access labels via indices
self.label_vocab = list(label_vocab)
# Store the sizes of the review and label vocabularies.
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
# Create a dictionary of words in the vocabulary mapped to index positions
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
# Create a dictionary of labels mapped to index positions
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Store the learning rate
self.learning_rate = learning_rate
# Initialize weights
# These are the weights between the input layer and the hidden layer.
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
# These are the weights between the hidden layer and the output layer.
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
# The input layer, a two-dimensional matrix with shape 1 x input_nodes
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
# NOTE: This if-check was not in the version of this method created in Project 2,
# and it appears in Andrew's Project 3 solution without explanation.
# It simply ensures the word is actually a key in word2index before
# accessing it, which is important because accessing an invalid key
            # will raise an exception in Python. This allows us to ignore unknown
# words encountered in new reviews.
if(word in self.word2index.keys()):
## New for Project 4: changed to set to 1 instead of add 1
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
        # make sure we have a matching number of reviews and labels
assert(len(training_reviews) == len(training_labels))
# Keep track of correct predictions to display accuracy during training
correct_so_far = 0
# Remember when we started for printing time statistics
start = time.time()
# loop through all the given reviews and run a forward and backward pass,
# updating weights for every item
for i in range(len(training_reviews)):
# Get the next review and its correct label
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
# Keep track of correct predictions.
if(layer_2 >= 0.5 and label == 'POSITIVE'):
correct_so_far += 1
elif(layer_2 < 0.5 and label == 'NEGATIVE'):
correct_so_far += 1
# For debug purposes, print out our prediction accuracy and speed
# throughout the training process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
+ " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
"""
Attempts to predict the labels for the given testing_reviews,
and uses the test_labels to calculate the accuracy of those predictions.
"""
# keep track of how many correct predictions we make
correct = 0
# we'll time how many predictions per second we make
start = time.time()
# Loop through each of the given reviews and call run to predict
# its label.
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
# For debug purposes, print out our prediction accuracy and speed
# throughout the prediction process.
elapsed_time = float(time.time() - start)
reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
+ " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
"""
Returns a POSITIVE or NEGATIVE prediction for the given review.
"""
# Run a forward pass through the network, like in the "train" function.
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
        # Return POSITIVE for output values greater than or equal to 0.5;
        # return NEGATIVE for all other values
if(layer_2[0] >= 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
|
{
"content_hash": "6e0f836948fdce7a1cb65178d4c8b482",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 151,
"avg_line_length": 42.81858407079646,
"alnum_prop": 0.580241810478454,
"repo_name": "oscarmore2/deep-learning-study",
"id": "5cf84354afa5425877e2e31f663c5ef00395d0e6",
"size": "9677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentiment-network/2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "640957"
},
{
"name": "CSS",
"bytes": "31022"
},
{
"name": "HTML",
"bytes": "7966924"
},
{
"name": "Jupyter Notebook",
"bytes": "67495315"
},
{
"name": "Python",
"bytes": "164547"
}
],
"symlink_target": ""
}
|
name = 'Zed A. Shaw'
age = 35 # not a lie
height = 74 # inches
weight = 180 # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print "Let's talk about %s." % name
print "He's %d inches tall." % height
print "He's %d pounds heavy." % weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." % (
age, height, weight, age + height + weight)
|
{
"content_hash": "3ae0ec7bdef6fbb0587c85b1392104b5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 30.27777777777778,
"alnum_prop": 0.6440366972477064,
"repo_name": "Doujins/LPTHW-Notes",
"id": "8377d07a95d66003b7b3881e3b620ec3a1190e71",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "习题/ext5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3832"
}
],
"symlink_target": ""
}
|
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from ...python.Exceptions import *
from ..msg.Type import *
import types
class ArrayValue(object):
"""
An array of values, where each value is of arbitrary type
chosen from the basic python/etch type: Boolean, Byte, Short,
Integer, Long, Float, Double, String and array of those, the
extended type ArrayValue and StructValue, and specific types
supported by ValueFactory.
ArrayValue is not protected against concurrent access.
"""
def __init__(self, array, typeCode=0, customStructType=None, dim=0):
"""
Constructs the ArrayValue
@param array
@param dim
@param customStructType
@param typeCode
"""
        if array is None:
            raise NullPointerException, "array == None"
        if not isinstance(array, (types.ListType, types.TupleType)):
            raise IllegalArgumentException, "array is not a list or tuple"
self.__array = list(array)
self.__typeCode = typeCode
self.__customStructType = customStructType
self.__dim = dim
self.__addIndex = 0
def typeCode(self):
"""
@return the TypeCode for this array value
"""
return self.__typeCode
def customStructType(self):
"""
@return a struct type if a custom type code
"""
return self.__customStructType
def dim(self):
"""
@return the dimensionality of the array
"""
return self.__dim
def __iter__(self):
return self.__array.__iter__()
def size(self):
"""
Alias for len(self)
"""
return self.__len__()
def __len__(self):
"""
@return the number of elements in the array
"""
return len(self.__array)
def __getitem__(self, idx):
"""
@param idx
@return the element at the specified index
"""
return self.__array[idx]
def compact(self):
"""
Reallocates the array so that it is only as long as needed.
"""
if self.__addIndex == self.size():
return
self.__array = self.__array[0:self.__addIndex]
def add(self, value):
"""
Adds the value to the end of the array
@param value
"""
self.__array[self.__addIndex] = value
self.__addIndex += 1
def getArray(self):
"""
@return the array value
"""
return self.__array
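# --- Usage sketch (added for illustration; not part of the original file) ---
# The preallocated list, type code 0 and dimension below are arbitrary
# placeholders rather than real Etch constants.
#
# av = ArrayValue([None, None, None], typeCode=0, dim=1)
# av.add(1)
# av.add(2)
# av.compact()            # trims the unused trailing slot
# print len(av), av[0]    # -> 2 1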
|
{
"content_hash": "d33e3689d087f7130123ee60170eb24d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 74,
"avg_line_length": 30.803418803418804,
"alnum_prop": 0.5566037735849056,
"repo_name": "OBIGOGIT/etch",
"id": "2792bcce27240b9ace9d15a37267138ec489e81f",
"size": "3604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "binding-python/runtime/src/main/python/etch/binding/transport/ArrayValue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2513090"
},
{
"name": "C#",
"bytes": "1514713"
},
{
"name": "C++",
"bytes": "1109601"
},
{
"name": "CSS",
"bytes": "143"
},
{
"name": "Go",
"bytes": "158833"
},
{
"name": "Java",
"bytes": "2451144"
},
{
"name": "Perl",
"bytes": "290"
},
{
"name": "Python",
"bytes": "444086"
},
{
"name": "Shell",
"bytes": "62900"
},
{
"name": "VimL",
"bytes": "13679"
},
{
"name": "XSLT",
"bytes": "12890"
}
],
"symlink_target": ""
}
|
import os
import platform
import sys
from os.path import join
from numpy.distutils.system_info import platform_bits
is_msvc = (platform.platform().startswith('Windows') and
platform.python_compiler().startswith('MS'))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
if sys.platform == 'win32':
libs.extend(['Advapi32', 'Kernel32'])
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
if sys.platform[:3] == "aix":
defs = [('_LARGE_FILES', None)]
else:
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1')]
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_data_dir('tests')
config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
EXTRA_LIBRARIES = ['npyrandom']
if os.name != 'nt':
# Math lib
EXTRA_LIBRARIES.append('m')
# Some bit generators exclude GCC inlining
EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
if is_msvc and platform_bits == 32:
# 32-bit windows requires explicit sse2 option
EXTRA_COMPILE_ARGS += ['/arch:SSE2']
elif not is_msvc:
# Some bit generators require c99
EXTRA_COMPILE_ARGS += ['-std=c99']
# Use legacy integer variable sizes
LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
PCG64_DEFS = []
# One can force emulated 128-bit arithmetic if one wants.
#PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd']
# npyrandom - a library like npymath
npyrandom_sources = [
'src/distributions/logfactorial.c',
'src/distributions/distributions.c',
'src/distributions/random_mvhg_count.c',
'src/distributions/random_mvhg_marginals.c',
'src/distributions/random_hypergeometric.c',
]
config.add_installed_library('npyrandom',
sources=npyrandom_sources,
install_dir='lib',
build_info={
'include_dirs' : [], # empty list required for creating npyrandom.h
'extra_compiler_args' : (['/GL-'] if is_msvc else []),
})
for gen in ['mt19937']:
# gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
config.add_extension(f'_{gen}',
sources=[f'_{gen}.c',
f'src/{gen}/{gen}.c',
f'src/{gen}/{gen}-jump.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'_{gen}.pyx'],
define_macros=defs,
)
for gen in ['philox', 'pcg64', 'sfc64']:
# gen.pyx, src/gen/gen.c
_defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
config.add_extension(f'_{gen}',
sources=[f'_{gen}.c',
f'src/{gen}/{gen}.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'_{gen}.pyx',
'bit_generator.pyx', 'bit_generator.pxd'],
define_macros=_defs,
)
for gen in ['_common', 'bit_generator']:
# gen.pyx
config.add_extension(gen,
sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
include_dirs=['.', 'src'],
depends=depends + [f'{gen}.pyx', f'{gen}.pxd',],
define_macros=defs,
)
        config.add_data_files(f'{gen}.pxd')
for gen in ['_generator', '_bounded_integers']:
# gen.pyx, src/distributions/distributions.c
config.add_extension(gen,
sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
include_dirs=['.', 'src'],
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'{gen}.pyx'],
define_macros=defs,
)
config.add_data_files('_bounded_integers.pxd')
config.add_extension('mtrand',
sources=['mtrand.c',
'src/legacy/legacy-distributions.c',
'src/distributions/distributions.c',
],
include_dirs=['.', 'src', 'src/legacy'],
libraries=['m'] if os.name != 'nt' else [],
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + ['mtrand.pyx'],
define_macros=defs + LEGACY_DEFS,
)
config.add_data_files(*depends)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
{
"content_hash": "56ed3a64c4bcef925db714bb378adb48",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 80,
"avg_line_length": 41.62068965517241,
"alnum_prop": 0.48666114333057164,
"repo_name": "WarrenWeckesser/numpy",
"id": "5d6ff2c8bdf557de85ced311b0cbaa002d946450",
"size": "6035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/random/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9059444"
},
{
"name": "C++",
"bytes": "174989"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8313055"
},
{
"name": "Shell",
"bytes": "9612"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
import sys
import gzip
import matplotlib.pyplot as plt
x = []
y = []
line_count = 0
try:
for line in gzip.open(sys.argv[1]):
try:
tweet = eval(line.strip())
line_count += 1
if "coordinates" in tweet and tweet["coordinates"] is not None:
x.append(tweet["coordinates"]["coordinates"][0])
y.append(tweet["coordinates"]["coordinates"][1])
except:
pass
except:
pass
print("Read", line_count, "and found", len(x), "geolocated tweets", file=sys.stderr)
plt.plot(x, y, '*')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.savefig(sys.argv[1] + '.png')
plt.close()
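# Usage sketch (assumed invocation; the file name is a placeholder):
#   python plot_tweets.py tweets.gz
# Each line of the gzipped file is expected to be a Python-literal tweet dict;
# the geolocated points are written to tweets.gz.png next to the input file.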
|
{
"content_hash": "bf90e285a62f3f0cc5b1d606dc332d9b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 22.3,
"alnum_prop": 0.57847533632287,
"repo_name": "bmtgoncalves/TorinoCourse",
"id": "241c5489cb0d6f109f29da1be0dfd0b0717ef233",
"size": "693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot_tweets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52819"
}
],
"symlink_target": ""
}
|
from pyramid.httpexceptions import HTTPFound
from pyramid.security import remember
from pyramid.security import forget
from pyramid.security import authenticated_userid
from pyramid.url import route_url
from pyramid.view import view_config
from pyramid.exceptions import Forbidden
import transaction
import formencode
from formencode import validators
from mudwyrm.models import DBSession, MAX_STR_LENGTH
from mudwyrm.models.auth import User, Group
from mudwyrm.forms import Schema, Form, FormRenderer
def passwords_match(value, field):
if field.parent.password.value != value:
raise validators.ValidationError(u"Passwords do not match")
class LoginSchema(Schema):
name = validators.MaxLength(MAX_STR_LENGTH)
password = validators.MaxLength(MAX_STR_LENGTH)
class UniqueUsername(formencode.FancyValidator):
def _to_python(self, value, state):
db = DBSession()
user = db.query(User.id).filter(User.name == value).first()
if user:
raise formencode.Invalid("That username is already taken",
value, state)
return value
class RegistrationSchema(Schema):
name = formencode.All(
validators.MaxLength(MAX_STR_LENGTH, not_empty=True),
UniqueUsername())
password = formencode.All(
validators.MaxLength(MAX_STR_LENGTH),
validators.MinLength(6))
password_confirmation = validators.String()
email = validators.Email(not_empty=True)
chained_validators = [
validators.FieldsMatch('password', 'password_confirmation')]
@view_config(name='login',
renderer='mudwyrm:templates/auth/login.mako')
def login(request):
came_from = request.params.get('came_from', '/')
auth_failed = False
form = Form(request, LoginSchema)
if form.validate():
db = DBSession()
user = db.query(User).filter(User.name == form.data['name']).first()
if user and user.validate_password(form.data['password']):
return HTTPFound(location=came_from,
headers=remember(request, user.id))
auth_failed = True
return dict(
auth_failed = auth_failed,
form = FormRenderer(form)
)
@view_config(name='logout')
def logout(request):
request.remote_user = None
return HTTPFound('/', headers=forget(request))
@view_config(renderer='mudwyrm:templates/auth/forbidden.mako',
context=Forbidden)
def forbidden(request):
if not authenticated_userid(request):
return HTTPFound('/login?came_from=%s' % request.url)
return {}
@view_config(name='register',
renderer='mudwyrm:templates/auth/register.mako')
def register(request):
db = DBSession()
form = Form(request, RegistrationSchema)
if form.validate():
user = form.bind(User(), exclude=['password_confirmation'])
group = db.query(Group).filter(Group.name == 'users').one()
user.groups.append(group)
db.add(user)
db.flush()
transaction.commit()
return HTTPFound(location=request.route_url('root'))
return dict(form=FormRenderer(form))
|
{
"content_hash": "334d08ff80d9311ac86a007f0874aa73",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 35.48314606741573,
"alnum_prop": 0.6719442685243825,
"repo_name": "sh-ft/mudwyrm",
"id": "06e98abca8235a19da475870de2b6c4ea7e655d2",
"size": "3158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mudwyrm/views/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1470752"
},
{
"name": "Python",
"bytes": "29536"
},
{
"name": "Ruby",
"bytes": "339"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import alembic_autogenerate_enums
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
db_url_escaped = current_app.config.get('SQLALCHEMY_DATABASE_URI').replace('%', '%%')
config.set_main_option(
"sqlalchemy.url", db_url_escaped
)
target_metadata = current_app.extensions["migrate"].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
**current_app.extensions["migrate"].configure_args,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
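# Invocation sketch (not part of the original file): with Flask-Migrate this
# env.py is normally driven indirectly, e.g.
#   flask db migrate -m "add table"   # autogenerate a revision
#   flask db upgrade                  # apply it via run_migrations_online()
# The exact CLI entry point depends on how the hosting application wires up
# Flask-Migrate.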
|
{
"content_hash": "31f0413dba46eba8be59393269bf7591",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 85,
"avg_line_length": 27,
"alnum_prop": 0.7019607843137254,
"repo_name": "Netflix/lemur",
"id": "91fa5fcbb6cfb91561de35be5931443329efe8f8",
"size": "2295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemur/migrations/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2728"
},
{
"name": "Dockerfile",
"bytes": "2597"
},
{
"name": "HTML",
"bytes": "314713"
},
{
"name": "JavaScript",
"bytes": "15496"
},
{
"name": "Makefile",
"bytes": "3791"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1530505"
},
{
"name": "Shell",
"bytes": "2339"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.plugins import plugins
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.models.organization_plugin import OrganizationPluginSerializer
from sentry.models import Project
class OrganizationPluginsEndpoint(OrganizationEndpoint):
def get(self, request, organization):
_plugins = []
for project in Project.objects.filter(organization=organization):
for plugin in plugins.configurable_for_project(project, version=None):
_plugins.append(
serialize(
plugin,
request.user,
OrganizationPluginSerializer(project),
)
)
return Response(_plugins)
|
{
"content_hash": "7a5c8628cefcb6cde7b306c5b51a4aa8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 90,
"avg_line_length": 34.88461538461539,
"alnum_prop": 0.6637265711135611,
"repo_name": "ifduyue/sentry",
"id": "57ef60bd565ce22c8464c571126408855750a95a",
"size": "907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
from ...Core.types import Types
from ...Core.commands import Commands
from ...Core.registers import Registers
""" Map: logic operator in programming language = the corresponding jump instruction in ASM """
relop_compare_map = {
'==': Commands.JE,
'!=': Commands.JNE,
'<': Commands.JG,
'<=': Commands.JGE,
'>': Commands.JL,
'>=': Commands.JLE
}
def relop_bexp(compiler, node):
""" Logic expression compilation """
finish_label = compiler.labels.create()
true_result_label = compiler.labels.create()
node.left.compile_asm(compiler)
compiler.types.pop()
node.right.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EBX)\
.add(Commands.POP, Registers.EAX)\
.add(Commands.CMP, [Registers.EBX, Registers.EAX])\
.add(relop_compare_map[node.op], true_result_label)
compiler.code.add(Commands.MOV, [Registers.EAX, 0])\
.add(Commands.JMP, finish_label)
compiler.code.add_label(true_result_label)\
.add(Commands.MOV, [Registers.EAX, 1])
compiler.code.add_label(finish_label)\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
def and_bexp(compiler, node):
""" 'AND' operator compilation """
finish_label = compiler.labels.create()
false_result_label = compiler.labels.create()
node.left.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EAX)
# If the first operand is 0, the second operand is not checked,
# but immediately go to the false result label (lazy check)
compiler.code.add(Commands.CMP, [Registers.EAX, 0])\
.add(Commands.JZ, false_result_label)
# Otherwise, we will check the second operand
node.right.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EAX)
# If the second operand is 0, then go to the false result label
compiler.code.add(Commands.CMP, [Registers.EAX, 0])\
.add(Commands.JZ, false_result_label)
    # If both operands are non-zero, the result of 'AND' is 1; write it to the eax register
    # and jump to the 'AND' completion label (bypassing the false result section).
compiler.code.add(Commands.MOV, [Registers.EAX, 1])\
.add(Commands.JMP, finish_label)
# Section of false result, 0 write to the eax register
compiler.code.add_label(false_result_label)\
.add(Commands.MOV, [Registers.EAX, 0])
# Complete execution 'AND'
compiler.code.add_label(finish_label)\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.BOOL)
def or_bexp(compiler, node):
""" 'OR' operator compilation """
finish_label = compiler.labels.create()
finish_true_label = compiler.labels.create()
node.left.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EAX)
# If the first operand is not equal 0, the second is not checked,
# but immediately go to the true result label (lazy check)
compiler.code.add(Commands.CMP, [Registers.EAX, 0])\
.add(Commands.JNZ, finish_true_label)
# Otherwise, we will check the second operand
node.right.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EAX)
# If the second operand is not equal 0, then go to the true result label
compiler.code.add(Commands.CMP, [Registers.EAX, 0])\
.add(Commands.JNZ, finish_true_label)
    # If both operands are 0, the result of 'OR' is 0; write it to the eax register
    # and jump to the 'OR' completion label (bypassing the true result section).
compiler.code.add(Commands.MOV, [Registers.EAX, 0])\
.add(Commands.JMP, finish_label)
# Section of true result, 1 write to the eax register
compiler.code.add_label(finish_true_label)\
.add(Commands.MOV, [Registers.EAX, 1])
# Complete execution 'OR'
compiler.code.add_label(finish_label)\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.BOOL)
def not_bexp(compiler, node):
""" 'NOT' operator compilation """
finish_label = compiler.labels.create()
false_result_label = compiler.labels.create()
node.exp.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EAX)
# If the operand is equal 0, go to the false result section
compiler.code.add(Commands.CMP, [Registers.EAX, 0])\
.add(Commands.JZ, false_result_label)
# Section of true result, 1 write to the eax register
compiler.code.add(Commands.MOV, [Registers.EAX, 1])\
.add(Commands.JMP, finish_label)
    # Section of false result, 0 write to the eax register
compiler.code.add_label(false_result_label)\
.add(Commands.MOV, [Registers.EAX, 0])
# Complete execution 'NOT'
compiler.code.add_label(finish_label)\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.BOOL)
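# Illustration (added for clarity; not part of the original module): for a
# comparison such as `a < b`, relop_bexp above emits roughly this pattern once
# both operands have been compiled and pushed on the stack:
#
#     pop ebx            ; right operand (b)
#     pop eax            ; left operand (a)
#     cmp ebx, eax       ; computes b - a
#     jg  true_label     ; b > a  <=>  a < b
#     mov eax, 0
#     jmp finish_label
#   true_label:
#     mov eax, 1
#   finish_label:
#     push eax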
|
{
"content_hash": "8622b7e27980adcbdfbb4268a4bea327",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 99,
"avg_line_length": 34.43835616438356,
"alnum_prop": 0.6758154335719968,
"repo_name": "PetukhovVictor/compiler",
"id": "d0263201ca2c8da2927ff7ba347fb49a78f33687",
"size": "5028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Compiler/ASM/Codegen/expressions/logical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "16410"
},
{
"name": "Python",
"bytes": "239647"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
}
|
"""Module to split file into multiple segments."""
import argparse
import itertools
HEADER_TEMPLATE = '''
#ifndef ICACHE_BUSTER_H
#define ICACHE_BUSTER_H
#include <vector>
class ICacheBuster {{
public:
ICacheBuster(size_t num_methods);
void RunNextMethod();
private:
std::vector<void (*)()> methods_;
size_t current_index_;
size_t num_subset_methods_;
}};
#endif
'''
INIT_METHOD_DECL_TEMPALTE = ('extern void ICBInit_{SPLIT_NUM}'
'(std::vector<void (*)()>& methods);')
SOURCE_TEMPLATE = '''
#include <algorithm>
#include <cassert>
#include <chrono>
#include <numeric>
#include <random>
#include "ICacheBuster.h"
{INIT_METHOD_DECLS}
ICacheBuster::ICacheBuster(size_t num_methods)
: methods_({NUM_METHODS}), current_index_(0),
num_subset_methods_(num_methods) {{
assert(num_methods < {NUM_METHODS});
{INIT_METHOD_CALLS}
// make a random permutation over data
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::shuffle(methods_.begin(), methods_.end(), std::default_random_engine(seed));
}}
void ICacheBuster::RunNextMethod() {{
methods_[current_index_]();
current_index_ = (current_index_ + 1) % num_subset_methods_;
}}
'''
INIT_METHOD_CALL_TEMPLATE = ' ICBInit_{SPLIT_NUM}(methods_);'
METHOD_CODE_TEMPLATE = 'void ICBMethod_{METHOD_NUM}() {{ }}'
INIT_METHOD_CODE_TEMPLATE = '''
void ICBInit_{SPLIT_NUM}(std::vector<void (*)()>& methods) {{
{STORE_METHODS_CODE}
}}
'''
STORE_METHOD_CODE_TEMPLATE = (' methods[{METHOD_NUM}] = '
'&ICBMethod_{METHOD_NUM};')
def grouper(n, iterable, fillvalue=None):
args = [iter(iterable)] * n
results = [[e for e in t if e is not None]
for t in itertools.izip_longest(*args, fillvalue=fillvalue)]
if len(results) > 1 and len(results[-1]) != len(results[-2]):
results[-2] += results[-1]
del results[-1]
return results
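# Example (added for clarity; not in the original): grouper merges an
# undersized final group into the previous one, e.g.
#   grouper(3, range(8)) -> [[0, 1, 2], [3, 4, 5, 6, 7]]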
def main():
# Parse arguments
parser = argparse.ArgumentParser(description='Generate icache busting class')
parser.add_argument('--num_methods', type=int,
help='Number of methods to generate', required=True)
parser.add_argument('--output_dir', help='Location to save generated code',
required=True)
parser.add_argument('--num_splits',
help='Number of ways to split files for fast compilation',
type=int, default=1)
args = parser.parse_args()
# Generate the files
with open(args.output_dir + '/ICacheBuster.h', 'w') as f:
f.write(HEADER_TEMPLATE.format())
splits = grouper(args.num_methods / args.num_splits, xrange(args.num_methods))
for split_num in xrange(len(splits)):
with open('%s/ICacheBuster.part%d.cc' % (args.output_dir, split_num),
'w') as f:
f.write('#include <vector>\n\n')
methods_code = '\n'.join([METHOD_CODE_TEMPLATE.format(METHOD_NUM=i)
for i in splits[split_num]])
f.write(methods_code)
store_methods_code = '\n'.join(
[STORE_METHOD_CODE_TEMPLATE.format(METHOD_NUM=i)
for i in splits[split_num]])
f.write(INIT_METHOD_CODE_TEMPLATE.format(
STORE_METHODS_CODE=store_methods_code, SPLIT_NUM=split_num))
with open(args.output_dir + '/ICacheBuster.cc', 'w') as f:
init_methods_decl = '\n'.join(
[INIT_METHOD_DECL_TEMPALTE.format(SPLIT_NUM=i)
for i in xrange(args.num_splits)])
init_method_calls = '\n'.join(
[INIT_METHOD_CALL_TEMPLATE.format(SPLIT_NUM=i)
for i in xrange(args.num_splits)])
f.write(SOURCE_TEMPLATE.format(NUM_METHODS=args.num_methods,
INIT_METHOD_DECLS=init_methods_decl,
INIT_METHOD_CALLS=init_method_calls))
if __name__ == '__main__':
main()
|
{
"content_hash": "c6f9ff2a07eb1b005893ae80eacebc0b",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 83,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.6238603803073717,
"repo_name": "mbrukman/oldisim",
"id": "8ee2fa5072955ac85f850fd26e1615a586861c5e",
"size": "4458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workloads/search/gen_icache_buster.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1440"
},
{
"name": "C++",
"bytes": "236152"
},
{
"name": "Python",
"bytes": "17579"
},
{
"name": "Shell",
"bytes": "5912"
}
],
"symlink_target": ""
}
|
from importlib import import_module
# For update times
from time import time as unix_timestamp
# Enable custom node function import
import sys, os
dup = os.path.dirname
env_root = dup(dup(dup(__file__)))
sys.path.append(os.path.join(env_root, 'data'))
import get_node
class InvalidNodeError(Exception):
pass
class InvalidNodeFunctionError(Exception):
pass
class InvalidNodeFunctionAPIError(Exception):
pass
def compute_node(db, node, policy='init'):
'''
Ensure that node is up-to-date and if not then execute it's function.
Return the ensured node.
Computation policies:
policy='init' (default)
compute only if not computed before.
policy='ensure'
ensure that cache is up-to-date and execute only if necessary.
policy='force'
compute always, disregard any caches.
'''
print 'Computing node: ' + node['name']
#import pdb; pdb.set_trace()
if policy not in ['init', 'ensure', 'force']:
raise ValueError('Unknown policy: ' + policy)
# Node might not have been found
if node is None:
raise InvalidNodeError('Invalid node: None')
# Ensure that the object really has sufficient properties
if any(map(lambda x: x not in node, ['name', 'function', 'output'])):
raise InvalidNodeError('Node does not have required attributes.')
# Aliases to simplify names.
input_names = node['input']
input_times = node['input_timestamp']
fn_versions = node['function_version']
output = node['output']
fn_names = node['function']
# Ensure that the object really has sufficient properties
if not isinstance(fn_versions, list):
raise InvalidNodeError('Node\'s function versions should be a list.')
if not isinstance(fn_names, list):
raise InvalidNodeError('Node\'s function should be a list.')
# Node without function is a data node, and does not need execution
if len(fn_names) < 1:
# A data node, no functions to execute
return node
# If node has been run before, return it without recomputation
if policy == 'init' and len(fn_versions) == len(fn_names):
return node
# Gather the data for execution. Load input nodes:
input_nodes = map(lambda n: get_node.by_name_computed(db, n, policy), input_names)
# Import functions.
fn_modules = []
for fn_name in fn_names:
# Import the function
try:
modu = import_module('functions.' + fn_name)
except ImportError as e:
raise InvalidNodeFunctionError('No node function ' + fn_name +
' can be found or there is problem in the module: ' + str(e))
# Assert: import successful
if not hasattr(modu, 'execute') or not hasattr(modu, 'version'):
raise InvalidNodeFunctionAPIError('Node function should have an ' +
'execute method and version property.')
fn_modules.append(modu)
# If the function versions have remained the same
# and cached output has same timestamp as the input nodes,
# then do not recompute. However, under 'force' policy, compute
# anyway. Under 'init' policy, if we got this far, nothing is up-to-date.
if policy != 'force':
if len(input_times) == len(input_nodes): # Test if virgin
if all(map(lambda nt: nt[0]['timestamp'] == nt[1],
zip(input_nodes, input_times))):
# Output is up to date
if len(fn_names) == len(fn_versions):
if all(map(lambda fv: fv[0].version == fv[1],
zip(fn_modules, fn_versions))):
# no reason to recompute
return node
# Execute functions from left to right, feeding the prev output to
# next input. Give input nodes as parameters to the first.
input_args = input_nodes
for modu in fn_modules:
# Execute the function TODO function composition
input_args = modu.execute(input_args)
# The last input_args is the final output.
# Store results and update timestamps. Like a cache.
node['output'] = input_args
node['timestamp'] = int(unix_timestamp())
node['input_timestamp'] = map(lambda n: n['timestamp'], input_nodes)
node['function_version'] = map(lambda m: m.version, fn_modules)
#pdb.set_trace()
db.nodes.replace_one({'_id': node['_id']}, node)
return node
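# Usage sketch (added for illustration; not part of the original module).
# The pymongo wiring and the node name 'daily_average' are assumptions about
# how callers obtain `db` and name their nodes.
#
# import pymongo
# import get_node
# db = pymongo.MongoClient().gazelledb
# node = get_node.by_name_computed(db, 'daily_average', 'ensure')
# print node['output']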
|
{
"content_hash": "5faeb55929a293ab08721e1bd60cd723",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 86,
"avg_line_length": 35.616,
"alnum_prop": 0.6343216531895777,
"repo_name": "axelpale/gazelledb",
"id": "12e9fa2324f99793c9cf0eb737410312a6492131",
"size": "4452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gazelledb/lib/compute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "685"
},
{
"name": "Python",
"bytes": "42065"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1beta1
async def sample_list_models():
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelsRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_models(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END aiplatform_v1beta1_generated_ModelService_ListModels_async]
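# Note (added; not part of the generated sample): the coroutine above can be
# driven from a synchronous entry point with asyncio, e.g.
#
# import asyncio
# asyncio.run(sample_list_models())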
|
{
"content_hash": "08336541ce0533931732934ca6a5c5d2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 26.05,
"alnum_prop": 0.7159309021113244,
"repo_name": "googleapis/python-aiplatform",
"id": "9c4eacdd0ad56ac994d320f57772a7cdd587c61d",
"size": "1911",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
import httplib
import json
import logging
from webob import Response
from ryu.app.wsgi import ControllerBase
from ryu.base import app_manager
from ryu.controller import conf_switch
from ryu.lib import dpid as dpid_lib
# REST API for switch configuration
#
# get all the switches
# GET /v1.0/conf/switches
#
# get all the configuration keys of a switch
# GET /v1.0/conf/switches/<dpid>
#
# delete all the configuration of a switch
# DELETE /v1.0/conf/switches/<dpid>
#
# set the <key> configuration of a switch
# PUT /v1.0/conf/switches/<dpid>/<key>
#
# get the <key> configuration of a switch
# GET /v1.0/conf/switches/<dpid>/<key>
#
# delete the <key> configuration of a switch
# DELETE /v1.0/conf/switches/<dpid>/<key>
#
# where
# <dpid>: datapath id in 16 hex
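#
# Example interaction (illustrative only; host, port, dpid and the key name
# below are placeholders, assuming the ryu WSGI server listens on
# localhost:8080):
#
#   import httplib, json
#   conn = httplib.HTTPConnection('localhost', 8080)
#   conn.request('PUT', '/v1.0/conf/switches/0000000000000001/some_key',
#                json.dumps('some value'))
#   conn.getresponse().read()                      # 201 CREATED
#   conn.request('GET', '/v1.0/conf/switches/0000000000000001/some_key')
#   print json.loads(conn.getresponse().read())    # -> u'some value'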
class ConfSwitchController(ControllerBase):
def __init__(self, req, link, data, **config):
super(ConfSwitchController, self).__init__(req, link, data, **config)
self.conf_switch = data
def list_switches(self, _req, **_kwargs):
dpids = self.conf_switch.dpids()
body = json.dumps([dpid_lib.dpid_to_str(dpid) for dpid in dpids])
return Response(content_type='application/json', body=body)
@staticmethod
def _do_switch(dpid, func, ret_func):
dpid = dpid_lib.str_to_dpid(dpid)
try:
ret = func(dpid)
except KeyError:
return Response(status=httplib.NOT_FOUND,
body='no dpid is found %s' %
dpid_lib.dpid_to_str(dpid))
return ret_func(ret)
def delete_switch(self, _req, dpid, **_kwargs):
def _delete_switch(dpid):
self.conf_switch.del_dpid(dpid)
return None
def _ret(_ret):
return Response(status=httplib.ACCEPTED)
return self._do_switch(dpid, _delete_switch, _ret)
def list_keys(self, _req, dpid, **_kwargs):
def _list_keys(dpid):
return self.conf_switch.keys(dpid)
def _ret(keys):
body = json.dumps(keys)
return Response(content_type='application/json', body=body)
return self._do_switch(dpid, _list_keys, _ret)
@staticmethod
def _do_key(dpid, key, func, ret_func):
dpid = dpid_lib.str_to_dpid(dpid)
try:
ret = func(dpid, key)
except KeyError:
return Response(status=httplib.NOT_FOUND,
body='no dpid/key is found %s %s' %
(dpid_lib.dpid_to_str(dpid), key))
return ret_func(ret)
def set_key(self, req, dpid, key, **_kwargs):
def _set_val(dpid, key):
val = json.loads(req.body)
self.conf_switch.set_key(dpid, key, val)
return None
def _ret(_ret):
return Response(status=httplib.CREATED)
return self._do_key(dpid, key, _set_val, _ret)
def get_key(self, _req, dpid, key, **_kwargs):
def _get_key(dpid, key):
return self.conf_switch.get_key(dpid, key)
def _ret(val):
return Response(content_type='application/json',
body=json.dumps(val))
return self._do_key(dpid, key, _get_key, _ret)
def delete_key(self, _req, dpid, key, **_kwargs):
def _delete_key(dpid, key):
self.conf_switch.del_key(dpid, key)
return None
def _ret(_ret):
return Response()
return self._do_key(dpid, key, _delete_key, _ret)
class ConfSwitchAPI(app_manager.RyuApp):
_CONTEXTS = {
'conf_switch': conf_switch.ConfSwitchSet,
}
def __init__(self, *args, **kwargs):
super(ConfSwitchAPI, self).__init__(*args, **kwargs)
self.conf_switch = kwargs['conf_switch']
wsgi = kwargs['wsgi']
mapper = wsgi.mapper
controller = ConfSwitchController
wsgi.registory[controller.__name__] = self.conf_switch
route_name = 'conf_switch'
uri = '/v1.0/conf/switches'
mapper.connect(route_name, uri, controller=controller,
action='list_switches',
conditions=dict(method=['GET']))
uri += '/{dpid}'
requirements = {'dpid': dpid_lib.DPID_PATTERN}
s = mapper.submapper(controller=controller, requirements=requirements)
s.connect(route_name, uri, action='delete_switch',
conditions=dict(method=['DELETE']))
s.connect(route_name, uri, action='list_keys',
conditions=dict(method=['GET']))
uri += '/{key}'
s.connect(route_name, uri, action='set_key',
conditions=dict(method=['PUT']))
s.connect(route_name, uri, action='get_key',
conditions=dict(method=['GET']))
s.connect(route_name, uri, action='delete_key',
conditions=dict(method=['DELETE']))
|
{
"content_hash": "7cb519d88bbe2b8116bbc8d279a42f2e",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 78,
"avg_line_length": 31.81168831168831,
"alnum_prop": 0.5797101449275363,
"repo_name": "citrix-openstack-build/ryu",
"id": "57e23474df3f8a982a0bcb3869e3142106e8c0b9",
"size": "5582",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "ryu/app/rest_conf_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "670197"
},
{
"name": "Python",
"bytes": "2583730"
},
{
"name": "Shell",
"bytes": "11931"
}
],
"symlink_target": ""
}
|
""" jabber bot definition """
## jsb imports
from jsb.lib.errors import CannotAuth, CantLogon, NoUserProvided
from jsb.lib.users import users
from jsb.utils.exception import handle_exception
from jsb.utils.trace import whichmodule
from jsb.utils.locking import lockdec
from jsb.utils.pdod import Pdod
from jsb.utils.dol import Dol
from jsb.utils.generic import stripcolor, toenc, fromenc, stripped
from jsb.lib.less import Less
from jsb.lib.callbacks import callbacks, remote_callbacks
from jsb.lib.threads import start_new_thread
from jsb.lib.botbase import BotBase
from jsb.lib.exit import globalshutdown
from jsb.lib.channelbase import ChannelBase
from jsb.lib.fleet import getfleet
from jsb.utils.lazydict import LazyDict
## jsb.socket imports
from jsb.utils.generic import waitforqueue, jabberstrip, getrandomnick, toenc, fromenc
## xmpp imports
from presence import Presence
from message import Message
from iq import Iq
from core import XMLStream
from jid import JID, InvalidJID
from errors import xmpperrors
## basic imports
import time
import urllib
import copy
import time
import Queue
import os
import threading
import thread
import types
import xml
import re
import hashlib
import logging
import cgi
import base64
import random
from hashlib import md5
## defines
cpy = copy.deepcopy
## locks
outlock = thread.allocate_lock()
inlock = thread.allocate_lock()
connectlock = thread.allocate_lock()
outlocked = lockdec(outlock)
inlocked = lockdec(inlock)
connectlocked = lockdec(connectlock)
## SXMPPBot class
class SXMPPBot(XMLStream, BotBase):
"""
xmpp bot class.
"""
def __init__(self, cfg=None, usersin=None, plugs=None, jid=None, *args, **kwargs):
BotBase.__init__(self, cfg, usersin, plugs, jid, *args, **kwargs)
if not self.cfg: raise Exception("sxmpp - config is not set.")
if not self.cfg.user: raise NoUserProvided("please make sure the user config variable is set in %s (or use -u)" % cfg.cfile)
try: self.cfg.username, self.cfg.host = self.cfg.user.split('@')
except (ValueError, TypeError): raise Exception("%s - user not set - %s" % (self.cfg.name, str(self.cfg)))
XMLStream.__init__(self, self.cfg.name)
self.type = 'sxmpp'
self.sock = None
self.lastin = None
self.test = 0
self.connecttime = 0
self.connection = None
self.jabber = True
self.jids = {}
self.topics = {}
self.timejoined = {}
self.channels409 = []
if self.state and not self.state.data.ratelimit: self.state.data.ratelimit = 0.02
try: self.cfg.port = int(self.cfg.port)
except (ValueError, TypeError): self.cfg.port = 5222
logging.debug("%s - user is %s" % (self.cfg.name, self.cfg.user))
def _resumedata(self):
""" return data needed for resuming. """
return {self.cfg.name: {
'name': self.cfg.name,
'type': self.type,
'nick': self.cfg.nick,
'server': self.cfg.server,
'port': self.cfg.port,
'password': self.cfg.password,
'ipv6': self.cfg.ipv6,
'user': self.cfg.user
}}
def _keepalive(self):
""" keepalive method .. send empty string to self every 3 minutes. """
nrsec = 0
self.stopkeepalive = False
self.sendpresence()
while not self.stopped and not self.stopkeepalive:
time.sleep(1)
nrsec += 1
if nrsec < (self.cfg.keepalive or 120): continue
else: nrsec = 0
self.sendpresence()
def sendpresence(self):
""" send presence based on status and status text set by user. """
if self.state:
if self.state.has_key('status') and self.state['status']: status = self.state['status']
else: status = ""
if self.state.has_key('show') and self.state['show']: show = self.state['show']
else: show = ""
else:
status = ""
show = ""
logging.debug('%s - keepalive - %s - %s' % (self.cfg.name, show, status))
if show and status: p = Presence({'to': self.cfg.user, 'show': show, 'status': status})
elif show: p = Presence({'to': self.cfg.user, 'show': show })
elif status: p = Presence({'to': self.cfg.user, 'status': status})
else: p = Presence({'to': self.cfg.user })
self.send(p)
def _keepchannelsalive(self):
""" channels keep alive method. """
nrsec = 0
p = Presence({'to': self.cfg.user, 'txt': '' })
self.stopkeepalive = False
while not self.stopped and not self.stopkeepalive:
time.sleep(1)
nrsec += 1
if nrsec < 600: continue
else: nrsec = 0
for chan in self.state['joinedchannels']:
if chan not in self.channels409:
p = Presence({'to': chan})
self.send(p)
def connect(self, reconnect=False):
""" connect the xmpp server. """
if not XMLStream.doconnect(self): return False
self.connection = None
iq = self.makeready()
if not iq:
logging.error('%s - connect to %s:%s (%s) failed' % (self.cfg.name, self.cfg.host, self.cfg.port, self.cfg.server))
return
self.logon(self.cfg.user, self.cfg.password, iq)
time.sleep(1)
self.sendpresence()
#self._raw("<presence/>")
#start_new_thread(self._keepalive, ())
#if self.cfg.keepchannelsalive: start_new_thread(self._keepchannelsalive, ())
if self.cfg.doroster: self.requestroster()
self.connectok.set()
self.sock.settimeout(None)
return True
def logon(self, user, password, iq):
""" logon on the xmpp server. """
try: self.auth(user, password, iq)
except CannotAuth, ex:
logging.error("cannot auth to server: %s" % str(ex))
if self.cfg.doregister:
logging.warn("%s - sleeping 10 seconds before register" % self.cfg.name)
time.sleep(10)
self.failure = ""
self.stopped = False
try:
if not self.register(user, password): self.exit() ; return
except Exception, ex: self.exit() ; raise
time.sleep(5)
iq = self.auth_sasl(user, password, iq, False)
else: logging.error("stopping .. you can try to use --register to register the bot with the server") ; raise CantLogon(user)
XMLStream.logon(self)
return True
def register(self, jid, password):
""" register the jid to the server. """
try: resource = jid.split("/")[1]
except IndexError: resource = "jsb"
logging.debug('%s - registering %s' % (self.cfg.name, jid))
iq = self.waiter("""<iq type='get'><query xmlns='jabber:iq:register'/></iq>""")
if not iq:
logging.error("%s - unable to register" % self.cfg.name)
return
iq = self.waiter("""<iq type='set'><query xmlns='jabber:iq:register'><username>%s</username><resource>%s</resource><password>%s</password></query></iq>""" % (jid.split('@')[0], resource, password))
if iq.error:
logging.warn('%s - register FAILED - %s' % (self.cfg.name, iq.error))
if not iq.error.code: logging.error("%s - can't determine error code" % self.cfg.name) ; return False
if iq.error.code == "405": logging.error("%s - this server doesn't allow registration by the bot, you need to create an account for it yourself" % self.cfg.name)
elif iq.error.code == "500": logging.error("%s - %s - %s" % (self.cfg.name, iq.error.code, iq.error.text.data))
else: logging.error("%s - %s" % (self.cfg.name, xmpperrors[iq.error.code]))
self.error = iq.error
raise Exception(iq.error)
logging.warn('%s - register ok' % self.cfg.name)
return True
def requestroster(self):
""" request roster from xmpp server. """
self._raw("<iq type='get'><query xmlns='jabber:iq:roster'/></iq>")
def disconnectHandler(self, ex):
""" disconnect handler. """
self.reconnect()
def outnocb(self, printto, txt, how=None, event=None, html=False, isrelayed=False, *args, **kwargs):
""" output txt to bot. """
if printto and printto in self.state['joinedchannels']: outtype = 'groupchat'
else: outtype = (event and event.type) or "chat"
target = printto
txt = self.normalize(txt)
#txt = stripcolor(txt)
repl = Message(event)
repl.to = target
repl.type = outtype
repl.txt = txt
if html: repl.html = txt
logging.debug("%s - reply is %s" % (self.cfg.name, repl.dump()))
if not repl.type: repl.type = 'normal'
logging.debug("%s - sxmpp - out - %s - %s" % (self.cfg.name, printto, unicode(txt)))
self.send(repl)
def broadcast(self, txt):
""" broadcast txt to all joined channels. """
for i in self.state['joinedchannels']: self.say(i, txt)
def kick(self, channel, nick, reason=None):
if not reason: reason = "no reason"
self._raw("""<iq id='kick1' to='%s' type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item nick='%s' role='none'>
<reason>%s</reason>
</item>
</query>
</iq>"""% (channel, nick, reason))
def handle_iq(self, data):
""" iq handler .. overload this when needed. """
pass
def handle_message(self, data):
""" message handler. """
m = Message(data)
m.parse(self)
if data.type == 'groupchat' and data.subject:
logging.debug("%s - checking topic" % self.cfg.name)
self.topiccheck(m)
nm = Message(m)
callbacks.check(self, nm)
return
xmlns = data.get('x').xmlns or data.get("delay").xmlns
if xmlns and (xmlns == 'jabber:x:delay' or xmlns == 'urn:xmpp:delay'):
logging.info("%s - ignoring delayed message" % self.cfg.name)
return
if m.isresponse:
logging.debug("%s - message is a response" % self.cfg.name)
return
jid = None
m.origjid = m.jid
for node in m.subelements:
try: m.jid = node.x.item.jid
except (AttributeError, TypeError): continue
if self.cfg.user in m.fromm or (m.groupchat and self.cfg.nick == m.nick):
logging.debug("%s - message to self .. ignoring" % self.cfg.name)
return 0
if self.cfg.fulljids and not m.msg:
if not m.nick: utarget = None
else:
utarget = self.userhosts.get(m.nick)
logging.warn("userhost from cache for %s ==> %s" % (m.nick, utarget))
if utarget: m.userhost = m.jid = m.auth = utarget
else: m.userhost = m.jid
if m.msg: m.userhost = stripped(m.userhost)
logging.info("using %s as userhost" % m.userhost)
try:
if m.type == 'error':
if m.code: self.errorHandler(m)
else: m.nodispatch = False
except Exception, ex:
handle_exception()
self.put(m)
def errorHandler(self, event):
""" error handler .. calls the errorhandler set in the event. """
try:
logging.error("%s - error occured in %s - %s" % (self.cfg.name, event.txt, event.userhost))
event.errorHandler()
except AttributeError: logging.error('%s - unhandled error - %s' % (self.cfg.name, event.dump()))
def handle_presence(self, data):
""" presence handler. """
p = Presence(data)
p.parse()
frm = p.fromm
nickk = ""
nick = p.nick
if self.cfg.user in frm: self.pongcheck = True
if nick:
#self.userhosts[nick] = stripped(frm)
nickk = nick
jid = None
for node in p.subelements:
try:
jid = node.x.item.jid
except (AttributeError, TypeError):
continue
if nickk and jid and self.cfg.fulljids:
channel = p.channel
if not self.jids.has_key(channel):
self.jids[channel] = {}
self.jids[channel][nickk] = jid
self.userhosts[nickk] = stripped(jid)
logging.info('%s - setting jid of %s (%s) to %s' % (self.cfg.name, nickk, channel, self.userhosts[nickk]))
if p.type == 'subscribe':
pres = Presence({'to': p.fromm, 'type': 'subscribed'})
self.send(pres)
pres = Presence({'to': p.fromm, 'type': 'subscribe'})
self.send(pres)
nick = p.resource
if p.type != 'unavailable':
p.joined = True
p.type = 'available'
elif self.cfg.user in p.userhost:
try:
del self.jids[p.channel]
logging.debug('%s - removed %s channel jids' % (self.cfg.name, p.channel))
except KeyError:
pass
else:
try:
del self.jids[p.channel][p.nick]
logging.debug('%s - removed %s jid' % (self.cfg.name, p.nick))
except KeyError:
pass
if p.type == 'error':
for node in p.subelements:
try:
err = node.error.code
except (AttributeError, TypeError):
err = 'no error set'
try:
txt = node.text.data
except (AttributeError, TypeError):
txt = ""
if err:
logging.error('%s - error - %s - %s' % (self.cfg.name, err, txt))
try:
method = getattr(self,'handle_' + err)
try:
method(p)
except:
handle_exception()
except AttributeError:
pass
self.doevent(p)
def invite(self, jid):
pres = Presence({'to': jid, 'type': 'subscribe'})
self.send(pres)
time.sleep(2)
pres = Presence({'to': jid})
self.send(pres)
def send(self, what):
""" send stanza to the server. """
if not what:
logging.debug("%s - can't send empty message" % self.cfg.name)
return
try:
to = what['to']
except (KeyError, TypeError):
logging.error("%s - can't determine where to send %s to" % (self.cfg.name, str(what)))
return
try:
jid = JID(to)
except (InvalidJID, AttributeError):
logging.error("%s - invalid jid - %s - %s" % (self.cfg.name, str(to), whichmodule(2)))
return
try: del what['from']
except KeyError: pass
try:
xml = what.tojabber()
if not xml:
raise Exception("can't convert %s to xml .. bot.send()" % what)
except (AttributeError, TypeError):
handle_exception()
return
if not self.checkifvalid(xml): logging.error("%s - NOT PROPER XML - %s" % (self.cfg.name, xml))
else: self._raw(xml)
def action(self, printto, txt, fromm=None, groupchat=True, event=None, *args, **kwargs):
""" send an action. """
txt = "/me " + txt
if self.google:
fromm = self.cfg.user
if printto in self.state['joinedchannels'] and groupchat:
message = Message({'to': printto, 'txt': txt, 'type': 'groupchat'})
else: message = Message({'to': printto, 'txt': txt})
if fromm: message.fromm = fromm
self.send(message)
def save(self):
""" save bot's state. """
if self.state:
self.state.save()
def quit(self):
""" send unavailable presence. """
if self.error: return
presence = Presence({'type': 'unavailable' ,'to': self.cfg.user})
if self.state:
for i in self.state.data.joinedchannels:
presence.to = i
self.send(presence)
presence = Presence({'type': 'unavailable', 'to': self.cfg.user})
presence['from'] = self.cfg.user
self.send(presence)
def setstatus(self, status, show=""):
""" send status presence. """
if self.error: return
if self.state:
self.state['status'] = status
self.state['show'] = show
self.state.save()
presence = Presence({'status': status, 'show': show ,'to': self.cfg.user})
self.send(presence)
def shutdown(self):
self.outqueue.put_nowait(None)
def join(self, channel, password=None, nick=None):
""" join conference. """
if channel.startswith("#"): return
logging.warn("joining %s" % channel)
try:
if not nick: nick = channel.split('/')[1]
except IndexError: nick = self.cfg.nick or "jsonbot"
self.timejoined[channel] = time.time()
chan = ChannelBase(channel, self.botname)
chan.data['nick'] = nick
if password:
chan.data['key'] = password
if chan.data['key'] and not password: password = chan.data['key']
if not chan.data.has_key('cc'):
chan.data['cc'] = self.cfg['defaultcc'] or '!'
if channel not in self.state['joinedchannels']:
self.state['joinedchannels'].append(channel)
self.state.save()
if channel in self.channels409:
self.channels409.remove(channel)
chan.save()
channel = channel.split('/')[0]
q = Queue.Queue()
presence = Presence({'to': channel + '/' + nick, "xmlns": 'http://jabber.org/protocol/muc' })
if password:
presence.x = {}
presence.x['password'] = password
self.send(presence)
return 1
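# Hypothetical usage sketch (JID invented):
#   bot.join('jsb@conference.example.org/jsonbot', password='secret')
# sends a presence stanza with the MUC namespace to that room/nick, attaches
# the password, and records the channel in self.state['joinedchannels'].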
def part(self, channel):
""" leave conference. """
if channel.startswith("#"): return
presence = Presence({'to': channel})
presence.type = 'unavailable'
self.send(presence)
if channel in self.state['joinedchannels']: self.state['joinedchannels'].remove(channel)
self.state.save()
return 1
def outputnolog(self, printto, what, how, who=None, fromm=None):
""" do output but don't log it. """
if fromm: return
self.saynocb(printto, what)
def topiccheck(self, msg):
""" check if topic is set. """
if msg.groupchat:
try:
topic = msg.subject
if not topic: return None
self.topics[msg.channel] = (topic, msg.userhost, time.time())
logging.debug('%s - topic of %s set to %s' % (self.cfg.name, msg.channel, topic))
except AttributeError: return None
def settopic(self, channel, txt):
""" set topic. """
pres = Message({'to': channel, 'subject': txt})
pres.type = 'groupchat'
self.send(pres)
def gettopic(self, channel):
""" get topic. """
try:
topic = self.topics[channel]
return topic
except KeyError: return None
def domsg(self, msg):
""" dispatch an msg on the bot. """
self.put(msg)
def normalize(self, what):
what = stripcolor(what)
what = what.replace("\002", "")
what = what.replace("\003", "")
#what = what.replace("\n", "<br>")
what = what.replace("<br>", "\n")
what = what.replace("<li>", "*")
what = what.replace("</li>", "")
what = what.replace("<b>", "")
what = what.replace("</b>", "")
what = what.replace("<b>", "")
what = what.replace("</b>", "")
what = what.replace("<i>", "")
what = what.replace("</i>", "")
what = what.replace("<i>", "")
what = what.replace("</i>", "")
return what
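# Illustrative sketch of what normalize() does to the bot's lightweight
# markup: normalize('<b>hello</b><br><li>item</li>') yields 'hello\n*item'
# (stripcolor() and the \002/\003 replacements additionally drop IRC colour
# and bold control codes).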
def pingcheck(self):
self.pongcheck = False
self.sendpresence()
time.sleep(5)
return self.pongcheck
|
{
"content_hash": "fb3d62ddff0adbd9c9ad2474f4637db6",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 205,
"avg_line_length": 37.51009174311927,
"alnum_prop": 0.5488431247859903,
"repo_name": "Petraea/jsonbot",
"id": "9df1e83ae0c3888e78888d1f3dde4e250917333e",
"size": "20474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsb/drivers/xmpp/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36140"
},
{
"name": "JavaScript",
"bytes": "42430"
},
{
"name": "Python",
"bytes": "3234788"
},
{
"name": "Shell",
"bytes": "1874"
}
],
"symlink_target": ""
}
|
import datetime as dt
import logging
import re
from future.moves.urllib.parse import urljoin, urlencode
import uuid
from copy import deepcopy
from flask import Request as FlaskRequest
from framework import analytics
from guardian.shortcuts import get_perms
from past.builtins import basestring
# OSF imports
import itsdangerous
import pytz
from dirtyfields import DirtyFieldsMixin
from django.apps import apps
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import PermissionsMixin
from django.dispatch import receiver
from django.db import models
from django.db.models import Count, Exists, OuterRef
from django.db.models.signals import post_save
from django.utils import timezone
from guardian.shortcuts import get_objects_for_user
from framework.auth import Auth, signals, utils
from framework.auth.core import generate_verification_key
from framework.auth.exceptions import (ChangePasswordError, ExpiredTokenError,
InvalidTokenError,
MergeConfirmedRequiredError,
MergeConflictError)
from framework.exceptions import PermissionsError
from framework.sessions.utils import remove_sessions_for_user
from osf.utils.requests import get_current_request
from osf.exceptions import reraise_django_validation_errors, UserStateError
from osf.models.base import BaseModel, GuidMixin, GuidMixinQuerySet
from osf.models.notable_domain import NotableDomain
from osf.models.contributor import Contributor, RecentlyAddedContributor
from osf.models.institution import Institution
from osf.models.mixins import AddonModelMixin
from osf.models.spam import SpamMixin
from osf.models.session import Session
from osf.models.tag import Tag
from osf.models.validators import validate_email, validate_social, validate_history_item
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField, LowercaseEmailField, ensure_str
from osf.utils.names import impute_names
from osf.utils.requests import check_select_for_update
from osf.utils.permissions import API_CONTRIBUTOR_PERMISSIONS, MANAGER, MEMBER, MANAGE, ADMIN
from website import settings as website_settings
from website import filters, mails
from website.project import new_bookmark_collection
from website.util.metrics import OsfSourceTags
logger = logging.getLogger(__name__)
MAX_QUICKFILES_MERGE_RENAME_ATTEMPTS = 1000
def get_default_mailing_lists():
return {'Open Science Framework Help': True}
name_formatters = {
'long': lambda user: user.fullname,
'surname': lambda user: user.family_name if user.family_name else user.fullname,
'initials': lambda user: u'{surname}, {initial}.'.format(
surname=user.family_name,
initial=user.given_name_initial,
),
}
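# Illustrative sketch (hypothetical user): with fullname='Rosalind Elsie
# Franklin', family_name='Franklin' and given_name='Rosalind', the formatters
# yield:
#   'long'     -> 'Rosalind Elsie Franklin'
#   'surname'  -> 'Franklin'
#   'initials' -> 'Franklin, R.'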
class OSFUserManager(BaseUserManager):
def create_user(self, username, password=None):
if not username:
raise ValueError('Users must have a username')
user = self.model(
username=self.normalize_email(username),
is_active=True,
date_registered=timezone.now()
)
user.set_password(password)
user.save(using=self._db)
return user
_queryset_class = GuidMixinQuerySet
def all(self):
return self.get_queryset().all()
def eager(self, *fields):
fk_fields = set(self.model.get_fk_field_names()) & set(fields)
m2m_fields = set(self.model.get_m2m_field_names()) & set(fields)
return self.select_related(*fk_fields).prefetch_related(*m2m_fields)
def create_superuser(self, username, password):
user = self.create_user(username, password=password)
user.is_superuser = True
user.is_staff = True
user.is_active = True
user.save(using=self._db)
return user
class Email(BaseModel):
address = LowercaseEmailField(unique=True, db_index=True, validators=[validate_email])
user = models.ForeignKey('OSFUser', related_name='emails', on_delete=models.CASCADE)
def __unicode__(self):
return self.address
class OSFUser(DirtyFieldsMixin, GuidMixin, BaseModel, AbstractBaseUser, PermissionsMixin, AddonModelMixin, SpamMixin):
FIELD_ALIASES = {
'_id': 'guids___id',
'system_tags': 'tags',
}
settings_type = 'user' # Needed for addons
USERNAME_FIELD = 'username'
# Node fields that trigger an update to the search engine on save
SEARCH_UPDATE_FIELDS = {
'fullname',
'given_name',
'middle_names',
'family_name',
'suffix',
'merged_by',
'date_disabled',
'date_confirmed',
'jobs',
'schools',
'social',
}
# Overrides DirtyFieldsMixin, Foreign Keys checked by '<attribute_name>_id' rather than typical name.
FIELDS_TO_CHECK = SEARCH_UPDATE_FIELDS.copy()
FIELDS_TO_CHECK.update({'password', 'last_login', 'merged_by_id', 'username'})
# TODO: Add SEARCH_UPDATE_NODE_FIELDS, for fields that should trigger a
# search update for all nodes to which the user is a contributor.
SOCIAL_FIELDS = {
'orcid': u'http://orcid.org/{}',
'github': u'http://github.com/{}',
'scholar': u'http://scholar.google.com/citations?user={}',
'twitter': u'http://twitter.com/{}',
'profileWebsites': [],
'linkedIn': u'https://www.linkedin.com/{}',
'impactStory': u'https://impactstory.org/u/{}',
'researcherId': u'http://researcherid.com/rid/{}',
'researchGate': u'https://researchgate.net/profile/{}',
'academiaInstitution': u'https://{}',
'academiaProfileID': u'.academia.edu/{}',
'baiduScholar': u'http://xueshu.baidu.com/scholarID/{}',
'ssrn': u'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'
}
SPAM_USER_PROFILE_FIELDS = {
'schools': ['degree', 'institution', 'department'],
'jobs': ['title', 'institution', 'department']
}
# The primary email address for the account.
# This value is unique, but multiple "None" records exist for:
# * unregistered contributors where an email address was not provided.
# TODO: Update mailchimp subscription on username change in user.save()
# TODO: Consider making this a FK to Email with to_field='address'
# Django supports this (https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#django.contrib.auth.models.CustomUser.USERNAME_FIELD)
# but some third-party apps may not.
username = models.CharField(max_length=255, db_index=True, unique=True)
# Hashed. Use `User.set_password` and `User.check_password`
# password = models.CharField(max_length=255)
fullname = models.CharField(max_length=255)
# user has taken action to register the account
is_registered = models.BooleanField(db_index=True, default=False)
# for internal use
tags = models.ManyToManyField('Tag', blank=True)
# security emails that have been sent
# TODO: This should be removed and/or merged with system_tags
security_messages = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <message label>: <datetime>
# ...
# }
# user was invited (as opposed to registered unprompted)
is_invited = models.BooleanField(default=False, db_index=True)
# Per-project unclaimed user data:
# TODO: add validation
unclaimed_records = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <project_id>: {
# 'name': <name that referrer provided>,
# 'referrer_id': <user ID of referrer>,
# 'token': <token used for verification urls>,
# 'email': <email the referrer provided or None>,
# 'claimer_email': <email the claimer entered or None>,
# 'last_sent': <timestamp of last email sent to referrer or None>
# }
# ...
# }
# Time of last sent notification email to newly added contributors
# Format : {
# <project_id>: {
# 'last_sent': time.time()
# }
# ...
# }
contributor_added_email_records = DateTimeAwareJSONField(default=dict, blank=True)
# Tracks last email sent where user was added to an OSF Group
member_added_email_records = DateTimeAwareJSONField(default=dict, blank=True)
# Tracks last email sent where an OSF Group was connected to a node
group_connected_email_records = DateTimeAwareJSONField(default=dict, blank=True)
# The user into which this account was merged
merged_by = models.ForeignKey('self', null=True, blank=True, related_name='merger', on_delete=models.CASCADE)
# verification key v1: only the token string, no expiration time
# used for cas login with username and verification key
verification_key = models.CharField(max_length=255, null=True, blank=True)
# verification key v2: token, and expiration time
# used for password reset, confirm account/email, claim account/contributor-ship
verification_key_v2 = DateTimeAwareJSONField(default=dict, blank=True, null=True)
# Format: {
# 'token': <verification token>
# 'expires': <verification expiration time>
# }
email_last_sent = NonNaiveDateTimeField(null=True, blank=True)
change_password_last_attempt = NonNaiveDateTimeField(null=True, blank=True)
# Logs number of times user attempted to change their password where their
# old password was invalid
old_password_invalid_attempts = models.PositiveIntegerField(default=0)
# email verification tokens
# see also ``unconfirmed_emails``
email_verifications = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <token> : {'email': <email address>,
# 'expiration': <datetime>}
# }
# email lists to which the user has chosen a subscription setting
mailchimp_mailing_lists = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# 'list1': True,
# 'list2': False,
# ...
# }
# email lists to which the user has chosen a subscription setting,
# being sent from osf, rather than mailchimp
osf_mailing_lists = DateTimeAwareJSONField(default=get_default_mailing_lists, blank=True)
# Format: {
# 'list1': True,
# 'list2': False,
# ...
# }
# the date this user was registered
date_registered = NonNaiveDateTimeField(db_index=True, auto_now_add=True)
# list of collaborators that this user recently added to nodes as a contributor
# recently_added = fields.ForeignField("user", list=True)
recently_added = models.ManyToManyField('self',
through=RecentlyAddedContributor,
through_fields=('user', 'contributor'),
symmetrical=False)
# Attached external accounts (OAuth)
# external_accounts = fields.ForeignField("externalaccount", list=True)
external_accounts = models.ManyToManyField('ExternalAccount', blank=True)
# CSL names
given_name = models.CharField(max_length=255, blank=True)
middle_names = models.CharField(max_length=255, blank=True)
family_name = models.CharField(max_length=255, blank=True)
suffix = models.CharField(max_length=255, blank=True)
# identity for user logged in through external idp
external_identity = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <external_id_provider>: {
# <external_id>: <status from ('VERIFIED, 'CREATE', 'LINK')>,
# ...
# },
# ...
# }
# Employment history
jobs = DateTimeAwareJSONField(default=list, blank=True, validators=[validate_history_item])
# Format: list of {
# 'title': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing': <boolean>
# }
# Educational history
schools = DateTimeAwareJSONField(default=list, blank=True, validators=[validate_history_item])
# Format: list of {
# 'degree': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing': <boolean>
# }
# Social links
social = DateTimeAwareJSONField(default=dict, blank=True, validators=[validate_social])
# Format: {
# 'profileWebsites': <list of profile websites>
# 'twitter': <list of twitter usernames>,
# 'github': <list of github usernames>,
# 'linkedIn': <list of linkedin profiles>,
# 'orcid': <orcid for user>,
# 'researcherID': <researcherID>,
# 'impactStory': <impactStory identifier>,
# 'scholar': <google scholar identifier>,
# 'ssrn': <SSRN username>,
# 'researchGate': <researchGate username>,
# 'baiduScholar': <baiduScholar username>,
# 'academiaProfileID': <profile identifier for academia.edu>
# }
# date the user last sent a request
date_last_login = NonNaiveDateTimeField(null=True, blank=True, db_index=True)
# date the user first successfully confirmed an email address
date_confirmed = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# When the user was disabled.
date_disabled = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# When the user was soft-deleted (GDPR)
deleted = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# when comments were last viewed
comments_viewed_timestamp = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# 'Comment.root_target._id': 'timestamp',
# ...
# }
# timezone for user's locale (e.g. 'America/New_York')
timezone = models.CharField(blank=True, default='Etc/UTC', max_length=255)
# user language and locale data (e.g. 'en_US')
locale = models.CharField(blank=True, max_length=255, default='en_US')
# whether the user has requested to deactivate their account
requested_deactivation = models.BooleanField(default=False)
# whether the user who requested deactivation has been contacted about their pending request. This is reset when
# requests are canceled
contacted_deactivation = models.BooleanField(default=False)
affiliated_institutions = models.ManyToManyField('Institution', blank=True)
notifications_configured = DateTimeAwareJSONField(default=dict, blank=True)
# The time at which the user agreed to our updated ToS and Privacy Policy (GDPR, 25 May 2018)
accepted_terms_of_service = NonNaiveDateTimeField(null=True, blank=True)
chronos_user_id = models.TextField(null=True, blank=True, db_index=True)
# The primary department to which the institution user belongs,
# in case we support multiple departments in the future.
department = models.TextField(null=True, blank=True)
objects = OSFUserManager()
is_active = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
def __repr__(self):
return '<OSFUser({0!r}) with guid {1!r}>'.format(self.username, self._id)
@property
def deep_url(self):
"""Used for GUID resolution."""
return '/profile/{}/'.format(self._primary_key)
@property
def url(self):
return '/{}/'.format(self._id)
@property
def absolute_url(self):
return urljoin(website_settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
from website import util
return util.api_v2_url('users/{}/'.format(self._id))
@property
def api_url(self):
return '/api/v1/profile/{}/'.format(self._id)
@property
def profile_url(self):
return '/{}/'.format(self._id)
@property
def is_disabled(self):
return self.date_disabled is not None
@is_disabled.setter
def is_disabled(self, val):
"""Set whether or not this account has been disabled."""
if val and not self.date_disabled:
self.date_disabled = timezone.now()
elif val is False:
self.date_disabled = None
@property
def is_confirmed(self):
return bool(self.date_confirmed)
@property
def is_merged(self):
"""Whether or not this account has been merged into another account.
"""
return self.merged_by is not None
@property
def unconfirmed_emails(self):
# Handle when email_verifications field is None
email_verifications = self.email_verifications or {}
return [
each['email']
for each
in email_verifications.values()
]
@property
def social_links(self):
"""
Returns a dictionary of formatted social links for a user.
Social account values which are stored as account names are
formatted into appropriate social links. The 'type' of each
respective social field value is dictated by self.SOCIAL_FIELDS.
I.e. If a string is expected for a specific social field that
permits multiple accounts, a single account url will be provided for
the social field to ensure adherence with self.SOCIAL_FIELDS.
"""
social_user_fields = {}
for key, val in self.social.items():
if val and key in self.SOCIAL_FIELDS:
if isinstance(self.SOCIAL_FIELDS[key], basestring):
if isinstance(val, basestring):
social_user_fields[key] = self.SOCIAL_FIELDS[key].format(val)
else:
# Only provide the first url for services where multiple accounts are allowed
social_user_fields[key] = self.SOCIAL_FIELDS[key].format(val[0])
else:
if isinstance(val, basestring):
social_user_fields[key] = [val]
else:
social_user_fields[key] = val
return social_user_fields
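# Illustrative sketch (hypothetical values): with
#   self.social = {'github': ['jane-doe'],
#                  'orcid': '0000-0002-1825-0097',
#                  'profileWebsites': ['https://example.org']}
# social_links resolves to
#   {'github': 'http://github.com/jane-doe',
#    'orcid': 'http://orcid.org/0000-0002-1825-0097',
#    'profileWebsites': ['https://example.org']}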
@property
def given_name_initial(self):
"""
The user's preferred initialization of their given name.
Some users with common names may choose to distinguish themselves from
their colleagues in this way. For instance, there could be two
well-known researchers in a single field named "Robert Walker".
"Walker, R" could then refer to either of them. "Walker, R.H." could
provide easy disambiguation.
NOTE: The internal representation for this should never end with a
period. "R" and "R.H" would be correct in the prior case, but
"R.H." would not.
"""
return self.given_name[0]
@property
def email(self):
if self.has_usable_username():
return self.username
else:
return None
@property
def all_tags(self):
"""Return a queryset containing all of this user's tags (incl. system tags)."""
# Tag's default manager only returns non-system tags, so we can't use self.tags
return Tag.all_tags.filter(osfuser=self)
@property
def system_tags(self):
"""The system tags associated with this node. This currently returns a list of string
names for the tags, for compatibility with v1. Eventually, we can just return the
QuerySet.
"""
return self.all_tags.filter(system=True).values_list('name', flat=True)
@property
def csl_given_name(self):
return utils.generate_csl_given_name(self.given_name, self.middle_names, self.suffix)
def csl_name(self, node_id=None):
# disabled users are set to is_registered = False but have a fullname
if self.is_registered or self.is_disabled:
name = self.fullname
else:
name = self.get_unclaimed_record(node_id)['name']
if self.family_name and self.given_name:
"""If the user has a family and given name, use those"""
return {
'family': self.family_name,
'given': self.csl_given_name,
}
else:
""" If the user doesn't autofill his family and given name """
parsed = utils.impute_names(name)
given_name = parsed['given']
middle_names = parsed['middle']
family_name = parsed['family']
suffix = parsed['suffix']
csl_given_name = utils.generate_csl_given_name(given_name, middle_names, suffix)
return {
'family': family_name,
'given': csl_given_name,
}
@property
def osfstorage_region(self):
from addons.osfstorage.models import Region
osfs_settings = self._settings_model('osfstorage')
region_subquery = osfs_settings.objects.get(owner=self.id).default_region_id
return Region.objects.get(id=region_subquery)
@property
def contributor_to(self):
"""
Nodes that user has perms to through contributorship - group membership not factored in
"""
return self.nodes.filter(is_deleted=False, type__in=['osf.node', 'osf.registration'])
@property
def visible_contributor_to(self):
"""
Nodes where user is a bibliographic contributor (group membership not factored in)
"""
return self.nodes.annotate(
self_is_visible=Exists(Contributor.objects.filter(node_id=OuterRef('id'), user_id=self.id, visible=True))
).filter(deleted__isnull=True, self_is_visible=True, type__in=['osf.node', 'osf.registration'])
@property
def all_nodes(self):
"""
Return all AbstractNodes that the user has explicit permissions to - either through contributorship or group membership
- similar to guardian.get_objects_for_user(self, READ_NODE, AbstractNode, with_superuser=False), but not looking at
NodeUserObjectPermissions, just NodeGroupObjectPermissions.
"""
from osf.models import AbstractNode
return AbstractNode.objects.get_nodes_for_user(self)
@property
def contributor_or_group_member_to(self):
"""
Nodes and registrations that user has perms to through contributorship or group membership
"""
return self.all_nodes.filter(type__in=['osf.node', 'osf.registration'])
@property
def nodes_contributor_or_group_member_to(self):
"""
Nodes that user has perms to through contributorship or group membership
"""
from osf.models import Node
return Node.objects.get_nodes_for_user(self)
def set_unusable_username(self):
"""Sets username to an unusable value. Used for, e.g. for invited contributors
and merged users.
NOTE: This is necessary because Django does not allow the username column to be nullable.
"""
if self._id:
self.username = self._id
else:
self.username = str(uuid.uuid4())
return self.username
def has_usable_username(self):
return '@' in self.username
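# Illustrative sketch: has_usable_username() simply checks for an '@', so
# 'jane@example.org' counts as usable while a GUID such as 'abc12'
# (assigned by set_unusable_username for invited or merged users) does not.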
@property
def is_authenticated(self): # Needed for django compat
return True
@property
def is_anonymous(self):
return False
@property
def osf_groups(self):
"""
OSFGroups that the user belongs to
"""
OSFGroup = apps.get_model('osf.OSFGroup')
return get_objects_for_user(self, 'member_group', OSFGroup, with_superuser=False)
def group_role(self, group):
"""
For the given OSFGroup, return the user's role - either member or manager
"""
if group.is_manager(self):
return MANAGER
elif group.is_member(self):
return MEMBER
else:
return None
def get_absolute_url(self):
return self.absolute_api_v2_url
def get_addon_names(self):
return []
# django methods
def get_full_name(self):
return self.fullname
def get_short_name(self):
return self.username
def __unicode__(self):
return self.get_short_name()
def __str__(self):
return self.get_short_name()
def get_verified_external_id(self, external_service, verified_only=False):
identifier_info = self.external_identity.get(external_service, {})
for external_id, status in identifier_info.items():
if status and status == 'VERIFIED' or not verified_only:
return external_id
return None
@property
def contributed(self):
return self.nodes.all()
@property
def can_be_merged(self):
"""The ability of the `merge_user` method to fully merge the user"""
return all((addon.can_be_merged for addon in self.get_addons()))
def merge_user(self, user):
"""Merge a registered user into this account. This user will be
a contributor on any project. if the registered user and this account
are both contributors of the same project. Then it will remove the
registered user and set this account to the highest permission of the two
and set this account to be visible if either of the two are visible on
the project.
:param user: A User object to be merged.
"""
# Attempt to prevent self merges which end up removing self as a contributor from all projects
if self == user:
raise ValueError('Cannot merge a user into itself')
# Fail if the other user has conflicts.
if not user.can_be_merged:
raise MergeConflictError('Users cannot be merged')
# Move over the other user's attributes
# TODO: confirm
for system_tag in user.system_tags.all():
self.add_system_tag(system_tag)
self.is_registered = self.is_registered or user.is_registered
self.is_invited = self.is_invited or user.is_invited
self.is_superuser = self.is_superuser or user.is_superuser
self.is_staff = self.is_staff or user.is_staff
# copy over profile only if this user has no profile info
if user.jobs and not self.jobs:
self.jobs = user.jobs
if user.schools and not self.schools:
self.schools = user.schools
if user.social and not self.social:
self.social = user.social
unclaimed = user.unclaimed_records.copy()
unclaimed.update(self.unclaimed_records)
self.unclaimed_records = unclaimed
# - unclaimed records should be connected to only one user
user.unclaimed_records = {}
security_messages = user.security_messages.copy()
security_messages.update(self.security_messages)
self.security_messages = security_messages
notifications_configured = user.notifications_configured.copy()
notifications_configured.update(self.notifications_configured)
self.notifications_configured = notifications_configured
if not website_settings.RUNNING_MIGRATION:
for key, value in user.mailchimp_mailing_lists.items():
# subscribe to each list if either user was subscribed
subscription = value or self.mailchimp_mailing_lists.get(key)
signals.user_merged.send(self, list_name=key, subscription=subscription)
# clear subscriptions for merged user
signals.user_merged.send(user, list_name=key, subscription=False, send_goodbye=False)
for target_id, timestamp in user.comments_viewed_timestamp.items():
if not self.comments_viewed_timestamp.get(target_id):
self.comments_viewed_timestamp[target_id] = timestamp
elif timestamp > self.comments_viewed_timestamp[target_id]:
self.comments_viewed_timestamp[target_id] = timestamp
# Give old user's emails to self
user.emails.update(user=self)
for k, v in user.email_verifications.items():
email_to_confirm = v['email']
if k not in self.email_verifications and email_to_confirm != user.username:
self.email_verifications[k] = v
user.email_verifications = {}
self.affiliated_institutions.add(*user.affiliated_institutions.values_list('pk', flat=True))
for service in user.external_identity:
for service_id in user.external_identity[service].keys():
if not (
service_id in self.external_identity.get(service, '') and
self.external_identity[service][service_id] == 'VERIFIED'
):
# Prevent 'CREATE', merging user has already been created.
external = user.external_identity[service][service_id]
status = 'VERIFIED' if external == 'VERIFIED' else 'LINK'
if self.external_identity.get(service):
self.external_identity[service].update(
{service_id: status}
)
else:
self.external_identity[service] = {
service_id: status
}
user.external_identity = {}
# FOREIGN FIELDS
self.external_accounts.add(*user.external_accounts.values_list('pk', flat=True))
# - addons
# Note: This must occur before the merged user is removed as a
# contributor on the nodes, as an event hook is otherwise fired
# which removes the credentials.
for addon in user.get_addons():
user_settings = self.get_or_add_addon(addon.config.short_name)
user_settings.merge(addon)
user_settings.save()
# - projects where the user was a contributor (group member only are not included).
for node in user.contributed:
# Skip quickfiles
if node.is_quickfiles:
continue
user_perms = Contributor(node=node, user=user).permission
# if both accounts are contributor of the same project
if node.is_contributor(self) and node.is_contributor(user):
self_perms = Contributor(node=node, user=self).permission
permissions = API_CONTRIBUTOR_PERMISSIONS[max(API_CONTRIBUTOR_PERMISSIONS.index(user_perms), API_CONTRIBUTOR_PERMISSIONS.index(self_perms))]
node.set_permissions(user=self, permissions=permissions)
visible1 = self._id in node.visible_contributor_ids
visible2 = user._id in node.visible_contributor_ids
if visible1 != visible2:
node.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
node.contributor_set.filter(user=user).delete()
else:
node.contributor_set.filter(user=user).update(user=self)
node.add_permission(self, user_perms)
node.remove_permission(user, user_perms)
node.save()
# Skip bookmark collections
user.collection_set.exclude(is_bookmark_collection=True).update(creator=self)
from osf.models import QuickFilesNode
from osf.models import BaseFileNode
# - projects where the user was the creator
user.nodes_created.exclude(type=QuickFilesNode._typedmodels_type).update(creator=self)
# - file that the user has checked_out, import done here to prevent import error
for file_node in BaseFileNode.files_checked_out(user=user):
file_node.checkout = self
file_node.save()
# Transfer user's preprints
self._merge_users_preprints(user)
# Transfer user's draft registrations
self._merge_user_draft_registrations(user)
# transfer group membership
for group in user.osf_groups:
if not group.is_manager(self):
if group.has_permission(user, MANAGE):
group.make_manager(self)
else:
group.make_member(self)
group.remove_member(user)
# finalize the merge
remove_sessions_for_user(user)
# - username is set to the GUID so the merging user can set it primary
# in the future (note: it cannot be set to None due to non-null constraint)
user.set_unusable_username()
user.set_unusable_password()
user.verification_key = None
user.osf_mailing_lists = {}
user.merged_by = self
user.save()
def _merge_users_preprints(self, user):
"""
Preprints use guardian. The PreprintContributor table stores order and bibliographic information.
Permissions are stored on guardian tables. PreprintContributor information needs to be transferred
from user -> self, and preprint permissions need to be transferred from user -> self.
"""
from osf.models.preprint import PreprintContributor
# Loop through `user`'s preprints
for preprint in user.preprints.all():
user_contributor = PreprintContributor.objects.get(preprint=preprint, user=user)
user_perms = user_contributor.permission
# Both `self` and `user` are contributors on the preprint
if preprint.is_contributor(self) and preprint.is_contributor(user):
self_contributor = PreprintContributor.objects.get(preprint=preprint, user=self)
self_perms = self_contributor.permission
max_perms_index = max(API_CONTRIBUTOR_PERMISSIONS.index(self_perms), API_CONTRIBUTOR_PERMISSIONS.index(user_perms))
# Add the highest of `self` perms or `user` perms to `self`
preprint.set_permissions(user=self, permissions=API_CONTRIBUTOR_PERMISSIONS[max_perms_index])
if not self_contributor.visible and user_contributor.visible:
# if `self` is not visible, but `user` is visible, make `self` visible.
preprint.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
# Now that perms and bibliographic info have been transferred to `self` contributor,
# delete `user` contributor
user_contributor.delete()
else:
# `self` is not a contributor, but `user` is. Transfer `user` permissions and
# contributor information to `self`. Remove permissions from `user`.
preprint.contributor_set.filter(user=user).update(user=self)
preprint.add_permission(self, user_perms)
if preprint.creator == user:
preprint.creator = self
preprint.remove_permission(user, user_perms)
preprint.save()
@property
def draft_registrations_active(self):
"""
Active draft registrations attached to a user (user is a contributor)
"""
return self.draft_registrations.filter(
(models.Q(registered_node__isnull=True) | models.Q(registered_node__deleted__isnull=False)),
branched_from__deleted__isnull=True,
deleted__isnull=True,
)
def _merge_user_draft_registrations(self, user):
"""
Draft Registrations have contributors, and this model uses guardian.
The DraftRegistrationContributor table stores order and bibliographic information.
Permissions are stored on guardian tables. DraftRegistration information needs to be transferred
from user -> self, and draft registration permissions need to be transferred from user -> self.
"""
from osf.models import DraftRegistrationContributor
# Loop through `user`'s draft registrations
for draft_reg in user.draft_registrations.all():
user_contributor = DraftRegistrationContributor.objects.get(draft_registration=draft_reg, user=user)
user_perms = user_contributor.permission
# Both `self` and `user` are contributors on the draft reg
if draft_reg.is_contributor(self) and draft_reg.is_contributor(user):
self_contributor = DraftRegistrationContributor.objects.get(draft_registration=draft_reg, user=self)
self_perms = self_contributor.permission
max_perms_index = max(API_CONTRIBUTOR_PERMISSIONS.index(self_perms), API_CONTRIBUTOR_PERMISSIONS.index(user_perms))
# Add the highest of `self` perms or `user` perms to `self`
draft_reg.set_permissions(user=self, permissions=API_CONTRIBUTOR_PERMISSIONS[max_perms_index])
if not self_contributor.visible and user_contributor.visible:
# if `self` is not visible, but `user` is visible, make `self` visible.
draft_reg.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
# Now that perms and bibliographic info have been transferred to `self` contributor,
# delete `user` contributor
user_contributor.delete()
else:
# `self` is not a contributor, but `user` is. Transfer `user` permissions and
# contributor information to `self`. Remove permissions from `user`.
draft_reg.contributor_set.filter(user=user).update(user=self)
draft_reg.add_permission(self, user_perms)
if draft_reg.initiator == user:
draft_reg.initiator = self
draft_reg.remove_permission(user, user_perms)
draft_reg.save()
def deactivate_account(self):
"""
Disable the user account: set is_disabled to True, unsubscribe the user
from mailchimp emails, and remove any existing sessions.
Ported from framework/auth/core.py
"""
from website import mailchimp_utils
from framework.auth import logout
try:
mailchimp_utils.unsubscribe_mailchimp(
list_name=website_settings.MAILCHIMP_GENERAL_LIST,
user_id=self._id,
username=self.username
)
except mailchimp_utils.mailchimp.ListNotSubscribedError:
pass
except mailchimp_utils.mailchimp.InvalidApiKeyError:
if not website_settings.ENABLE_EMAIL_SUBSCRIPTIONS:
pass
else:
raise
except mailchimp_utils.mailchimp.EmailNotExistsError:
pass
# Call to `unsubscribe` above saves, and can lead to stale data
self.reload()
self.is_disabled = True
# we must call both methods to ensure the current session is cleared and all existing
# sessions are revoked.
req = get_current_request()
if isinstance(req, FlaskRequest):
logout()
remove_sessions_for_user(self)
def reactivate_account(self):
"""
Enable user account
"""
self.is_disabled = False
self.requested_deactivation = False
from website.mailchimp_utils import subscribe_on_confirm
subscribe_on_confirm(self)
def update_is_active(self):
"""Update ``is_active`` to be consistent with the fields that
it depends on.
"""
# The user can log in if they have set a password OR
# have a verified external ID, e.g an ORCID
can_login = self.has_usable_password() or (
'VERIFIED' in sum([list(each.values()) for each in self.external_identity.values()], [])
)
self.is_active = (
self.is_registered and
self.is_confirmed and
can_login and
not self.is_merged and
not self.is_disabled
)
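# Illustrative sketch: the can_login check flattens external_identity, so
#   {'ORCID': {'0000-0002-1825-0097': 'VERIFIED'}}
# reduces to ['VERIFIED'] and counts as a usable login even when no
# password has been set.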
# Overrides BaseModel
def save(self, *args, **kwargs):
from website import mailchimp_utils
self.update_is_active()
self.username = self.username.lower().strip() if self.username else None
dirty_fields = set(self.get_dirty_fields(check_relationship=True))
ret = super(OSFUser, self).save(*args, **kwargs)
if self.SEARCH_UPDATE_FIELDS.intersection(dirty_fields) and self.is_confirmed:
self.update_search()
self.update_search_nodes_contributors()
if 'fullname' in dirty_fields:
from osf.models.quickfiles import get_quickfiles_project_title, QuickFilesNode
quickfiles = QuickFilesNode.objects.filter(creator=self).first()
if quickfiles:
quickfiles.title = get_quickfiles_project_title(self)
quickfiles.save()
if 'username' in dirty_fields:
for list_name, subscription in self.mailchimp_mailing_lists.items():
if subscription:
mailchimp_utils.subscribe_mailchimp(list_name, self._id)
return ret
# Legacy methods
@classmethod
def create(cls, username, password, fullname, accepted_terms_of_service=None):
validate_email(username) # Raises BlockedEmailError if spam address
user = cls(
username=username,
fullname=fullname,
accepted_terms_of_service=accepted_terms_of_service
)
user.update_guessed_names()
user.set_password(password)
return user
def set_password(self, raw_password, notify=True):
"""Set the password for this user to the hash of ``raw_password``.
If this is a new user, we're done. If this is a password change,
then email the user about the change and clear all the old sessions
so that users will have to log in again with the new password.
:param raw_password: the plaintext value of the new password
:param notify: Only meant for unit tests to keep extra notifications from being sent
:rtype: list
:returns: Changed fields from the user save
"""
had_existing_password = bool(self.has_usable_password() and self.is_confirmed)
if self.username == raw_password:
raise ChangePasswordError(['Password cannot be the same as your email address'])
super(OSFUser, self).set_password(raw_password)
if had_existing_password and notify:
mails.send_mail(
to_addr=self.username,
mail=mails.PASSWORD_RESET,
user=self,
can_change_preferences=False,
osf_contact_email=website_settings.OSF_CONTACT_EMAIL
)
remove_sessions_for_user(self)
@classmethod
def create_unconfirmed(cls, username, password, fullname, external_identity=None,
do_confirm=True, campaign=None, accepted_terms_of_service=None):
"""Create a new user who has begun registration but needs to verify
their primary email address (username).
"""
user = cls.create(username, password, fullname, accepted_terms_of_service)
user.add_unconfirmed_email(username, external_identity=external_identity)
user.is_registered = False
if external_identity:
user.external_identity.update(external_identity)
if campaign:
# needed to prevent circular import
from framework.auth.campaigns import system_tag_for_campaign # skipci
# User needs to be saved before adding system tags (due to m2m relationship)
user.save()
user.add_system_tag(system_tag_for_campaign(campaign))
else:
user.save()
user.add_system_tag(OsfSourceTags.Osf.value)
return user
@classmethod
def create_confirmed(cls, username, password, fullname):
user = cls.create(username, password, fullname)
user.is_registered = True
user.save() # Must save before using auto_now_add field
user.date_confirmed = user.date_registered
user.emails.create(address=username.lower().strip())
return user
def get_unconfirmed_email_for_token(self, token):
"""Return email if valid.
:rtype: bool
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: InvalidTokenError if trying to access a token that is invalid.
"""
if token not in self.email_verifications:
raise InvalidTokenError
verification = self.email_verifications[token]
# Not all tokens are guaranteed to have expiration dates
if (
'expiration' in verification and
verification['expiration'].replace(tzinfo=pytz.utc) < timezone.now()
):
raise ExpiredTokenError
return verification['email']
def get_unconfirmed_emails_exclude_external_identity(self):
"""Return a list of unconfirmed emails that are not related to external identity."""
unconfirmed_emails = []
if self.email_verifications:
for token, value in self.email_verifications.items():
if not value.get('external_identity'):
unconfirmed_emails.append(value.get('email'))
return unconfirmed_emails
@property
def unconfirmed_email_info(self):
"""Return a list of dictionaries containing information about each of this
user's unconfirmed emails.
"""
unconfirmed_emails = []
email_verifications = self.email_verifications or []
for token in email_verifications:
if self.email_verifications[token].get('confirmed', False):
try:
user_merge = OSFUser.objects.get(emails__address__iexact=self.email_verifications[token]['email'])
except OSFUser.DoesNotExist:
user_merge = False
unconfirmed_emails.append({'address': self.email_verifications[token]['email'],
'token': token,
'confirmed': self.email_verifications[token]['confirmed'],
'user_merge': user_merge.email if user_merge else False})
return unconfirmed_emails
def clean_email_verifications(self, given_token=None):
email_verifications = deepcopy(self.email_verifications or {})
for token in self.email_verifications or {}:
try:
self.get_unconfirmed_email_for_token(token)
except (KeyError, ExpiredTokenError):
email_verifications.pop(token)
continue
if token == given_token:
email_verifications.pop(token)
self.email_verifications = email_verifications
def verify_password_token(self, token):
"""
Verify that the password reset token for this user is valid.
:param token: the token in verification key
:return `True` if valid, otherwise `False`
"""
if token and self.verification_key_v2:
try:
return (self.verification_key_v2['token'] == token and
self.verification_key_v2['expires'] > timezone.now())
except AttributeError:
return False
return False
def verify_claim_token(self, token, project_id):
"""Return whether or not a claim token is valid for this user for
a given node to which they were added as an unregistered contributor.
"""
try:
record = self.get_unclaimed_record(project_id)
except ValueError: # No unclaimed record for given pid
return False
return record['token'] == token
@classmethod
def create_unregistered(cls, fullname, email=None):
"""Create a new unregistered user.
"""
user = cls(
username=email,
fullname=fullname,
is_invited=True,
is_registered=False,
)
if not email:
user.set_unusable_username()
user.set_unusable_password()
user.update_guessed_names()
return user
def update_guessed_names(self):
"""Updates the CSL name fields inferred from the the full name.
"""
parsed = impute_names(self.fullname)
self.given_name = parsed['given']
self.middle_names = parsed['middle']
self.family_name = parsed['family']
self.suffix = parsed['suffix']
def add_unconfirmed_email(self, email, expiration=None, external_identity=None):
"""
Add an email verification token for a given email.
:param email: the email to confirm
:param expiration: overwrite the default expiration time
:param external_identity: the user's external identity
:return: a token
:raises: ValueError if email already confirmed, except for login through external idp.
"""
# Note: This is technically not compliant with RFC 822, which requires
# that case be preserved in the "local-part" of an address. From
# a practical standpoint, the vast majority of email servers do
# not preserve case.
# ref: https://tools.ietf.org/html/rfc822#section-6
email = email.lower().strip()
with reraise_django_validation_errors():
validate_email(email)
if not external_identity and self.emails.filter(address=email).exists():
raise ValueError('Email already confirmed to this user.')
# If the unconfirmed email is already present, refresh the token
if email in self.unconfirmed_emails:
self.remove_unconfirmed_email(email)
verification_key = generate_verification_key(verification_type='confirm')
# handle when email_verifications is None
if not self.email_verifications:
self.email_verifications = {}
self.email_verifications[verification_key['token']] = {
'email': email,
'confirmed': False,
'expiration': expiration if expiration else verification_key['expires'],
'external_identity': external_identity,
}
return verification_key['token']
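# Hypothetical usage sketch (address invented):
#   token = user.add_unconfirmed_email('new@example.org')
#   user.save()
#   url = user.get_confirmation_url('new@example.org')
# The token stays in user.email_verifications until confirm_email(token)
# or clean_email_verifications() removes it.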
def remove_unconfirmed_email(self, email):
"""Remove an unconfirmed email addresses and their tokens."""
for token, value in self.email_verifications.items():
if value.get('email') == email:
del self.email_verifications[token]
return True
return False
def remove_email(self, email):
"""Remove a confirmed email"""
if email == self.username:
raise PermissionsError("Can't remove primary email")
if self.emails.filter(address=email):
self.emails.filter(address=email).delete()
signals.user_email_removed.send(self, email=email, osf_contact_email=website_settings.OSF_CONTACT_EMAIL)
def get_confirmation_token(self, email, force=False, renew=False):
"""Return the confirmation token for a given email.
:param str email: The email to get the token for.
:param bool force: If an expired token exists for the given email, generate a new one and return it.
:param bool renew: Generate a new token and return it.
:return Return the confirmation token.
:raises: ExpiredTokenError if trying to access a token that is expired and force=False.
:raises: KeyError if there is no token for the email.
"""
# TODO: Refactor "force" flag into User.get_or_add_confirmation_token
for token, info in self.email_verifications.items():
if info['email'].lower() == email.lower():
# Old records will not have an expiration key. If it's missing,
# assume the token is expired
expiration = info.get('expiration')
if renew:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
if not expiration or (expiration and expiration < timezone.now()):
if not force:
raise ExpiredTokenError('Token for email "{0}" is expired'.format(email))
else:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
return token
raise KeyError('No confirmation token for email "{0}"'.format(email))
def get_confirmation_url(self, email,
external=True,
force=False,
renew=False,
external_id_provider=None,
destination=None):
"""Return the confirmation url for a given email.
:param email: The email to confirm.
:param external: Use absolute or relative url.
:param force: If an expired token exists for the given email, generate a new one and return it.
:param renew: Generate a new token and return it.
:param external_id_provider: The external identity provider that authenticates the user.
:param destination: The destination page to redirect after confirmation
:return: Return the confirmation url.
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: KeyError if there is no token for the email.
"""
base = website_settings.DOMAIN if external else '/'
token = self.get_confirmation_token(email, force=force, renew=renew)
external = 'external/' if external_id_provider else ''
destination = '?{}'.format(urlencode({'destination': destination})) if destination else ''
return '{0}confirm/{1}{2}/{3}/{4}'.format(base, external, self._primary_key, token, destination)
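# Illustrative sketch (GUID and token invented; assumes
# website_settings.DOMAIN == 'https://osf.io/'): for _primary_key 'abc12'
# the returned URL looks like
#   'https://osf.io/confirm/abc12/<token>/'
# with an 'external/' segment inserted when external_id_provider is given
# and a '?destination=...' suffix when destination is given.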
def register(self, username, password=None, accepted_terms_of_service=None):
"""Registers the user.
"""
self.username = username
if password:
self.set_password(password)
if not self.emails.filter(address=username):
self.emails.create(address=username)
self.is_registered = True
self.date_confirmed = timezone.now()
if accepted_terms_of_service:
self.accepted_terms_of_service = timezone.now()
self.update_search()
self.update_search_nodes()
# Emit signal that a user has confirmed
signals.user_confirmed.send(self)
return self
def confirm_email(self, token, merge=False):
"""Confirm the email address associated with the token"""
email = self.get_unconfirmed_email_for_token(token)
# If this email is confirmed on another account, abort
try:
if check_select_for_update():
user_to_merge = OSFUser.objects.filter(emails__address=email).select_for_update().get()
else:
user_to_merge = OSFUser.objects.get(emails__address=email)
except OSFUser.DoesNotExist:
user_to_merge = None
if user_to_merge and merge:
self.merge_user(user_to_merge)
elif user_to_merge:
raise MergeConfirmedRequiredError(
'Merge requires confirmation',
user=self,
user_to_merge=user_to_merge,
)
# If another user has this email as its username, get it
try:
unregistered_user = OSFUser.objects.exclude(guids___id=self._id, guids___id__isnull=False).get(username=email)
except OSFUser.DoesNotExist:
unregistered_user = None
if unregistered_user:
self.merge_user(unregistered_user)
self.save()
unregistered_user.username = None
if not self.emails.filter(address=email).exists():
self.emails.create(address=email)
# Complete registration if primary email
if email.lower() == self.username.lower():
self.register(self.username)
self.date_confirmed = timezone.now()
# Revoke token
del self.email_verifications[token]
# TODO: We can't assume that all unclaimed records are now claimed.
# Clear unclaimed records, so user's name shows up correctly on
# all projects
self.unclaimed_records = {}
self.save()
self.update_search_nodes()
return True
def confirm_spam(self, save=True):
self.deactivate_account()
super().confirm_spam(save=save)
for node in self.nodes.filter(is_public=True, is_deleted=False).exclude(type='osf.quickfilesnode'):
node.confirm_spam(train_akismet=False)
for preprint in self.preprints.filter(is_public=True, deleted__isnull=True):
preprint.confirm_spam(train_akismet=False)
def confirm_ham(self, save=False):
self.reactivate_account()
super().confirm_ham(save=save)
for node in self.nodes.filter().exclude(type='osf.quickfilesnode'):
node.confirm_ham(save=save, train_akismet=False)
for preprint in self.preprints.filter():
preprint.confirm_ham(save=save, train_akismet=False)
@property
def is_assumed_ham(self):
user_email_addresses = self.emails.values_list('address', flat=True)
user_email_domains = [
# get everything after the @
address.rpartition('@')[2].lower()
for address in user_email_addresses
]
user_has_trusted_email = NotableDomain.objects.filter(
note=NotableDomain.Note.ASSUME_HAM_UNTIL_REPORTED,
domain__in=user_email_domains,
).exists()
return user_has_trusted_email
def update_search(self):
from website.search.search import update_user
update_user(self)
def update_search_nodes_contributors(self):
"""
Bulk update contributor name on all nodes on which the user is
a contributor.
:return:
"""
from website.search import search
search.update_contributors_async(self.id)
def update_search_nodes(self):
"""Call `update_search` on all nodes on which the user is a
contributor. Needed to add self to contributor lists in search upon
registration or claiming.
"""
# Group member names not listed on Node search result, just Group names, so don't
# need to update nodes where user has group member perms only
for node in self.contributor_to:
node.update_search()
for group in self.osf_groups:
group.update_search()
def update_date_last_login(self, login_time=None):
self.date_last_login = login_time or timezone.now()
def get_summary(self, formatter='long'):
return {
'user_fullname': self.fullname,
'user_profile_url': self.profile_url,
'user_display_name': name_formatters[formatter](self),
'user_is_registered': self.is_registered
}
def check_password(self, raw_password):
"""
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
Source: https://github.com/django/django/blob/master/django/contrib/auth/base_user.py#L104
"""
def setter(raw_password):
self.set_password(raw_password, notify=False)
# Password hash upgrades shouldn't be considered password changes.
self._password = None
self.save(update_fields=['password'])
return check_password(raw_password, self.password, setter)
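# Note on the setter above: as in Django's AbstractBaseUser.check_password,
# the setter is only invoked when the raw password is correct but the stored
# hash needs upgrading; it re-saves just the password field and clears
# _password so the rehash is not treated as a password change.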
def change_password(self, raw_old_password, raw_new_password, raw_confirm_password):
"""Change the password for this user to the hash of ``raw_new_password``."""
raw_old_password = (raw_old_password or '').strip()
raw_new_password = (raw_new_password or '').strip()
raw_confirm_password = (raw_confirm_password or '').strip()
# TODO: Move validation to set_password
issues = []
if not self.check_password(raw_old_password):
self.old_password_invalid_attempts += 1
self.change_password_last_attempt = timezone.now()
issues.append('Old password is invalid')
elif raw_old_password == raw_new_password:
issues.append('Password cannot be the same')
elif raw_new_password == self.username:
issues.append('Password cannot be the same as your email address')
if not raw_old_password or not raw_new_password or not raw_confirm_password:
issues.append('Passwords cannot be blank')
elif len(raw_new_password) < 8:
issues.append('Password should be at least eight characters')
elif len(raw_new_password) > 256:
issues.append('Password should not be longer than 256 characters')
if raw_new_password != raw_confirm_password:
issues.append('Password does not match the confirmation')
if issues:
raise ChangePasswordError(issues)
self.set_password(raw_new_password)
self.reset_old_password_invalid_attempts()
if self.verification_key_v2:
self.verification_key_v2['expires'] = timezone.now()
# new verification key (v1) for CAS
self.verification_key = generate_verification_key(verification_type=None)
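# Hypothetical usage sketch (passwords invented):
#   try:
#       user.change_password('old-secret', 'new-secret-123', 'new-secret-123')
#       user.save()
#   except ChangePasswordError as error:
#       pass  # the error carries the list of issues collected above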
def reset_old_password_invalid_attempts(self):
self.old_password_invalid_attempts = 0
def profile_image_url(self, size=None):
"""A generalized method for getting a user's profile picture urls.
We may choose to use some service other than gravatar in the future,
and should not commit ourselves to using a specific service (mostly
an API concern).
As long as we use gravatar, this is just a proxy to User.gravatar_url
"""
return self._gravatar_url(size)
def _gravatar_url(self, size):
return filters.gravatar(
self,
use_ssl=True,
size=size
)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
def display_full_name(self, node=None):
"""Return the full name , as it would display in a contributor list for a
given node.
NOTE: Unclaimed users may have a different name for different nodes.
"""
if node:
unclaimed_data = self.unclaimed_records.get(str(node._id), None)
if unclaimed_data:
return unclaimed_data['name']
return self.fullname
def add_system_tag(self, tag):
if not isinstance(tag, Tag):
tag_instance, created = Tag.all_tags.get_or_create(name=tag.lower(), system=True)
else:
tag_instance = tag
if not tag_instance.system:
raise ValueError('Non-system tag passed to add_system_tag')
if not self.all_tags.filter(id=tag_instance.id).exists():
self.tags.add(tag_instance)
return tag_instance
def get_recently_added(self):
return (
each.contributor
for each in self.recentlyaddedcontributor_set.order_by('-date_added')
)
def _projects_in_common_query(self, other_user):
"""
Returns projects that both self and other_user have in common; both are either contributors or group members
"""
from osf.models import AbstractNode
return AbstractNode.objects.get_nodes_for_user(other_user, base_queryset=self.contributor_or_group_member_to).exclude(type='osf.collection')
def get_projects_in_common(self, other_user):
"""Returns either a collection of "shared projects" (projects that both users are contributors or group members for)
or just their primary keys
"""
query = self._projects_in_common_query(other_user)
return set(query.all())
def n_projects_in_common(self, other_user):
"""Returns number of "shared projects" (projects that both users are contributors or group members for)"""
return self._projects_in_common_query(other_user).count()
def add_unclaimed_record(self, claim_origin, referrer, given_name, email=None):
"""Add a new project entry in the unclaimed records dictionary.
:param object claim_origin: Object this unclaimed user was added to; currently a `Node`, `Preprint`, `Provider`, or `OSFGroup`
:param User referrer: User who referred this user.
:param str given_name: The full name that the referrer gave for this user.
:param str email: The given email address.
:returns: The added record
"""
from osf.models.provider import AbstractProvider
from osf.models.osf_group import OSFGroup
if isinstance(claim_origin, AbstractProvider):
if not bool(get_perms(referrer, claim_origin)):
raise PermissionsError(
'Referrer does not have permission to add a moderator to provider {0}'.format(claim_origin._id)
)
elif isinstance(claim_origin, OSFGroup):
if not claim_origin.has_permission(referrer, MANAGE):
raise PermissionsError(
'Referrer does not have permission to add a member to {0}'.format(claim_origin._id)
)
else:
if not claim_origin.has_permission(referrer, ADMIN):
raise PermissionsError(
'Referrer does not have permission to add a contributor to {0}'.format(claim_origin._id)
)
pid = str(claim_origin._id)
referrer_id = str(referrer._id)
if email:
clean_email = email.lower().strip()
else:
clean_email = None
verification_key = generate_verification_key(verification_type='claim')
try:
record = self.unclaimed_records[claim_origin._id]
except KeyError:
record = None
if record:
del record
record = {
'name': given_name,
'referrer_id': referrer_id,
'token': verification_key['token'],
'expires': verification_key['expires'],
'email': clean_email,
}
self.unclaimed_records[pid] = record
return record
def get_unclaimed_record(self, project_id):
"""Get an unclaimed record for a given project_id.
:raises: ValueError if there is no record for the given project.
"""
try:
return self.unclaimed_records[project_id]
except KeyError: # reraise as ValueError
raise ValueError('No unclaimed record for user {self._id} on node {project_id}'
.format(**locals()))
def get_claim_url(self, project_id, external=False):
"""Return the URL that an unclaimed user should use to claim their
account. Return ``None`` if there is no unclaimed_record for the given
project ID.
:param project_id: The project ID/preprint ID/OSF group ID for the unclaimed record
:raises: ValueError if a record doesn't exist for the given project ID
:rtype: dict
:returns: The unclaimed record for the project
"""
uid = self._primary_key
base_url = website_settings.DOMAIN if external else '/'
unclaimed_record = self.get_unclaimed_record(project_id)
token = unclaimed_record['token']
return '{base_url}user/{uid}/{project_id}/claim/?token={token}'\
.format(**locals())
def is_affiliated_with_institution(self, institution):
"""Return if this user is affiliated with ``institution``."""
return self.affiliated_institutions.filter(id=institution.id).exists()
def update_affiliated_institutions_by_email_domain(self):
"""
Append affiliated_institutions by email domain.
:return:
"""
try:
email_domains = [email.split('@')[1].lower() for email in self.emails.values_list('address', flat=True)]
insts = Institution.objects.filter(email_domains__overlap=email_domains)
if insts.exists():
self.affiliated_institutions.add(*insts)
except IndexError:
pass
def remove_institution(self, inst_id):
try:
inst = self.affiliated_institutions.get(_id=inst_id)
except Institution.DoesNotExist:
return False
else:
self.affiliated_institutions.remove(inst)
return True
def get_activity_points(self):
return analytics.get_total_activity_count(self._id)
def get_or_create_cookie(self, secret=None):
"""Find the cookie for the given user
Create a new session if no cookie is found
:param str secret: The key to sign the cookie with
:returns: The signed cookie
"""
secret = secret or settings.SECRET_KEY
user_session = Session.objects.filter(
data__auth_user_id=self._id
).order_by(
'-modified'
).first()
if not user_session:
user_session = Session(data={
'auth_user_id': self._id,
'auth_user_username': self.username,
'auth_user_fullname': self.fullname,
})
user_session.save()
signer = itsdangerous.Signer(secret)
return signer.sign(user_session._id)
@classmethod
def from_cookie(cls, cookie, secret=None):
"""Attempt to load a user from their signed cookie
:returns: None if a user cannot be loaded else User
"""
if not cookie:
return None
secret = secret or settings.SECRET_KEY
try:
session_id = ensure_str(itsdangerous.Signer(secret).unsign(cookie))
except itsdangerous.BadSignature:
return None
user_session = Session.load(session_id)
if user_session is None:
return None
return cls.load(user_session.data.get('auth_user_id'))
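    # A minimal round-trip sketch for the two cookie helpers above (illustrative
    # only; ``user`` is assumed to be a saved OSFUser and settings.SECRET_KEY is
    # configured):
    #
    #   cookie = user.get_or_create_cookie()
    #   assert OSFUser.from_cookie(cookie) == user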
def get_node_comment_timestamps(self, target_id):
""" Returns the timestamp for when comments were last viewed on a node, file or wiki.
"""
default_timestamp = dt.datetime(1970, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
return self.comments_viewed_timestamp.get(target_id, default_timestamp)
def _get_spam_content(self, saved_fields):
content = []
for field, contents in saved_fields.items():
if field in self.SPAM_USER_PROFILE_FIELDS.keys():
for item in contents:
for key, value in item.items():
if key in self.SPAM_USER_PROFILE_FIELDS[field]:
content.append(value)
return ' '.join(content).strip()
def check_spam(self, saved_fields, request_headers):
if not website_settings.SPAM_CHECK_ENABLED:
return False
is_spam = False
if set(self.SPAM_USER_PROFILE_FIELDS.keys()).intersection(set(saved_fields.keys())):
content = self._get_spam_content(saved_fields)
if content:
is_spam = self.do_check_spam(
self.fullname,
self.username,
content,
request_headers
)
self.save()
return is_spam
def gdpr_delete(self):
"""
This function does not remove the user object reference from our database, but it does disable the account and
        remove identifying information in a manner compliant with GDPR guidelines.
Follows the protocol described in
https://openscience.atlassian.net/wiki/spaces/PRODUC/pages/482803755/GDPR-Related+protocols
"""
from osf.models import Preprint, AbstractNode
user_nodes = self.nodes.exclude(is_deleted=True)
# Validates the user isn't trying to delete things they deliberately made public.
if user_nodes.filter(type='osf.registration').exists():
raise UserStateError('You cannot delete this user because they have one or more registrations.')
if Preprint.objects.filter(_contributors=self, ever_public=True, deleted__isnull=True).exists():
raise UserStateError('You cannot delete this user because they have one or more preprints.')
        # Validates that the user isn't trying to delete nodes they are the only admin on.
personal_nodes = (
AbstractNode.objects.annotate(contrib_count=Count('_contributors'))
.filter(contrib_count__lte=1)
.filter(contributor__user=self)
.exclude(is_deleted=True)
)
shared_nodes = user_nodes.exclude(id__in=personal_nodes.values_list('id'))
for node in shared_nodes.exclude(type__in=['osf.quickfilesnode', 'osf.draftnode']):
alternate_admins = OSFUser.objects.filter(groups__name=node.format_group(ADMIN)).filter(is_active=True).exclude(id=self.id)
if not alternate_admins:
raise UserStateError(
'You cannot delete node {} because it would be a node with contributors, but with no admin.'.format(
node._id))
for addon in node.get_addons():
if addon.short_name not in ('osfstorage', 'wiki') and addon.user_settings and addon.user_settings.owner.id == self.id:
raise UserStateError('You cannot delete this user because they '
'have an external account for {} attached to Node {}, '
'which has other contributors.'.format(addon.short_name, node._id))
for group in self.osf_groups:
if not group.managers.exclude(id=self.id).filter(is_registered=True).exists() and group.members.exclude(id=self.id).exists():
raise UserStateError('You cannot delete this user because they are the only registered manager of OSFGroup {} that contains other members.'.format(group._id))
for node in shared_nodes.all():
logger.info('Removing {self._id} as a contributor to node (pk:{node_id})...'.format(self=self, node_id=node.pk))
node.remove_contributor(self, auth=Auth(self), log=False)
        # This doesn't remove identifying info, but ensures other users can't see the deleted user's profile etc.
self.deactivate_account()
# delete all personal nodes (one contributor), bookmarks, quickfiles etc.
for node in personal_nodes.all():
logger.info('Soft-deleting node (pk: {node_id})...'.format(node_id=node.pk))
node.remove_node(auth=Auth(self))
for group in self.osf_groups:
if len(group.managers) == 1 and group.managers[0] == self:
group.remove_group()
else:
group.remove_member(self)
logger.info('Clearing identifying information...')
# This removes identifying info
# hard-delete all emails associated with the user
self.emails.all().delete()
# Change name to "Deleted user" so that logs render properly
self.fullname = 'Deleted user'
self.set_unusable_username()
self.set_unusable_password()
self.given_name = ''
self.family_name = ''
self.middle_names = ''
self.mailchimp_mailing_lists = {}
self.osf_mailing_lists = {}
self.verification_key = None
self.suffix = ''
self.jobs = []
self.schools = []
self.social = {}
self.unclaimed_records = {}
self.notifications_configured = {}
# Scrub all external accounts
if self.external_accounts.exists():
logger.info('Clearing identifying information from external accounts...')
for account in self.external_accounts.all():
account.oauth_key = None
account.oauth_secret = None
account.refresh_token = None
account.provider_name = 'gdpr-deleted'
account.display_name = None
account.profile_url = None
account.save()
self.external_accounts.clear()
self.external_identity = {}
self.deleted = timezone.now()
@property
def has_resources(self):
"""
        This is meant to determine if a user has any resources (nodes, preprints, etc.) that might impede their deactivation.
        If a user has no resources, or only deleted resources, this will return False and they can safely be deactivated;
        otherwise they must delete or transfer their outstanding resources.
        :return bool: does the user have any active nodes, preprints, groups, etc.?
"""
from osf.models import Preprint
nodes = self.nodes.filter(deleted__isnull=True).exists()
groups = self.osf_groups.exists()
preprints = Preprint.objects.filter(_contributors=self, ever_public=True, deleted__isnull=True).exists()
return groups or nodes or preprints
class Meta:
# custom permissions for use in the OSF Admin App
permissions = (
# Clashes with built-in permissions
# ('view_osfuser', 'Can view user details'),
)
@receiver(post_save, sender=OSFUser)
def add_default_user_addons(sender, instance, created, **kwargs):
if created:
for addon in website_settings.ADDONS_AVAILABLE:
if 'user' in addon.added_default:
instance.add_addon(addon.short_name)
@receiver(post_save, sender=OSFUser)
def create_bookmark_collection(sender, instance, created, **kwargs):
if created:
new_bookmark_collection(instance)
|
{
"content_hash": "94910db601753ca5e1ad8f4056d11278",
"timestamp": "",
"source": "github",
"line_count": 1926,
"max_line_length": 174,
"avg_line_length": 40.83644859813084,
"alnum_prop": 0.6253703067983878,
"repo_name": "CenterForOpenScience/osf.io",
"id": "bdf015b17806018b6b47328ee55ba4844f8a15df",
"size": "78651",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osf/models/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373895"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11640855"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
import redis
import pickle
try:
import json
except ImportError:
import simplejson as json
import UserDict, UserList
ConnectionError = redis.exceptions.ConnectionError
ResponseError = redis.exceptions.ResponseError
DEFAULT_ENCODING = 'UTF-8' # sys.getdefaultencoding()
#--- System related ----------------------------------------------
SYSTEMS = {
'default': redis.Redis(host='localhost', port=6379)
}
RECOVER_FUNCS = {'default':None}
def setup_redis(name, host, port, db=0, recover_func=None, **kw):
SYSTEMS[name] = redis.Redis(host=host, port=port, db=db, **kw)
RECOVER_FUNCS[name] = recover_func
def get_redis(system='default'):
return SYSTEMS[system]
def ha_redis(func):
""" 让redis访问高可用 """
def new_func(self, *args, **kw):
try:
return func(self, *args, **kw)
except ConnectionError, e:
recover_func = RECOVER_FUNCS[self.system]
if recover_func is not None:
recover_func(self.system)
return func(self, *args, **kw)
else:
raise
return new_func
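# A minimal sketch of wiring a recovery hook for ha_redis (the host/port
# values below are assumptions):
#
#   def reconnect(system):
#       SYSTEMS[system] = redis.Redis(host='localhost', port=6379)
#
#   setup_redis('default', 'localhost', 6379, recover_func=reconnect)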
#--- Decorators ----------------------------------------------
def get_list(name, system='default',serialized_type='json'):
return ListFu(name, system, serialized_type=serialized_type)
def get_queue(name, system='default',serialized_type='json'):
return QueueFu(name, system, serialized_type=serialized_type)
def get_limit_queue(name, length, system='default',serialized_type='json'):
return LimitQueueFu(name, length, system, serialized_type=serialized_type)
def get_hash(name, system='default',serialized_type='json'):
return HashFu(name, system, serialized_type=serialized_type)
def get_set(name, system='default',serialized_type='json'):
return SetFu(name, system, serialized_type=serialized_type)
def get_dict(name, system='default',serialized_type='json'):
return DictFu(name, system, serialized_type=serialized_type)
def get_key(name, system='default',serialized_type='json'):
loads = load_method[serialized_type]
value = get_redis(system).get(name)
try:
return loads(value)
except:return value
def del_key(name, system='default'):
get_redis(system).delete(name)
def get_keys(name, system='default'):
for key in get_redis(system).keys(name + "*"):
key_name = key[len(name):]
yield key_name
def set_key(name, value, system='default',serialized_type='json'):
dumps = dump_method[serialized_type]
value = dumps(value)
get_redis(system).set(name, value)
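# A minimal usage sketch of the helpers above and the wrappers defined below
# (assumes a reachable redis server registered as the 'default' system):
#
#   jobs = get_queue('jobs')            # FIFO queue backed by a redis list
#   jobs.push({'task': 'resize', 'id': 1})
#   item = jobs.pop(timeout=-1)         # timeout < 0 -> non-blocking pop
#
#   stats = get_hash('stats')           # dict-like wrapper over a redis hash
#   stats['visits'] = 42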
#---serialize data type----------------------------------------
def _convert_persistent_obj(obj):
    # fix json.dumps raising TypeError
    # when given a persistent (UserDict/UserList) object
if isinstance(obj, (UserDict.UserDict, dict)):
return dict(obj)
elif isinstance(obj, (UserList.UserList, list, set)):
return list(obj)
raise TypeError, '%s: %s is not JSON serializable'%(type(obj), repr(obj))
dump_method = {'json':lambda item : json.dumps(item, sort_keys=True, \
encoding=DEFAULT_ENCODING, default=_convert_persistent_obj),
'pickle':pickle.dumps,
'string':str
}
load_method = {'json':json.loads,
'pickle':pickle.loads,
'string':str
}
#--- Data impl. ----------------------------------------------
class ListFu(object):
def __init__(self, name, system, serialized_type='json'):
self.name = name
self.system = system
self.type = serialized_type
self.dumps = dump_method[serialized_type]
self.loads = load_method[serialized_type]
@ha_redis
def append(self, item):
item = self.dumps(item)
get_redis(self.system).lpush(self.name, item)
@ha_redis
def extend(self, iterable):
for item in iterable:
self.append(item)
@ha_redis
def remove(self, value):
value = self.dumps(value)
get_redis(self.system).lrem(self.name, value)
@ha_redis
    def pop(self, index=None):
        if index:
            raise ValueError('Not supported')
        # rpop returns the raw value (or None), not a (key, value) pair
        serialized_data = get_redis(self.system).rpop(self.name)
        if serialized_data:
            return self.loads(serialized_data)
        return None
@ha_redis
def __len__(self):
return get_redis(self.system).llen(self.name)
@ha_redis
    def __iter__(self):
        client = get_redis(self.system)
        i = 0
        while True:
            # lrange is inclusive at both ends, so fetch batches of 30 items
            items = client.lrange(self.name, i, i + 29)
            if len(items) == 0:
                break
            for item in items:
                yield self.loads(item)
            i += 30
@ha_redis
def __getitem__(self, index):
client = get_redis(self.system)
value = client.lindex(self.name, index)
return self.loads(value) if value else None
@ha_redis
def __getslice__(self, i, j):
client = get_redis(self.system)
items = client.lrange(self.name, i, j)
for item in items:
yield self.loads(item)
class HashFu:
def __init__(self, name, system, serialized_type='json'):
self.name = name
self.system = system
self.dumps = dump_method[serialized_type]
self.loads = load_method[serialized_type]
@ha_redis
def get(self, key, default=None):
value = get_redis(self.system).hget(self.name, key)
try:
return self.loads(value)
except: return default
@ha_redis
def items(self):
for key in self.keys():
            # the key list is not a real-time snapshot;
            # the task may already have been taken (when several worker threads watch this queue)
value = self.get(key)
if value is None: continue
yield key, value
@ha_redis
def keys(self):
return get_redis(self.system).hkeys(self.name) or []
@ha_redis
    def values(self):
        # hvals returns a list of serialized values; deserialize each one
        return [self.loads(v) for v in get_redis(self.system).hvals(self.name)]
@ha_redis
def pop(self, key):
pline = get_redis(self.system).pipeline()
pline.hget(self.name, key).hdel(self.name, key)
_value, _expire = pline.execute()
if _expire:
return self.loads(_value)
else:
            #raise KeyError, 'redis hash does not contain the %s key\n\n' % key
            print 'redis hash does not contain the %s key\n\n' % key
return None
@ha_redis
def __len__(self):
return get_redis(self.system).hlen(self.name) or 0
@ha_redis
def __getitem__(self, key):
val = self.get(key)
if not val:
raise KeyError
return val
@ha_redis
def __setitem__(self, key, value):
value = self.dumps(value)
return get_redis(self.system).hset(self.name, key, value)
@ha_redis
def __delitem__(self, key):
get_redis(self.system).hdel(self.name, key)
@ha_redis
def __contains__(self, key):
return get_redis(self.system).hexists(self.name, key)
@ha_redis
def update(self, new_dict, **kw):
update = {}
if new_dict and hasattr(new_dict, 'keys'):
for key in new_dict:
update[key] = self.dumps(new_dict[key])
elif new_dict:
            for key, value in new_dict:
                update[key] = self.dumps(value)
        for key in kw:
            update[key] = self.dumps(kw[key])
if update:
get_redis(self.system).hmset(self.name, update)
class SetFu:
def __init__(self, name, system, serialized_type='json'):
self.name = name
self.system = system
self.dumps = dump_method[serialized_type]
self.loads = load_method[serialized_type]
@ha_redis
def add(self, item):
item = self.dumps(item)
get_redis(self.system).sadd(self.name, item)
@ha_redis
def remove(self, item):
item = self.dumps(item)
get_redis(self.system).srem(self.name, item)
@ha_redis
    def pop(self, item=None):
        # redis spop removes and returns a random member; a specific member
        # cannot be requested, so the argument is ignored
        value = get_redis(self.system).spop(self.name)
        return self.loads(value) if value else None
@ha_redis
def __iter__(self):
client = get_redis(self.system)
for item in client.smembers(self.name):
yield self.loads(item)
@ha_redis
def __len__(self):
return len(get_redis(self.system).smembers(self.name))
@ha_redis
def __contains__(self, item):
item = self.dumps(item)
return get_redis(self.system).sismember(self.name, item)
class DictFu:
def __init__(self, name, system, serialized_type='json'):
self.name = name
self.system = system
self.dumps = dump_method[serialized_type]
self.loads = load_method[serialized_type]
@ha_redis
def get(self, key, default=None):
value = get_redis(self.system).get(self.name+key)
try:
return self.loads(value)
except: return default
@ha_redis
def set(self, key, value):
value = self.dumps(value)
get_redis(self.system).set(self.name+key, value)
@ha_redis
def __delitem__(self, key):
get_redis(self.system).delete(self.name+key)
@ha_redis
def __len__(self):
listkey = get_redis(self.system).keys(self.name+"*")
return len(listkey) or 0
@ha_redis
def keys(self):
prefix_len = len(self.name)
return [key[prefix_len:] for key in get_redis(self.system).keys(self.name + "*")]
@ha_redis
def items(self):
        # XXX self.get issues one redis round-trip per key, which is not ideal
key_list = get_redis(self.system).keys(self.name+"*")
for key in key_list:
key_name = key[len(self.name):]
            # the key list is not a real-time snapshot;
            # the task may already have been taken (when several worker threads watch this queue)
value = self.get(key_name)
if value is None: continue
yield key_name, value
@ha_redis
def __getitem__(self, key=''):
val = self.get(key, None)
if val is None:
raise KeyError
return val
@ha_redis
def __setitem__(self, key, value):
self.set(key, value)
@ha_redis
def __contains__(self, key):
return get_redis(self.system).exists(self.name+key)
class QueueFu(ListFu):
def __init__(self, name, system, serialized_type='json'):
super(QueueFu,self).__init__(name, system, serialized_type=serialized_type)
@ha_redis
def push(self, item, to_left=True):
if to_left:
self.append(item)
else:
item = self.dumps(item)
get_redis(self.system).rpush(self.name, item)
@ha_redis
def pop(self, timeout=0, from_right = True):
"""
得到redis list 对象中的一个item,并把item 从 redis list 对象中删除
from_right: 如果值为真,从redis list 对象右边读取,反之,从左边读取
timeout: timeout 等于大于0,以阻塞式获取。timeout 小于0,直接获取返回
"""
if from_right:
if timeout >= 0:
serialized_data = get_redis(self.system).brpop(self.name, timeout)
else:
serialized_data = get_redis(self.system).rpop(self.name)
else:
if timeout >= 0:
serialized_data = get_redis(self.system).blpop(self.name, timeout)
else:
serialized_data = get_redis(self.system).lpop(self.name)
if serialized_data:
            # blocking fetch returns (self.name, result)
if isinstance(serialized_data, (tuple, list, set)) and \
len(serialized_data) == 2:
return self.loads(serialized_data[1]) if serialized_data[1] else None
            # non-blocking fetch returns just the result
else:
return self.loads(serialized_data)
return None
@ha_redis
def reverse(self):
"""倒序输出结果
"""
client = get_redis(self.system)
length = client.llen(self.name)
for index in xrange(length-1, -1, -1):
item = client.lindex(self.name, index)
yield self.loads(item)
class LimitQueueFu(QueueFu):
"""此队列类用于控制队列长度,主要用于日志
"""
def __init__(self, name, length, system, serialized_type='json'):
super(LimitQueueFu,self).__init__(name, system, serialized_type=serialized_type)
self.length = length - 1
@ha_redis
def push(self, item):
#QueueFu.push(self, item)
#get_redis(self.system).ltrim(self.name, 0, self.length)
item = self.dumps(item)
pline = get_redis(self.system).pipeline()
pline.lpush(self.name, item).ltrim(self.name, 0, self.length)
pline.execute()
|
{
"content_hash": "e5b73f76e1a084d34720f3c80e61db38",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 89,
"avg_line_length": 31.07311320754717,
"alnum_prop": 0.5547628083491462,
"repo_name": "audoe/ztq",
"id": "3a0cfbae7e68724e2c2fc06e15948e9f427142e9",
"size": "13586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ztq_core/ztq_core/redis_wrap.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6911"
},
{
"name": "JavaScript",
"bytes": "367"
},
{
"name": "Python",
"bytes": "109781"
},
{
"name": "Visual Basic",
"bytes": "646"
}
],
"symlink_target": ""
}
|
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def get_title():
title = []
with open('/Users/jingyuanli/Desktop/DL/Project/data/classification_data/title.txt', 'r') as f:
lines = f.readlines()
print len(lines)
for line in lines:
title.append(line.strip())
return title
get_title()
|
{
"content_hash": "389c1ec7279437f4098f9ee4313c479c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 96,
"avg_line_length": 21.642857142857142,
"alnum_prop": 0.7029702970297029,
"repo_name": "hbtech-ai/ARPS",
"id": "a041d578707af87e413a245aa609a1e728b6cd61",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classification_data/classification_data/spiders/function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "502678"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
}
|
"""Creates a cache for the specified dataset by executing the gold queries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sqlite3
def main(dataset_name, cache_path, errors_filepath, splits):
if dataset_name == 'spider':
pass
else:
db = sqlite3.connect(os.path.join('databases', dataset_name + '.db'))
c = db.cursor()
cache = dict()
if os.path.exists(cache_path):
print('Reading existing cache from %s' % cache_path)
with open(cache_path) as infile:
cache = json.loads(infile.read())
num_empty = 0
num_queries = 0
with open(os.path.join(dataset_name, dataset_name + '.json')) as infile:
data = json.load(infile)
for query in data:
for example in query['sentences']:
if example['question-split'] not in splits:
continue
anon_sql = query['sql'][0]
nl = example['text']
for variable, value in sorted(
example['variables'].items(), key=lambda x: len(x[0]),
reverse=True):
if not value:
value = '%'
nl = nl.replace(variable, value)
print('%s\t%s' % (variable, value))
anon_sql = anon_sql.replace(variable, value)
anon_sql = anon_sql.replace('= "%"', 'LIKE "%"')
anon_sql = anon_sql.replace('= %', 'LIKE "%"')
if 'scholar' in dataset_name.lower():
new_pred = ''
last_quote = ''
for char in anon_sql:
new_pred += char
if char in {'"', '\''} and not last_quote:
last_quote = char
elif char == last_quote:
last_quote = ''
new_pred += ' COLLATE NOCASE'
anon_sql = new_pred
if 'advising' in dataset_name.lower():
# Fix so that it's selecting a concat of columns instead.
if 'count' in anon_sql.lower():
# Find range of count thing
count_start_idx = anon_sql.lower().index('count')
count_end_idx = (
count_start_idx + anon_sql.lower()[count_start_idx:].index(')'))
if ',' in anon_sql[count_start_idx:count_end_idx]:
problem_segment = anon_sql[count_start_idx:count_end_idx]
problem_segment = problem_segment.replace(',', '||')
anon_sql = (
anon_sql[:count_start_idx] + problem_segment +
anon_sql[count_end_idx:])
prev_token = ''
bad_tokens = set()
for token in anon_sql.split():
if prev_token == '=':
if (token[0] in {'"', '\''} and token[-1] in {'"', '\''} and
token[-2].isnumeric() and not token[1].isnumeric()):
bad_tokens.add(token)
elif token[-1].isnumeric() and not token[0].isnumeric():
bad_tokens.add(token)
prev_token = token
for token in bad_tokens:
anon_sql = anon_sql.replace('= ' + token, 'LIKE "%"')
if bad_tokens:
print(bad_tokens)
print(nl)
# Two specific exceptions on utterances that need correction or take a
# long time to process.
if nl == ('What is the number of businesses user Michelle reviews per '
'month ?'):
anon_sql = ('select count(distinct(review.text)), review.month from '
'review where review.user_id in (select user_id from '
'user where user.name = \'Michelle\') group by '
'review.month;')
if nl == ('return me the number of papers in " University of '
'Michigan " in Databases area .'):
results = '121572'
cache[anon_sql] = results
else:
if anon_sql not in cache:
# Update the cache to include this SQL query.
print(anon_sql)
try:
c.execute(anon_sql)
results = c.fetchall()
except sqlite3.OperationalError as e:
              with open(errors_filepath, 'a') as f:
f.write(nl + '\n')
f.write(anon_sql + '\n')
f.write(str(e) + '\n\n')
results = list()
cache[anon_sql] = results
else:
results = cache[anon_sql]
if not results:
num_empty += 1
if ('advising' not in dataset_name and nl in cache and
cache[nl] != anon_sql):
print(nl)
print(anon_sql)
print(cache[nl])
keep_going = input('Allow this to happen? This utterance will be '
'mapped to the second query.').lower() == 'y'
if not keep_going:
raise ValueError('NL is the same but anonymized SQL is not.')
cache[nl] = anon_sql
num_queries += 1
print('Num empty: %s' % num_empty)
print('Total num queries: %s' % num_queries)
print('Prop empty: %2f' % (100. * num_empty / num_queries))
db.close()
print('Writing cache')
with open(cache_path + '.tmp', 'w') as ofile:
json.dump(cache, ofile)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset_name',
type=str,
help='The name of the dataset to create a cache for.')
parser.add_argument(
'--splits',
type=str,
help='Comma-separated list of split names to create a cache for.')
args = parser.parse_args()
main(args.dataset_name,
os.path.join(args.dataset_name, args.dataset_name + '_cache.json'),
os.path.join(args.dataset_name, 'exec_errors.txt'),
args.splits.split(','))
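# Example invocation (the split names below are assumptions; dataset names
# referenced in this script include 'advising', 'scholar', and 'spider'):
#
#   python create_cache.py --dataset_name=advising --splits=train,dev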
|
{
"content_hash": "7cd0de329866b9a521b384e06cbd9f97",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 80,
"avg_line_length": 34.68674698795181,
"alnum_prop": 0.5314345258770407,
"repo_name": "google-research/language",
"id": "69d2a11e2e62d314ef59610ddade0eb77c7f2a03",
"size": "6373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/xsp/data_utils/create_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
}
|
import random
from django.contrib.auth.models import User
from django.db import models
def generate_stat():
"""Roll 3D6 to make a stat!"""
return roll_dice(3, 6)
def roll_dice(times, sides, modifier=0):
"""
Simulate a dice roll of XdY + Z.
"Rolls" a die of Y sides X times, gets the sum, and adjusts it by an
optional modifier.
Example usage:
# Stats: 3d6
>>> roll_dice(3, 6)
# Saving throw: 1d20
>>> roll_dice(1, 20)
    # Damage (longsword +1): 1d8 + 1
    >>> roll_dice(1, 8, modifier=1)
    # Damage (cursed longsword - 2): 1d8 - 2
    >>> roll_dice(1, 8, modifier=-2)
"""
randint = random.randint
return sum(map(lambda x: randint(1, sides), range(times))) + modifier
class Alignment:
LAWFUL_GOOD = 'LG'
LAWFUL_NEUTRAL = 'LN'
LAWFUL_EVIL = 'LE'
NEUTRAL_GOOD = 'NG'
NEUTRAL = 'NN'
NEUTRAL_EVIL = 'NE'
CHAOTIC_GOOD = 'CG'
CHAOTIC_NEUTRAL = 'CN'
CHAOTIC_EVIL = 'CE'
CHOICES = (
(LAWFUL_GOOD, 'Lawful Good'),
(LAWFUL_NEUTRAL, 'Lawful Neutral'),
(LAWFUL_EVIL, 'Lawful Evil'),
(NEUTRAL_GOOD, 'Neutral Good'),
        (NEUTRAL, 'Neutral'),
(NEUTRAL_EVIL, 'Neutral Evil'),
(CHAOTIC_GOOD, 'Chaotic Good'),
(CHAOTIC_NEUTRAL, 'Chaotic Neutral'),
(CHAOTIC_EVIL, 'Chaotic Evil'),
)
class Race(models.Model):
name = models.CharField('race', max_length=200)
description = models.TextField()
def __str__(self):
return self.name
class Class(models.Model):
name = models.CharField('class', max_length=200)
description = models.TextField()
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Classes"
class Item(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
value = models.IntegerField(default=0)
weight = models.IntegerField(default=1)
def __str__(self):
return self.name
class CharacterManager(models.Manager):
def for_user(self, user):
return self.get_query_set().filter(player=user)
class Character(models.Model):
objects = CharacterManager()
player = models.ForeignKey(User)
name = models.CharField(max_length=200)
background = models.TextField()
race = models.ForeignKey(Race)
# This name isn't ideal, but 'class' is a Python builtin, so here we are...
# I went with 'cclass' as short for 'character class'.
cclass = models.ForeignKey(Class, verbose_name="class")
alignment = models.CharField(max_length=2, choices=Alignment.CHOICES,
default=Alignment.NEUTRAL)
level = models.IntegerField(default=1)
experience_points = models.IntegerField(default=0)
max_hit_points = models.IntegerField(default=10)
current_hit_points = models.IntegerField(default=10)
strength = models.IntegerField(default=generate_stat)
dexterity = models.IntegerField(default=generate_stat)
constitution = models.IntegerField(default=generate_stat)
intelligence = models.IntegerField(default=generate_stat)
wisdom = models.IntegerField(default=generate_stat)
charisma = models.IntegerField(default=generate_stat)
inventory = models.ManyToManyField(Item, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
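# A minimal usage sketch (illustrative only; assumes migrations are applied and
# the referenced User/Race/Class rows already exist; the lookup values are
# hypothetical):
#
#   conan = Character.objects.create(
#       player=User.objects.get(username='mike'),
#       name='Conan',
#       background='A wandering sellsword.',
#       race=Race.objects.get(name='Human'),
#       cclass=Class.objects.get(name='Fighter'),
#       alignment=Alignment.CHAOTIC_NEUTRAL,
#   )
#   Character.objects.for_user(conan.player)   # custom manager lookup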
|
{
"content_hash": "0d9caf8f7c614c48f81121a3f86d31b4",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 25.448529411764707,
"alnum_prop": 0.6431667148223057,
"repo_name": "mpirnat/django-tutorial-v2",
"id": "a78a68316a704850f56b7fb6ec1efc821e70f8c5",
"size": "3461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "characters/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23106"
}
],
"symlink_target": ""
}
|
import sys, csv, json, argparse
sys.path.append("../../")
from model import GeoMF_D, GeoMF_O, HeSig, Distance
settings = json.loads(open("../../../SETTINGS.json").read())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', type=int, action='store', dest='model_id',
help='choose which model to learn from data')
parser.add_argument('-d', type=int, action='store',
dest='data_id', help='choose which data set to use')
if len(sys.argv) != 5:
print 'Command e.g.: python train.py -m 0(1,2,3,4) -d 0(1)'
sys.exit(1)
result_paths = [[settings["GEOMF-D_RESULT1"], settings["GEOMF-K_RESULT1"], settings["GEOMF-O_RESULT1"], settings["HESIG_RESULT1"], settings["DISTANCE_RESULT1"]], [settings["GEOMF-D_RESULT1"], settings["GEOMF-K_RESULT1"], settings["GEOMF-O_RESULT1"], settings["HESIG_RESULT1"], settings["DISTANCE_RESULT1"]]]
para = parser.parse_args()
if para.data_id == 0:
event_info_path = settings["ROOT_PATH"]+settings["SRC_DATA_FILE1_CITY1"]
event_train_path = settings["ROOT_PATH"]+settings["DATA1_CITY1_TRAIN"]
event_test_path = settings["ROOT_PATH"]+settings["DATA1_CITY1_TEST"]
elif para.data_id == 1:
event_info_path = settings["ROOT_PATH"]+settings["SRC_DATA_FILE1_CITY2"]
event_train_path = settings["ROOT_PATH"]+settings["DATA1_CITY2_TRAIN"]
event_test_path = settings["ROOT_PATH"]+settings["DATA1_CITY2_TEST"]
else:
print 'Invalid choice of dataset'
sys.exit(1)
result_path = settings["ROOT_PATH"] + result_paths[para.data_id][para.model_id]
if para.model_id == 0:
model = GeoMF_D(1, True, event_train_path, event_info_path, para.data_id)
model.model_init(event_train_path, event_info_path)
elif para.model_id == 1:
model = GeoMF_D(2, True, event_train_path, event_info_path, para.data_id)
model.model_init(event_train_path, event_info_path)
elif para.model_id == 2:
model = GeoMF_O(para.data_id)
model.model_init(event_train_path, event_info_path)
elif para.model_id == 3:
model = HeSig(2, True, event_train_path, event_info_path, para.data_id)
model.model_init(event_train_path, event_info_path)
elif para.model_id == 4:
model = Distance(para.data_id)
model.model_init(event_train_path, event_info_path)
else:
print 'Invalid choice of model'
sys.exit(1)
model.train()
model.genRecommendResult(event_test_path, result_path)
if __name__ == "__main__":
main()
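# Example invocation, mirroring the usage message above (model 0 = GeoMF-D,
# dataset 0 = city 1; paths come from SETTINGS.json):
#
#   python train.py -m 0 -d 0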
|
{
"content_hash": "a017c62559f1cfd7c1d119d048080ea9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 311,
"avg_line_length": 43.644067796610166,
"alnum_prop": 0.6353398058252427,
"repo_name": "anthonylife/EventRecommendation",
"id": "00899265146a411fb6b2acd4d39c362891f8b21c",
"size": "3445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/GeoBased/PY/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "23097"
},
{
"name": "Python",
"bytes": "127209"
},
{
"name": "Shell",
"bytes": "201"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from django.contrib import admin
from actors.views import ActorsView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', ActorsView.as_view(), name="actors"),
]
|
{
"content_hash": "5a627a295ffab4acf54716ee7d36d1a4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 26.375,
"alnum_prop": 0.7061611374407583,
"repo_name": "bianchimro/django-search-views",
"id": "0d84e25ec28e7f3a146f0f10495e85ed34e9aaad",
"size": "211",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sample_app/sample_app/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1070"
},
{
"name": "Python",
"bytes": "25278"
}
],
"symlink_target": ""
}
|
class Animal:
def __init__(self, name): # Constructor of the class
self.name = name
def talk(self): # Abstract method, defined by convention only
raise NotImplementedError("Subclass must implement abstract method")
class Cat(Animal):
def talk(self):
return 'Meow!'
class Dog(Animal):
def talk(self):
return 'Woof! Woof!'
animals = [Cat('Missy'),
Cat('Mr. Mistoffelees'),
Dog('Lassie')]
for animal in animals:
print(animal.name + ': ' + animal.talk())
|
{
"content_hash": "dfc369fdc2c6b2c6d33fb2ec6d2c7650",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 76,
"avg_line_length": 22.291666666666668,
"alnum_prop": 0.6056074766355141,
"repo_name": "TeamLab/Gachon_CS50_Python_KMOOC",
"id": "3346c1aecb68997324e2abb3cdbab8e242b9b26a",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/9/animal_polymorphism.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22635"
},
{
"name": "Python",
"bytes": "213003"
}
],
"symlink_target": ""
}
|
"""
For basic applications, MPI is as easy to use as any other
message-passing system. The sample code below contains the complete
communications skeleton for a dynamically load balanced master/slave
application. Following the code is a description of the few functions
necessary to write typical parallel applications.
For data-parallel (or embarrassingly parallel) problems.
Important parameters
--------------------
status = MPI.Status() # where all info is stored
# Receive results from a slave
result = comm.recv( # message buffer
source=MPI.ANY_SOURCE, # receive from any sender (-1)
tag=MPI.ANY_TAG, # any type of message (-1)
status=status) # info about the received msg (class)
# Send the slave a new work unit
comm.send(work, # message buffer
dest=status.Get_source(), # to whom we just received from
tag=WORKTAG) # user chosen message tag
"""
# Fernandoo Paolo <fpaolo@ucsd.edu>
# Jan 15, 2013
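# A minimal usage sketch (assumed invocation; the program and data file names
# below are hypothetical):
#
#   mpiexec -n 4 python mpi-submit2.py process.py data1.txt data2.txt
#
# Rank 0 acts as master and hands one '<prog> <file>' command to each slave
# rank; slaves execute it via do_work() and request more work until DIETAG.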
import os
import sys
import numpy as np
from mpi4py import MPI
from Queue import Queue
WORKTAG = 1
DIETAG = 2
class Work(object):
def __init__(self, prog, files):
        # important: sort by file size in decreasing order!
files.sort(key=lambda f: os.stat(f).st_size, reverse=True)
q = Queue()
for f in files:
q.put(' '.join([prog, f]))
self.work = q
def get_next(self):
if self.work.empty():
return None
return self.work.get()
def do_work(work):
if '.py' in work:
os.system('python ' + work)
else:
os.system(work) # for './'
return
def process_result(result):
pass
def master(comm):
num_procs = comm.Get_size()
status = MPI.Status()
# generate work queue
wq = Work(sys.argv[1], sys.argv[2:])
# Seed the slaves, send one unit of work to each slave (rank)
for rank in xrange(1, num_procs):
work = wq.get_next()
comm.send(work, dest=rank, tag=WORKTAG)
# Loop over getting new work requests until there is no more work to be done
while True:
work = wq.get_next()
if not work: break
# Receive results from a slave
result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
#process_result(result)
# Send the slave a new work unit
comm.send(work, dest=status.Get_source(), tag=WORKTAG)
# No more work to be done, receive all outstanding results from slaves
for rank in xrange(1, num_procs):
result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
#process_result(result)
# Tell all the slaves to exit by sending an empty message with DIETAG
for rank in xrange(1, num_procs):
comm.send(0, dest=rank, tag=DIETAG)
def slave(comm):
my_rank = comm.Get_rank()
status = MPI.Status()
while True:
# Receive a message from the master
work = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
# Check the tag of the received message
if status.Get_tag() == DIETAG: break
# Do the work
result = do_work(work)
# Send the result back
comm.send(result, dest=0, tag=0)
def main():
comm = MPI.COMM_WORLD
my_rank = comm.Get_rank()
my_name = MPI.Get_processor_name()
#comm.Barrier()
#start = MPI.Wtime()
if my_rank == 0:
master(comm)
else:
slave(comm)
#comm.Barrier()
#end = MPI.Wtime()
#print 'time:', end - start
if __name__ == '__main__':
main()
|
{
"content_hash": "9cea4643e091a4aca7ac0d4ad64a1f44",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 81,
"avg_line_length": 26.639705882352942,
"alnum_prop": 0.6075075903947005,
"repo_name": "fspaolo/mpisubmit",
"id": "1d8ce4d958e4602b068f163390772a70dd0069ee",
"size": "3645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpi-submit2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4858"
},
{
"name": "Shell",
"bytes": "1069"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Contract.budget'
db.alter_column(u'build_contract', 'budget', self.gf('django.db.models.fields.SmallIntegerField')(null=True))
# Changing field 'Contract.summ_mo_money'
db.alter_column(u'build_contract', 'summ_mo_money', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'Contract.summ_without_mo_money'
db.alter_column(u'build_contract', 'summ_without_mo_money', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'Contract.summa'
db.alter_column(u'build_contract', 'summa', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'CopyContract.budget'
db.alter_column(u'build_copycontract', 'budget', self.gf('django.db.models.fields.SmallIntegerField')(null=True))
# Changing field 'CopyContract.summ_mo_money'
db.alter_column(u'build_copycontract', 'summ_mo_money', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'CopyContract.summ_without_mo_money'
db.alter_column(u'build_copycontract', 'summ_without_mo_money', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'CopyContract.summa'
db.alter_column(u'build_copycontract', 'summa', self.gf('django.db.models.fields.FloatField')(null=True))
def backwards(self, orm):
# Changing field 'Contract.budget'
db.alter_column(u'build_contract', 'budget', self.gf('django.db.models.fields.IntegerField')(max_length=1024, null=True))
# Changing field 'Contract.summ_mo_money'
db.alter_column(u'build_contract', 'summ_mo_money', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'Contract.summ_without_mo_money'
db.alter_column(u'build_contract', 'summ_without_mo_money', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'Contract.summa'
db.alter_column(u'build_contract', 'summa', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'CopyContract.budget'
db.alter_column(u'build_copycontract', 'budget', self.gf('django.db.models.fields.IntegerField')(max_length=1024, null=True))
# Changing field 'CopyContract.summ_mo_money'
db.alter_column(u'build_copycontract', 'summ_mo_money', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'CopyContract.summ_without_mo_money'
db.alter_column(u'build_copycontract', 'summ_without_mo_money', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'CopyContract.summa'
db.alter_column(u'build_copycontract', 'summa', self.gf('django.db.models.fields.IntegerField')(null=True))
models = {
'build.building': {
'Meta': {'object_name': 'Building'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contract': {
'Meta': {'object_name': 'Contract'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'budget': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'summa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contractdocuments': {
'Meta': {'object_name': 'ContractDocuments'},
'acceptance_acts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'approval_citizen_statement': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'building_permissions': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cost_infos': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'facility_permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'hiring_contract': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_right_stating': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mo_certificate': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mo_notice_to_citizen': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mun_act_to_fond': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mun_contracts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'photos': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'protocols': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tec_passport_tec_plan': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'transmission_acts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'build.copybuilding': {
'Meta': {'object_name': 'CopyBuilding'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.copycontract': {
'Meta': {'object_name': 'CopyContract'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'budget': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'summa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.ground': {
'Meta': {'object_name': 'Ground'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.basehallway': {
'Meta': {'object_name': 'BaseHallway'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.basekitchen': {
'Meta': {'object_name': 'BaseKitchen'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.baseroom': {
'Meta': {'object_name': 'BaseRoom'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.basewc': {
'Meta': {'object_name': 'BaseWC'},
'bath_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_toilet': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_tower_dryer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'wc_switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.developer': {
'Meta': {'object_name': 'Developer'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'boss_position': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'face_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
u'core.hallway': {
'Meta': {'object_name': 'Hallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.kitchen': {
'Meta': {'object_name': 'Kitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'stove': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.room': {
'Meta': {'object_name': 'Room', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.wc': {
'Meta': {'object_name': 'WC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'separate': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'})
}
}
complete_apps = ['build']
|
{
"content_hash": "9d8f48db3098496d091abdfd24d81190",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 174,
"avg_line_length": 95.79333333333334,
"alnum_prop": 0.5573340756721646,
"repo_name": "zionist/mon",
"id": "f41724921a033e091a75c582c2b88d5790ad5851",
"size": "43131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mon/apps/build/migrations/0018_auto__chg_field_contract_budget__chg_field_contract_summ_mo_money__chg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4209"
},
{
"name": "CoffeeScript",
"bytes": "2260"
},
{
"name": "JavaScript",
"bytes": "2957"
},
{
"name": "Python",
"bytes": "2730487"
}
],
"symlink_target": ""
}
|
SLEEP = 900 # 15 min
import spasm.data.sources as _data
import spasm.web.sources as _web
import time
Data = _data.MySpace()
Web = _web.MySpace()
def run(artist):
if artist['myspace_url']:
# Update stats, shows, news
web2data = (
(Web.get_stats, Data.add_stats),
(Web.get_shows, Data.update_show),
(Web.get_news, Data.update_news))
for web, data in web2data:
dd = web(artist)
for d in dd:
data(d)
if __name__ == "__main__":
while True:
artists = Data.get_artists()
for artist in artists:
run(artist)
time.sleep(SLEEP)
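# Illustrative note (not part of the original crawler): run() pairs each MySpace
# "web" getter with its matching "data" writer, so one artist update is simply
#     for web_getter, data_writer in web2data:
#         for record in web_getter(artist):
#             data_writer(record)
# and the module-level SLEEP throttles the polling loop to one full pass every 15 minutes.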
|
{
"content_hash": "d8306e54139ae6ed92967cb1bc5d266c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 46,
"avg_line_length": 23.066666666666666,
"alnum_prop": 0.5202312138728323,
"repo_name": "msmathers/SpasmDB",
"id": "2f65004fa62676af28a3ba3a5ba8605f1bcbea1e",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spasm/crawlers/myspace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "24843"
},
{
"name": "Python",
"bytes": "65738"
}
],
"symlink_target": ""
}
|
import unittest
from Kamaelia.Sketch.TagFilterComponent import TagFilter
class TestSubtitleFilter(unittest.TestCase):
def setUp(self):
self.sf = TagFilter()
def test_BasicStringPasses(self):
"""Tests that a tag free short string passes through unchanged."""
teststring = "This is a normal string."
out = self.sf.filter(teststring) + self.sf.filter("")
self.failUnless(teststring == out,out)
def test_MaintestAllAtOnce(self):
"""Tests that a large string including tags is properly filtered. See in the file to view the test input and output."""
output = self.sf.filter(themaintestinput) + self.sf.filter("")
self.failUnless(themaintestoutput == output)
def test_locateDifferences(self):
"""Repeats the all at one test but also tries to give details about where
the files differed. This is more a debugging tool than a pass/fail unittest."""
out = self.sf.filter(themaintestinput) + self.sf.filter("")
for i in xrange(0,len(out)):
if out[i] != themaintestoutput[i]:
self.fail("Difference at character " + str(i) + " " + out[i] + "\n" + out[i-90:i+45] + "\n" + themaintestoutput[i-90:i+45])
def test_bitbybit(self):
"""Repeats the main test with the same strings but this time passes the
text to the filter a chunk at a time. It also tries to print the location
of problems to assist debugging."""
out = ""
pos = 0
while pos <= len(themaintestinput):
out = out + self.sf.filter(themaintestinput[pos:pos +20])
pos = pos + 20
out = out + self.sf.filter("")
for i in xrange(0,len(out)):
if out[i] != themaintestoutput[i]:
self.fail("Difference at character " + str(i) + " " + out[i] + "\n" + out[i-90:i+45] + "\n" + themaintestoutput[i-90:i+45])
self.failUnless(out == themaintestoutput)
def suite():
return unittest.makeSuite(TestSubtitleFilter)
themaintestinput = """<font color="#FFFF00"/> careful decision whether it will<font color="#FFFF00"/> enhance his career. He's not the<font color="#FFFF00"/> best in England u Frank Lamp ard<font color="#FFFF00"/> won the player of the year. And<font color="#FFFF00"/> both of them, we might bin the -<font color="#FFFF00"/> win the World Cup!.<font color="#FFFFFF"/> Getting ahead<font color="#FFFFFF"/> of yourself!<font color="#FFFFFF"/> Shouldn't praise be given to both<font color="#FFFFFF"/> teams, without the diving and<font color="#FFFFFF"/> screaming at referees. And TS says<font color="#FFFFFF"/> it was a great advert for English<font color="#FFFFFF"/> football.<font color="#FFFF00"/> I think it was a good<font color="#FFFF00"/> point. The Milan team, the Italian<font color="#FFFF00"/> side you might have thought they<font color="#FFFF00"/>would resort to unsavoury tactics-"""
themaintestoutput = """ careful decision whether it will enhance his career. He's not the best in England u Frank Lamp ard won the player of the year. And both of them, we might bin the - win the World Cup!. Getting ahead of yourself! Shouldn't praise be given to both teams, without the diving and screaming at referees. And TS says it was a great advert for English football. I think it was a good point. The Milan team, the Italian side you might have thought theywould resort to unsavoury tactics-"""
if __name__=='__main__':
unittest.main()
|
{
"content_hash": "ce2e4cfe67f3846358882868a693827b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 894,
"avg_line_length": 68.6,
"alnum_prop": 0.6787172011661807,
"repo_name": "bbc/kamaelia",
"id": "5dd38ea05402cbfe17dce06585bbd43c1281b227",
"size": "4332",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/Old/test/test_TagFilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
"""
Yet Another S3-backed File System, or yas3fs,
is a FUSE file system designed for speed:
it caches data locally and uses SNS to notify
other nodes of changes that require cache invalidation.
"""
import urllib
import argparse
import errno
import stat
import time
import os
import os.path
import mimetypes
import sys
import json
import threading
import socket
import itertools
import base64
import logging
import signal
import io
import re
import uuid
import copy
import traceback
import datetime as dt
import gc # For debug only
import pprint # For debug only
from tempfile import mkdtemp
from shutil import rmtree
if sys.version_info < (3, ): # python2
from urllib import unquote_plus
from urlparse import urlparse
from Queue import Queue
from Queue import Empty as QueueEmpty
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from urllib2 import urlopen
else: # python3
from urllib.parse import urlparse
from urllib.parse import unquote_plus
from queue import Queue
from queue import Empty as QueueEmpty
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from sys import exit
from functools import wraps
from .fuse import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context
import boto
import boto.s3
import boto.s3.connection
import boto.sns
import boto.sqs
import boto.utils
from boto.utils import compute_md5, compute_hash
from boto.s3.key import Key
import boto3
from .YAS3FSPlugin import YAS3FSPlugin
from ._version import __version__
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
class UTF8DecodingKey(boto.s3.key.Key):
BufferSize = 131072
def __init__(self, key_or_bucket=None, name=None):
if isinstance(key_or_bucket, boto.s3.key.Key):
# this is a key,
self.__dict__.update(key_or_bucket.__dict__)
if name is not None:
self.name = name
else:
# this is a bucket
super(UTF8DecodingKey, self).__init__(key_or_bucket,name)
def __str__(self):
if self.name is None:
return 'None'
if isinstance(self.name, bytes):
return self.name.decode('utf8', 'replace')
return self.name
def compute_md5(self, fp, size=None):
hex_digest, b64_digest, data_size = compute_md5(fp, buf_size=131072, size=size)
self.size = data_size
return (hex_digest, b64_digest)
class Interval():
""" Simple integer interval arthmetic."""
def __init__(self):
self.l = [] # A list of tuples
def add(self, t):
assert t[0] <= t[1]
nl = []
for i in self.l:
i0 = i[0] - 1 # To take into account consecutive _integer_ intervals
i1 = i[1] + 1 # Same as above
if (i0 <= t[0] and t[0] <= i1) or (i0 <= t[1] and t[1]<= i1) or (t[0] <= i[0] and i[1] <= t[1]):
t[0] = min(i[0], t[0]) # Enlarge t interval
t[1] = max(i[1], t[1])
else:
nl.append(i)
nl.append(t)
self.l = nl
def contains(self, t):
assert t[0] <= t[1]
for i in self.l:
if (i[0] <= t[0] and t[1] <= i[1]):
return True
return False
def intersects(self, t):
assert t[0] <= t[1]
for i in self.l:
if (i[0] <= t[0] and t[0] <= i[1]) or (i[0] <= t[1] and t[1]<= i[1]) or (t[0] <= i[0] and i[1] <= t[1]):
return True
return False
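# A minimal sketch (not part of the original yas3fs code) of how Interval merges
# consecutive integer ranges; note that add() mutates its argument, so it must be
# given a list rather than a tuple.
def _interval_usage_sketch():
    iv = Interval()
    iv.add([0, 5])
    iv.add([6, 10])                 # consecutive with [0, 5], so the two ranges merge
    assert iv.contains([2, 8])
    assert not iv.intersects([12, 15])
    return iv.l                     # [[0, 10]]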
class ISO8601Formatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
if datefmt:
return super(ISO8601Formatter, self).formatTime(record, datefmt)
ct = self.converter(record.created)
return "%s.%03d" % (time.strftime("%Y-%m-%dT%H:%M:%S", ct), record.msecs)
class LinkedListElement():
""" The element of a linked list."""
def __init__(self, value, next=None):
self.value = value
if next:
self.append(next)
else:
self.next = None
self.prev = None
def delete(self):
self.prev.next = self.next
self.next.prev = self.prev
return self.value
def append(self, next):
self.prev = next.prev
self.next = next
next.prev.next = self
next.prev = self
class LinkedList():
""" A linked list that is used by yas3fs as a LRU index
for the file system cache."""
def __init__(self):
self.tail = LinkedListElement(None)
self.head = LinkedListElement(None)
self.head.next = self.tail
self.tail.prev = self.head
self.index = {}
self.lock = threading.RLock()
def append(self, value):
with self.lock:
if value not in self.index:
new = LinkedListElement(value, self.tail)
self.index[value] = new
def popleft(self):
with self.lock:
if self.head.next != self.tail:
value = self.head.next.delete()
del self.index[value]
return value
else:
return None
def delete(self, value):
with self.lock:
if value in self.index:
self.index[value].delete()
del self.index[value]
def move_to_the_tail(self, value):
with self.lock:
if value in self.index:
old = self.index[value]
old.delete()
old.append(self.tail)
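# A minimal sketch (not part of the original module) of the LRU behaviour yas3fs
# relies on: the head side is least recently used, move_to_the_tail() refreshes an
# entry, and popleft() evicts the oldest one.
def _linkedlist_lru_sketch():
    lru = LinkedList()
    for path in ('/a', '/b', '/c'):
        lru.append(path)
    lru.move_to_the_tail('/a')      # '/a' becomes the most recently used entry
    return lru.popleft()            # -> '/b', now the least recently used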
class FSRange():
"""A range used to manage buffered downloads from S3."""
io_wait = 3.0 # 3 seconds
def __init__(self):
self.interval = Interval()
self.ongoing_intervals = {}
self.event = threading.Event()
self.lock = threading.RLock()
def wait(self):
self.event.wait(self.io_wait)
def wake(self, again=True):
with self.lock:
e = self.event
if again:
self.event = threading.Event()
e.set()
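# Illustrative note (not part of the original module): FSRange couples an Interval of
# already-downloaded byte ranges with a threading.Event. The buffered download code
# later in this file appears to use it as a rendezvous: a reader blocks in wait()
# (at most io_wait seconds per iteration) until the bytes it needs are covered by the
# interval, while the downloading side calls wake() whenever new data arrives.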
class FSData():
"""The data (content) associated with a file."""
stores = [ 'mem', 'disk' ]
unknown_store = "Unknown store"
def __init__(self, cache, store, path):
self.cache = cache
self.store = store
self.path = path
self.props = {}
self.size = 0
self.etag = None # Something better ???
if store == 'mem':
self.content = io.BytesIO()
elif store == 'disk':
previous_file = False
filename = self.cache.get_cache_filename(self.path)
if os.path.isfile(filename):
logger.debug("found previous cache file '%s'" % filename)
# There's a file already there
self.content = open(filename, mode='rb+')
self.update_size()
self.content.close()
self.set('new', None) # Not sure it is the latest version
# Now search for an etag file
etag_filename = self.cache.get_cache_etags_filename(self.path)
if os.path.isfile(etag_filename):
logger.debug("found previous cache etag file '%s'" % etag_filename)
with open(etag_filename, mode='r') as etag_file:
self.etag = etag_file.read()
previous_file = True
if not previous_file:
logger.debug("creating new cache file '%s'" % filename)
with self.cache.disk_lock:
create_dirs_for_file(filename)
open(filename, mode='w').close() # To create an empty file (and overwrite a previous file)
logger.debug("created new cache file '%s'" % filename)
self.content = None # Not open, yet
else:
raise Exception(FSData.unknown_store)
def get_lock(self, wait_until_cleared_proplist = None):
return self.cache.get_lock(self.path, wait_until_cleared_proplist)
def open(self):
with self.get_lock():
if not self.has('open'):
if self.store == 'disk':
filename = self.cache.get_cache_filename(self.path)
self.content = open(filename, mode='rb+')
self.inc('open')
def close(self):
with self.get_lock():
self.dec('open')
if not self.has('open'):
if self.store == 'disk':
self.content.close()
self.content = None
def update_etag(self, new_etag, wait_until_cleared_proplist = None):
with self.get_lock(wait_until_cleared_proplist):
if new_etag != self.etag:
self.etag = new_etag
if self.store == 'disk':
filename = self.cache.get_cache_etags_filename(self.path)
with self.cache.disk_lock:
create_dirs_for_file(filename)
with open(filename, mode='w') as etag_file:
etag_file.write(new_etag)
def get_current_size(self):
if self.content:
with self.get_lock():
self.content.seek(0,2)
return self.content.tell()
else:
return 0 # There's no content...
def update_size(self, final=False):
with self.get_lock():
if final:
current_size = 0 # The entry is to be deleted
else:
current_size = self.get_current_size()
delta = current_size - self.size
self.size = current_size
with self.cache.data_size_lock:
self.cache.size[self.store] += delta
def get_content(self, wait_until_cleared_proplist = None):
with self.get_lock(wait_until_cleared_proplist):
if self.store == 'disk':
filename = self.cache.get_cache_filename(self.path)
return open(filename, mode='rb+')
else:
return self.content
def get_content_as_string(self):
if self.store == 'mem':
with self.get_lock():
return self.content.getvalue()
elif self.store == 'disk':
with self.get_lock():
self.content.seek(0) # Go to the beginning
return self.content.read()
else:
raise Exception(FSData.unknown_store)
def has(self, prop):
with self.get_lock():
return prop in self.props
def get(self, prop):
with self.get_lock():
try:
return self.props[prop]
except KeyError:
return None
def set(self, prop, value):
with self.get_lock():
self.props[prop] = value
def inc(self, prop):
with self.get_lock():
try:
self.props[prop] += 1
except KeyError:
self.props[prop] = 1
def dec(self, prop):
with self.get_lock():
try:
if self.props[prop] > 1:
self.props[prop] -= 1
else:
del self.props[prop]
except KeyError:
pass # Nothing to do
def delete(self, prop=None, wait_until_cleared_proplist = None):
with self.get_lock(wait_until_cleared_proplist):
if prop is None:
if self.store == 'disk':
filename = self.cache.get_cache_filename(self.path)
with self.cache.disk_lock:
if os.path.isfile(filename):
logger.debug("unlink cache file '%s'" % filename)
os.unlink(filename)
remove_empty_dirs_for_file(filename)
etag_filename = self.cache.get_cache_etags_filename(self.path)
with self.cache.disk_lock:
if os.path.isfile(etag_filename):
logger.debug("unlink cache etag file '%s'" % etag_filename)
os.unlink(etag_filename)
remove_empty_dirs_for_file(etag_filename)
self.content = None # If not
self.update_size(True)
for p in list(self.props.keys()):
self.delete(p)
elif prop in self.props:
if prop == 'range':
logger.debug('there is a range to delete')
data_range = self.get(prop)
else:
data_range = None
del self.props[prop]
if data_range:
logger.debug('wake after range delete')
data_range.wake(False) # To make downloading threads go on... and then exit
# for https://github.com/danilop/yas3fs/issues/52
if prop == 'change' and 'invoke_after_change' in self.props:
logger.debug('FSData.props[change] removed, now executing invoke_after_change lambda for: ' + self.path)
self.get('invoke_after_change')(self.path)
                    del self.props['invoke_after_change'] # cleanup
def rename(self, new_path):
with self.get_lock():
if self.store == 'disk':
filename = self.cache.get_cache_filename(self.path)
new_filename = self.cache.get_cache_filename(new_path)
etag_filename = self.cache.get_cache_etags_filename(self.path)
new_etag_filename = self.cache.get_cache_etags_filename(new_path)
with self.cache.disk_lock:
create_dirs_for_file(new_filename)
os.rename(filename, new_filename)
with self.cache.disk_lock:
remove_empty_dirs_for_file(filename)
if os.path.isfile(etag_filename):
with self.cache.disk_lock:
create_dirs_for_file(new_etag_filename)
os.rename(etag_filename, new_etag_filename)
with self.cache.disk_lock:
remove_empty_dirs_for_file(etag_filename)
if self.content:
self.content = open(new_filename, mode='rb+')
self.path = new_path
class FSCache():
""" File System Cache """
def __init__(self, cache_path=None):
self.cache_path = cache_path
self.lock = threading.RLock()
self.disk_lock = threading.RLock() # To safely remove empty disk directories
self.data_size_lock = threading.RLock()
self.reset_all()
def reset_all(self):
with self.lock:
self.entries = {}
self.new_locks = {} # New locks (still) without entry in the cache
self.unused_locks = {} # Paths with unused locks that will be removed on the next purge if remain unused
self.lru = LinkedList()
self.size = {}
for store in FSData.stores:
self.size[store] = 0
def get_memory_usage(self):
return [ len(self.entries) ] + [ self.size[store] for store in FSData.stores ]
def get_cache_filename(self, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
return self.cache_path + '/files' + path # path begins with '/'
def get_cache_etags_filename(self, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
return self.cache_path + '/etags' + path # path begins with '/'
def is_deleting(self, path, prop = 'deleting'):
if not self.has(path, prop):
return False
if self.get(path, prop) == 0:
return False
return True
def is_ready(self, path, proplist = None):
return self.wait_until_cleared(path, proplist = proplist)
def wait_until_cleared(self, path, proplist = None, max_retries = 10, wait_time = 1):
default_proplist = ['deleting', 's3_busy']
if proplist is None:
proplist = default_proplist
for prop in proplist:
if not self.has(path, prop):
continue
cleared = False
for check_count in range(0, max_retries):
if check_count:
logger.debug("wait_until_cleared %s found something for %s. (%i) "%(prop, path, check_count))
# the cache/key disappeared
if not self.has(path, prop):
logger.debug("wait_until_cleared %s did not find %s anymore."%(prop, path))
cleared = True
break
# the cache got a '.dec()' from do_on_s3_now...
if self.get(path, prop) == 0:
logger.debug("wait_until_cleared %s got all dec for %s anymore."%(prop, path))
cleared = True
break
time.sleep(wait_time)
if not cleared:
# import inspect
# inspect_stack = inspect.stack()
# logger.critical("WAIT_UNTIL_CLEARED stack: '%s'"% pp.pformat(inspect_stack))
logger.error("wait_until_cleared %s could not clear '%s'" % (prop, path))
raise Exception("Path has not yet been cleared but operation wants to happen on it '%s' '%s'"%(prop, path))
return True
def get_lock(self, path, skip_is_ready = False, wait_until_cleared_proplist = None):
if not skip_is_ready:
self.is_ready(path, proplist = wait_until_cleared_proplist)
with self.lock: # Global cache lock, used only for giving file-level locks
try:
lock = self.entries[path]['lock']
return lock
except KeyError:
try:
return self.new_locks[path]
except KeyError:
new_lock = threading.RLock()
self.new_locks[path] = new_lock
return new_lock
def add(self, path):
with self.get_lock(path):
if not path in self.entries:
self.entries[path] = {}
self.entries[path]['lock'] = self.new_locks[path]
del self.new_locks[path]
self.lru.append(path)
def delete(self, path, prop=None):
with self.get_lock(path):
if path in self.entries:
if prop is None:
for p in list(self.entries[path].keys()):
self.delete(path, p)
del self.entries[path]
self.lru.delete(path)
else:
if prop in self.entries[path]:
if prop == 'data':
data = self.entries[path][prop]
data.delete() # To clean stuff, e.g. remove cache files
elif prop == 'lock':
# Preserve lock, let the unused locks check remove it later
self.new_locks[path] = self.entries[path][prop]
del self.entries[path][prop]
def rename(self, path, new_path):
with self.get_lock(path) and self.get_lock(new_path):
if path in self.entries:
self.delete(path, 'key') # Cannot be renamed
self.delete(new_path) # Assume overwrite
if 'data' in self.entries[path]:
data = self.entries[path]['data']
with data.get_lock():
data.rename(new_path)
self.entries[new_path] = copy.copy(self.entries[path])
self.lru.append(new_path)
self.lru.delete(path)
# 6.59 working except rename...
# del self.entries[path] # So that the next reset doesn't delete the entry props
self.inc(path, 'deleting')
self.inc(new_path, 's3_busy')
def get(self, path, prop=None):
self.lru.move_to_the_tail(path) # Move to the tail of the LRU cache
try:
if prop is None:
return self.entries[path]
else:
return self.entries[path][prop]
except KeyError:
return None
def set(self, path, prop, value):
self.lru.move_to_the_tail(path) # Move to the tail of the LRU cache
with self.get_lock(path):
if path in self.entries:
if prop in self.entries[path]:
self.delete(path, prop)
self.entries[path][prop] = value
return True
return False
def inc(self, path, prop):
self.lru.move_to_the_tail(path) # Move to the tail of the LRU cache
with self.get_lock(path):
if path in self.entries:
try:
self.entries[path][prop] += 1
except KeyError:
self.entries[path][prop] = 1
def dec(self, path, prop):
self.lru.move_to_the_tail(path) # Move to the tail of the LRU cache
with self.get_lock(path):
if path in self.entries:
try:
if self.entries[path][prop] > 1:
self.entries[path][prop] -= 1
else:
del self.entries[path][prop]
except KeyError:
pass # Nothing to do
def reset(self, path, with_deleting = True):
with self.get_lock(path):
self.delete(path)
self.add(path)
if with_deleting:
self.inc(path, 'deleting')
def has(self, path, prop=None):
self.lru.move_to_the_tail(path) # Move to the tail of the LRU cache
if prop is None:
return path in self.entries
else:
try:
return prop in self.entries[path]
except KeyError:
return False
def is_empty(self, path): # To improve readability
if self.has(path) and not self.has(path, 'attr'):
return True
else:
return False
###try:
### return len(self.get(path)) <= 1 # Empty or just with 'lock'
###except TypeError: # if get returns None
### return False
def is_not_empty(self, path): # To improve readability
if self.has(path) and self.has(path, 'attr'):
return True
else:
return False
###try:
### return len(self.get(path)) > 1 # More than just 'lock'
###except TypeError: # if get returns None
### return False
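# A minimal sketch (not part of the original module) of FSCache/FSData with the
# in-memory store; the cache path and file path below are placeholders, and the
# 'mem' store never touches the on-disk cache directory.
def _fscache_mem_sketch():
    cache = FSCache('/tmp/yas3fs-example-cache')
    path = '/example.txt'
    cache.add(path)
    data = FSData(cache, 'mem', path)
    cache.set(path, 'data', data)
    data.get_content().write(b'hello world')
    data.update_size()                      # keeps cache.size['mem'] in sync
    return data.get_content_as_string()     # -> b'hello world'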
class SNS_HTTPServer(HTTPServer):
""" HTTP Server to receive SNS notifications via HTTP """
def set_fs(self, fs):
self.fs = fs
class SNS_HTTPRequestHandler(BaseHTTPRequestHandler):
""" HTTP Request Handler to receive SNS notifications via HTTP """
def do_POST(self):
if self.path != self.server.fs.http_listen_path:
self.send_response(404)
return
content_len = int(self.headers.getheader('content-length'))
post_body = self.rfile.read(content_len)
message_type = self.headers.getheader('x-amz-sns-message-type')
message_content = json.loads(post_body)
# Check SNS signature, I was not able to use boto for this...
url = message_content['SigningCertURL']
if not hasattr(self, 'certificate_url') or self.certificate_url != url:
logger.debug('downloading certificate')
self.certificate_url = url
self.certificate = urlopen(url).read()
signature_version = message_content['SignatureVersion']
if signature_version != '1':
logger.debug('unknown signature version')
self.send_response(404)
return
signature = message_content['Signature']
del message_content['SigningCertURL']
del message_content['SignatureVersion']
del message_content['Signature']
if 'UnsubscribeURL' in message_content:
del message_content['UnsubscribeURL']
string_to_sign = '\n'.join(list(itertools.chain.from_iterable(
[ (k, message_content[k]) for k in sorted(message_content.keys()) ]
))) + '\n'
import M2Crypto # Required to check integrity of SNS HTTP notifications
cert = M2Crypto.X509.load_cert_string(self.certificate)
pub_key = cert.get_pubkey().get_rsa()
verify_evp = M2Crypto.EVP.PKey()
verify_evp.assign_rsa(pub_key)
verify_evp.reset_context(md='sha1')
verify_evp.verify_init()
verify_evp.verify_update(string_to_sign.encode('ascii'))
if verify_evp.verify_final(signature.decode('base64')):
self.send_response(200)
if message_type== 'Notification':
message = message_content['Message']
logger.debug('message = %s' % message)
self.server.fs.process_message(message)
elif message_type == 'SubscriptionConfirmation':
token = message_content['Token']
response = self.server.fs.sns.confirm_subscription(self.server.fs.sns_topic_arn, token)
self.server.fs.http_subscription = response['ConfirmSubscriptionResponse']['ConfirmSubscriptionResult']['SubscriptionArn']
logger.debug('SNS HTTP subscription = %s' % self.server.fs.http_subscription)
else:
logger.debug('unknown message type')
return
else:
logger.debug('wrong signature')
# If nothing better, return 404
self.send_response(404)
def do_GET(self):
logger.debug('http get')
self.send_response(404)
def do_HEAD(self):
logger.debug('http head')
self.send_response(404)
class PartOfFSData():
""" To read just a part of an existing FSData, inspired by FileChunkIO """
def __init__(self, data, start, length):
self.content = data.get_content()
self.start = start
self.length = length
self.pos = 0
self.init_start = start
self.init_length = length
self.init_pos = 0
def seek(self, offset, whence=0):
logger.debug("seek '%s' '%i' '%i' " % (self.content, offset, whence))
if whence == 0:
self.pos = offset
elif whence == 1:
self.pos = self.pos + offset
elif whence == 2:
self.pos = self.length + offset
def tell(self):
return self.pos
def read(self, n=-1):
logger.debug("read '%i' '%s' at '%i' starting from '%i' for '%i'" % (n, self.content, self.pos, self.start, self.length))
if n >= 0:
n = min([n, self.length - self.pos])
self.content.seek(self.start + self.pos)
s = self.content.read(n)
if len(s) != n:
logger.error("read length not-equal! '%i' '%s' at '%i' starting from '%i' for '%i' length of return ['%s] " % (n, self.content, self.pos, self.start, self.length, len(s)))
self.pos += len(s)
return s
else:
return self.readall()
def readall(self):
return self.read(self.length - self.pos)
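# A minimal sketch (not part of the original module): reading a slice of an in-memory
# FSData through PartOfFSData. seek()/read() log through the module-level `logger`
# that yas3fs normally sets up at startup, so a fallback logger is installed here to
# keep the sketch self-contained.
def _part_of_fsdata_sketch():
    if 'logger' not in globals():
        globals()['logger'] = logging.getLogger('yas3fs-example')
    cache = FSCache('/tmp/yas3fs-example-cache')
    path = '/slice.txt'
    cache.add(path)
    data = FSData(cache, 'mem', path)
    data.get_content().write(b'0123456789')
    part = PartOfFSData(data, start=2, length=5)
    return part.read()                      # -> b'23456'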
class YAS3FS(LoggingMixIn, Operations):
""" Main FUSE Operations class for fusepy """
def __init__(self, options):
logger.info("Version: %s" % __version__)
# Some constants
### self.http_listen_path_length = 30
self.running = True
self.check_status_interval = 5.0 # Seconds, no need to configure that
self.s3_retries = options.s3_retries # Maximum number of S3 retries (outside of boto)
logger.info("s3-retries: '%i'" % self.s3_retries)
self.s3_retries_sleep = options.s3_retries_sleep # retry sleep in seconds
logger.info("s3-retries-sleep: '%i' seconds" % self.s3_retries_sleep)
self.yas3fs_xattrs = [ 'user.yas3fs.bucket', 'user.yas3fs.key', 'user.yas3fs.URL', 'user.yas3fs.signedURL', 'user.yas3fs.expiration' ]
self.multipart_uploads_in_progress = 0
# Initialization
global debug
debug = options.debug
# Parameters and options handling
self.aws_region = options.region
s3url = urlparse(options.s3path.lower())
if s3url.scheme != 's3':
error_and_exit("The S3 path to mount must be in URL format: s3://BUCKET/PATH")
self.s3_bucket_name = s3url.netloc
logger.info("S3 bucket: '%s'" % self.s3_bucket_name)
self.s3_prefix = s3url.path.strip('/')
logger.info("S3 prefix (can be empty): '%s'" % self.s3_prefix)
if self.s3_bucket_name == '':
error_and_exit("The S3 bucket cannot be empty")
self.sns_topic_arn = options.topic
if self.sns_topic_arn:
logger.info("AWS region for SNS and SQS: '" + self.aws_region + "'")
logger.info("SNS topic ARN: '%s'" % self.sns_topic_arn)
self.sqs_queue_name = options.queue # must be different for each client
self.new_queue = options.new_queue
self.new_queue_with_hostname = options.new_queue_with_hostname
if self.new_queue_with_hostname:
self.new_queue = self.new_queue_with_hostname
self.queue_wait_time = options.queue_wait
self.queue_polling_interval = options.queue_polling
if self.sqs_queue_name:
logger.info("SQS queue name: '%s'" % self.sqs_queue_name)
if self.sqs_queue_name or self.new_queue:
logger.info("SQS queue wait time (in seconds): '%i'" % self.queue_wait_time)
logger.info("SQS queue polling interval (in seconds): '%i'" % self.queue_polling_interval)
self.cache_entries = options.cache_entries
logger.info("Cache entries: '%i'" % self.cache_entries)
self.cache_mem_size = options.cache_mem_size * (1024 * 1024) # To convert MB to bytes
logger.info("Cache memory size (in bytes): '%i'" % self.cache_mem_size)
self.cache_disk_size = options.cache_disk_size * (1024 * 1024) # To convert MB to bytes
logger.info("Cache disk size (in bytes): '%i'" % self.cache_disk_size)
self.cache_on_disk = options.cache_on_disk # Bytes
logger.info("Cache on disk if file size greater than (in bytes): '%i'" % self.cache_on_disk)
self.cache_check_interval = options.cache_check # seconds
logger.info("Cache check interval (in seconds): '%i'" % self.cache_check_interval)
self.recheck_s3 = options.recheck_s3
logger.info("Cache ENOENT rechecks S3: %s" % self.recheck_s3)
self.aws_managed_encryption = options.aws_managed_encryption
logger.info("AWS Managed Encryption enabled: %s" % self.aws_managed_encryption)
self.st_blksize = None
if options.st_blksize:
self.st_blksize = options.st_blksize
logger.info("getattr() st_blksize: '%i'" % self.st_blksize)
if options.use_ec2_hostname:
instance_metadata = boto.utils.get_instance_metadata() # Very slow (to fail) outside of EC2
self.hostname = instance_metadata['public-hostname']
else:
self.hostname = options.hostname
if self.hostname:
logger.info("Public hostname to listen to SNS HTTP notifications: '%s'" % self.hostname)
self.sns_http_port = int(options.port or '0')
if options.port:
logger.info("TCP port to listen to SNS HTTP notifications: '%i'" % self.sns_http_port)
self.s3_num = options.s3_num
logger.info("Number of parallel S3 threads (0 to disable writeback): '%i'" % self.s3_num)
self.download_num = options.download_num
logger.info("Number of parallel downloading threads: '%i'" % self.download_num)
# for https://github.com/danilop/yas3fs/issues/46
self.download_retries_num = options.download_retries_num
logger.info("Number download retry attempts: '%i'" % self.download_retries_num)
self.download_retries_sleep = options.download_retries_sleep
logger.info("Download retry sleep time seconds: '%i'" % self.download_retries_sleep)
self.read_retries_num = options.read_retries_num
logger.info("Number read retry attempts: '%i'" % self.read_retries_num)
self.read_retries_sleep = options.read_retries_sleep
logger.info("Read retry sleep time seconds: '%i'" % self.read_retries_sleep)
self.prefetch_num = options.prefetch_num
logger.info("Number of parallel prefetching threads: '%i'" % self.prefetch_num)
self.buffer_size = options.buffer_size * 1024 # To convert KB to bytes
logger.info("Download buffer size (in KB, 0 to disable buffering): '%i'" % self.buffer_size)
self.buffer_prefetch = options.buffer_prefetch
logger.info("Number of buffers to prefetch: '%i'" % self.buffer_prefetch)
self.write_metadata = not options.no_metadata
logger.info("Write metadata (file system attr/xattr) on S3: '%s'" % str(self.write_metadata))
self.full_prefetch = options.prefetch
logger.info("Download prefetch: '%s'" % str(self.full_prefetch))
self.multipart_size = options.mp_size * (1024 * 1024) # To convert MB to bytes
logger.info("Multipart size: '%s'" % str(self.multipart_size))
self.multipart_num = options.mp_num
logger.info("Multipart maximum number of parallel threads: '%s'" % str(self.multipart_num))
self.multipart_retries = options.mp_retries
logger.info("Multipart maximum number of retries per part: '%s'" % str(self.multipart_retries))
self.default_expiration = options.expiration
logger.info("Default expiration for signed URLs via xattrs: '%s'" % str(self.default_expiration))
self.requester_pays = options.requester_pays
logger.info("S3 Request Payer: '%s'" % str(self.requester_pays))
self.default_headers = {}
if self.requester_pays:
self.default_headers = { 'x-amz-request-payer' : 'requester' }
crypto_headers = {}
if self.aws_managed_encryption:
crypto_headers = { 'x-amz-server-side-encryption' : 'AES256' }
self.default_write_headers = copy.copy(self.default_headers)
self.default_write_headers.update(crypto_headers)
self.darwin = options.darwin # To tailor ENOATTR for OS X
# Internal Initialization
if options.cache_path:
cache_path_prefix = options.cache_path
else:
cache_path_prefix = 'yas3fs-' + self.s3_bucket_name + '-'
if not self.s3_prefix == '':
cache_path_prefix += self.s3_prefix.replace('/', '-') + '-'
self.cache_path = mkdtemp(prefix = cache_path_prefix)
logger.info("Cache path (on disk): '%s'" % self.cache_path)
self.cache = FSCache(self.cache_path)
self.publish_queue = Queue()
self.s3_queue = {} # Of Queue()
for i in range(self.s3_num):
self.s3_queue[i] = Queue()
self.download_queue = Queue()
self.prefetch_queue = Queue()
# AWS Initialization
if not self.aws_region in (r.name for r in boto.s3.regions()):
error_and_exit("wrong AWS region '%s' for S3" % self.aws_region)
try:
s3kw = {
'calling_format': boto.s3.connection.OrdinaryCallingFormat(),
}
if options.s3_use_sigv4:
os.environ['S3_USE_SIGV4'] = 'True'
if options.s3_endpoint:
s3kw['host'] = options.s3_endpoint
self.s3 = boto.connect_s3(**s3kw)
except boto.exception.NoAuthHandlerFound:
error_and_exit("no AWS credentials found")
if not self.s3:
error_and_exit("no S3 connection")
try:
self.s3_bucket = self.s3.get_bucket(self.s3_bucket_name, headers=self.default_headers, validate=False)
# If an endpoint was not specified, make sure we're talking to S3 in the correct region.
if not options.s3_endpoint:
region_name = self.s3_bucket.get_location()
if not region_name:
region_name = "us-east-1"
logger.debug("Bucket is in region %s", region_name)
# Look for the region's endpoint via Boto.
for region in boto.s3.regions():
if region.name == region_name:
s3kw['host'] = region.endpoint
break
else:
# Assume s3.${region_name}.amazonaws.com.
# This is a hack, but should support new regions that
# aren't known to this version of Boto.
s3kw['host'] = "s3.%s.amazonaws.com" % region_name
# Reconnect to s3.
self.s3 = boto.connect_s3(**s3kw)
self.s3_bucket = self.s3.get_bucket(self.s3_bucket_name, headers=self.default_headers, validate=False)
self.s3_bucket.key_class = UTF8DecodingKey
except boto.exception.S3ResponseError as e:
error_and_exit("S3 bucket not found:" + str(e))
pattern = re.compile('[\W_]+') # Alphanumeric characters only, to be used for pattern.sub('', s)
unique_id_list = []
if options.id:
unique_id_list.append(pattern.sub('', options.id))
unique_id_list.append(str(uuid.uuid4()))
self.unique_id = '-'.join(unique_id_list)
logger.info("Unique node ID: '%s'" % self.unique_id)
if self.sns_topic_arn:
if not self.aws_region in (r.name for r in boto.sns.regions()):
error_and_exit("wrong AWS region '%s' for SNS" % self.aws_region)
self.sns = boto.sns.connect_to_region(self.aws_region)
if not self.sns:
error_and_exit("no SNS connection")
try:
topic_attributes = self.sns.get_topic_attributes(self.sns_topic_arn)
except boto.exception.BotoServerError:
error_and_exit("SNS topic ARN not found in region '%s' " % self.aws_region)
if not self.sqs_queue_name and not self.new_queue:
if not (self.hostname and self.sns_http_port):
error_and_exit("With and SNS topic either the SQS queue name or the hostname and port to listen to SNS HTTP notifications must be provided")
if self.sqs_queue_name or self.new_queue:
self.queue = None
if not self.sns_topic_arn:
error_and_exit("The SNS topic must be provided when an SQS queue is used")
if not self.aws_region in (r.name for r in boto.sqs.regions()):
error_and_exit("wrong AWS region '" + self.aws_region + "' for SQS")
self.sqs = boto.sqs.connect_to_region(self.aws_region)
if not self.sqs:
error_and_exit("no SQS connection")
if self.new_queue:
hostname_array = []
hostname = ''
if self.new_queue_with_hostname:
import socket
hostname = socket.gethostname()
# keep only the leftmost label: cut at the first character that is not alphanumeric or a dash
hostname = re.sub(r'[^A-Za-z0-9\-].*', '', hostname)
# then strip any remaining non-alphanumeric characters (including dashes)
hostname = re.sub(r'[^A-Za-z0-9]', '', hostname)
hostname_array = [hostname]
self.sqs_queue_name = '-'.join([ 'yas3fs',
pattern.sub('', self.s3_bucket_name),
pattern.sub('', self.s3_prefix),
hostname,
self.unique_id])
self.sqs_queue_name = self.sqs_queue_name[:80] # fix for https://github.com/danilop/yas3fs/issues/40
self.sqs_queue_name = re.sub(r'-+', '-', self.sqs_queue_name)
logger.info("Attempting to create SQS queue: " + self.sqs_queue_name)
else:
self.queue = self.sqs.lookup(self.sqs_queue_name)
if not self.queue:
try:
self.queue = self.sqs.create_queue(self.sqs_queue_name)
except boto.exception.SQSError as sqsErr:
error_and_exit("Unexpected error creating SQS queue:" + str(sqsErr))
logger.info("SQS queue name (new): '%s'" % self.sqs_queue_name)
self.queue.set_message_class(boto.sqs.message.RawMessage) # There is a bug with the default Message class in boto
self.current_user_principalId = None
try:
iam = boto.connect_iam()
self.current_user_principalId = 'AWS:'+iam.get_user()['get_user_response']['get_user_result']['user']['user_id']
logger.info("Current user principalId: "+self.current_user_principalId)
except Exception as e:
try:
sts = boto3.client('sts')
self.current_user_principalId = 'AWS:'+sts.get_caller_identity()['UserId']
logger.info("Current user principalId: "+self.current_user_principalId)
except Exception as e:
logger.warn("Failed to get current user principalId: "+str(e))
if self.hostname or self.sns_http_port:
if not self.sns_topic_arn:
error_and_exit("The SNS topic must be provided when the hostname/port to listen to SNS HTTP notifications is given")
if self.sns_http_port:
if not self.hostname:
error_and_exit("The hostname must be provided with the port to listen to SNS HTTP notifications")
### self.http_listen_path = '/sns/' + base64.urlsafe_b64encode(os.urandom(self.http_listen_path_length))
self.http_listen_path = '/sns'
self.http_listen_url = "http://%s:%i%s" % (self.hostname, self.sns_http_port, self.http_listen_path)
if self.multipart_size < 5242880:
error_and_exit("The minimum size for multipart upload supported by S3 is 5MB")
if self.multipart_retries < 1:
error_and_exit("The number of retries for multipart uploads cannot be less than 1")
self.plugin = None
if (options.with_plugin_file):
self.plugin = YAS3FSPlugin.load_from_file(self, options.with_plugin_file, options.with_plugin_class)
elif (options.with_plugin_class):
self.plugin = YAS3FSPlugin.load_from_class(self, options.with_plugin_class)
if self.plugin:
self.plugin.logger = logger
# save this object for later use in remove_empty_dirs()
global yas3fsobj
yas3fsobj = self
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
# Simple decorator: route the call through the plugin handler with the same name, if the plugin defines one, and fall back to the wrapped function on any error. TODO: get a better wrapper model later.
def withplugin(fn):
def fn_wrapper(*arg, **karg):
self = arg[0]
if self.plugin is None:
return fn(*arg, **karg)
try:
handlerFn = getattr(self.plugin, fn.__name__)
return handlerFn(fn).__call__(*arg, **karg)
except:
return fn(*arg, **karg)
return fn_wrapper
def check_threads(self, first=False):
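# Start, or restart if they are no longer alive, the worker threads:
# the per-queue S3 writers, the download and prefetch workers, and the
# SNS publish, SQS listen, HTTP listen and cache-size checker threads.
# Called once from init() and then periodically from check_status().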
logger.debug("check_threads '%s'" % first)
if first:
display = 'Starting'
else:
display = 'Restarting'
for i in range(self.s3_num):
if thread_is_not_alive(self.s3_threads[i]):
logger.debug("%s S3 thread #%i" % (display, i))
self.s3_threads[i] = TracebackLoggingThread(target=self.get_to_do_on_s3, args=(i,), name=("S3Thread-%04d" % i))
self.s3_threads[i].daemon = False
self.s3_threads[i].start()
for i in range(self.download_num):
if thread_is_not_alive(self.download_threads[i]):
logger.debug("%s download thread #%i" % (display, i))
self.download_threads[i] = TracebackLoggingThread(target=self.download, name=("Download-%04d" % i))
self.download_threads[i].daemon = True
self.download_threads[i].start()
for i in range(self.prefetch_num):
if thread_is_not_alive(self.prefetch_threads[i]):
logger.debug("%s prefetch thread #%i" % (display, i))
self.prefetch_threads[i] = TracebackLoggingThread(target=self.download, args=(True,), name=("Prefetch-%04d" % i))
self.prefetch_threads[i].daemon = True
self.prefetch_threads[i].start()
if self.sns_topic_arn:
if thread_is_not_alive(self.publish_thread):
logger.debug("%s publish thread" % display)
self.publish_thread = TracebackLoggingThread(target=self.publish_messages, name="SNSPublisher")
self.publish_thread.daemon = True
self.publish_thread.start()
if self.sqs_queue_name:
if thread_is_not_alive(self.queue_listen_thread):
logger.debug("%s queue listen thread" % display)
self.queue_listen_thread = TracebackLoggingThread(target=self.listen_for_messages_over_sqs, name="SQSListener")
self.queue_listen_thread.daemon = True
self.queue_listen_thread.start()
if self.sns_http_port:
if thread_is_not_alive(self.http_listen_thread):
logger.debug("%s HTTP listen thread" % display)
self.http_listen_thread = TracebackLoggingThread(target=self.listen_for_messages_over_http, name="HTTPListener")
self.http_listen_thread.daemon = True
self.http_listen_thread.start()
if thread_is_not_alive(self.check_cache_thread):
logger.debug("%s check cache thread" % display)
self.check_cache_thread = TracebackLoggingThread(target=self.check_cache_size, name="CacheChecker")
self.check_cache_thread.daemon = True
self.check_cache_thread.start()
def init(self, path):
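# FUSE 'init' callback: allocate the thread slots, start every worker via
# check_threads(first=True) plus the status checker thread, and subscribe
# the SQS queue and/or the HTTP endpoint to the SNS topic so that cache
# invalidation messages from other nodes are received.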
threading.current_thread().name = "FUSE"
logger.debug("init '%s'" % (path))
self.s3_threads = {}
for i in range(self.s3_num):
self.s3_threads[i] = None
self.download_threads = {}
for i in range(self.download_num):
self.download_threads[i] = None
self.prefetch_threads = {}
for i in range(self.prefetch_num):
self.prefetch_threads[i] = None
self.publish_thread = None
self.queue_listen_thread = None
self.http_listen_thread = None
self.check_cache_thread = None
self.check_threads(first=True)
self.check_status_thread = TracebackLoggingThread(target=self.check_status, name="StatusChecker")
self.check_status_thread.daemon = True
self.check_status_thread.start()
if self.sqs_queue_name:
logger.debug("Subscribing '%s' to '%s'" % (self.sqs_queue_name, self.sns_topic_arn))
response = self.sns.subscribe_sqs_queue(self.sns_topic_arn, self.queue)
self.sqs_subscription = response['SubscribeResponse']['SubscribeResult']['SubscriptionArn']
logger.debug('SNS SQS subscription = %s' % self.sqs_subscription)
if self.sns_http_port:
self.http_listen_thread = None
self.sns.subscribe(self.sns_topic_arn, 'http', self.http_listen_url)
def signal_handler(self, signum, frame):
logger.info("signal_handler RECEIVED %s", signum)
if signum == signal.SIGINT:
logger.info("interrupt RECEIVED SIGINT %s", signum)
self.destroy('/')
elif signum == signal.SIGHUP:
logger.info("hangup RECEIVED SIGHUP %s", signum)
logger.info("signal_handler DONE %s", signum)
def flush_all_cache(self):
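# Upload to S3 every cached file that still has pending changes
# (i.e. whose FSData carries the 'change' flag).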
logger.debug("flush_all_cache")
with self.cache.lock:
for path in self.cache.entries:
data = self.cache.get(path, 'data')
if data and data.has('change'):
self.upload_to_s3(path, data)
def destroy(self, path):
logger.debug("destroy '%s'" % (path))
# Cleanup for unmount
logger.info('File system unmount...')
self.running = False
if self.http_listen_thread:
self.httpd.shutdown() # To stop HTTP listen thread
logger.info("waiting for HTTP listen thread to shutdown...")
self.http_listen_thread.join(5.0) # 5 seconds should be enough
logger.info("HTTP listen thread ended")
self.sns.unsubscribe(self.http_subscription)
logger.info("Unsubscribed SNS HTTP endpoint")
if self.queue_listen_thread:
self.sqs_queue_name = None # To stop queue listen thread
logger.info("waiting for SQS listen thread to shutdown...")
self.queue_listen_thread.join(self.queue_wait_time + 1.0)
logger.info("SQS listen thread ended")
self.sns.unsubscribe(self.sqs_subscription)
logger.info("Unsubscribed SNS SQS endpoint")
if self.new_queue:
if self.sqs.delete_queue(self.queue):
logger.info("New queue deleted")
else:
logger.error("New queue was not deleted")
self.flush_all_cache()
if self.sns_topic_arn:
while not self.publish_queue.empty():
time.sleep(1.0)
self.sns_topic_arn = None # To stop publish thread
logger.info("waiting for SNS publish thread to shutdown...")
self.publish_thread.join(2.0) # 2 seconds should be enough
if self.cache_entries:
self.cache_entries = 0 # To stop memory thread
logger.info("waiting for check cache thread to shutdown...")
self.check_cache_thread.join(self.cache_check_interval + 1.0)
logger.info("deleting cache_path %s ..." % self.cache_path)
rmtree(self.cache_path)
logger.info('File system unmounted.')
def listen_for_messages_over_http(self):
logger.info("Listening on: '%s'" % self.http_listen_url)
server_class = SNS_HTTPServer
handler_class = SNS_HTTPRequestHandler
server_address = ('', self.sns_http_port)
self.httpd = server_class(server_address, handler_class)
self.httpd.set_fs(self)
self.httpd.serve_forever()
def listen_for_messages_over_sqs(self):
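# Poll the SQS queue (long polling when queue_wait_time > 0) until
# sqs_queue_name is cleared at unmount. Each message is either an
# SNS-wrapped yas3fs message ('Message') or a native S3 bucket
# notification ('Records').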
logger.info("Listening on queue: '%s'" % self.queue.name)
while self.sqs_queue_name:
if self.queue_wait_time > 0:
# Using SQS long polling, needs boto >= 2.7.0
messages = self.queue.get_messages(10, wait_time_seconds=self.queue_wait_time)
else:
messages = self.queue.get_messages(10)
logger.debug("Got %i messages from SQS" % len(messages))
if messages:
for m in messages:
content = json.loads(m.get_body())
if 'Message' in content:
message = content['Message'].encode('ascii')
self.process_message(message)
elif 'Records' in content:
# Support S3 native bucket events
for event in content['Records']:
self.process_native_s3_event(event)
else:
# eg: "Service":"Amazon S3","Event":"s3:TestEvent"...
logger.warn("Unknown SQS message: "+repr(content))
m.delete()
else:
if self.queue_polling_interval > 0:
time.sleep(self.queue_polling_interval)
def invalidate_cache(self, path, etag=None):
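# Drop the cached key/attr/xattr for the path and mark the cached data
# as 'new' with the announced ETag so it is re-validated on next access;
# data with a partial 'range' download in progress is dropped instead.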
logger.debug("invalidate_cache '%s' '%s'" % (path, etag))
with self.cache.get_lock(path):
self.cache.delete(path, 'key')
self.cache.delete(path, 'attr')
self.cache.delete(path, 'xattr')
self.cache.delete(path, 'deleting')
self.cache.delete(path, 's3_busy')
data = self.cache.get(path, 'data')
if data:
if data.has('range'):
self.cache.delete(path, 'data')
else:
data.set('new', etag)
if self.cache.is_empty(path):
self.cache.delete(path) # But keep it in the parent readdir
def delete_cache(self, path):
logger.debug("delete_cache '%s'" % (path))
with self.cache.get_lock(path):
self.cache.delete(path)
self.reset_parent_readdir(path)
def process_message(self, messages):
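# Handle a yas3fs message: a JSON list [unique_id, operation, args...].
# Messages published by this node are discarded; the others invalidate
# or reset parts of the cache and may update runtime settings
# (cache, buffer, prefetch, multipart, url, ping).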
logger.debug("process_message '%s'" % (messages))
# Discard malformed JSON https://github.com/danilop/yas3fs/issues/141
try:
c = json.loads(messages)
except Exception as e:
logger.debug("parsing error: %s" % (e))
logger.warn("process_message discarding malformed message")
return
if c[0] == self.unique_id:
# discard message coming from itself
logger.debug("process message from self discarded '%s'"%(c))
return
if c[1] in ( 'mkdir', 'mknod', 'symlink' ) and c[2] != None:
self.delete_cache(c[2])
elif c[1] in ( 'rmdir', 'unlink' ) and c[2] != None:
self.delete_cache(c[2])
elif c[1] == 'rename' and c[2] != None and c[3] != None:
self.delete_cache(c[2])
self.delete_cache(c[3])
elif c[1] == 'upload':
if c[2] != None and len(c) == 4: # fix for https://github.com/danilop/yas3fs/issues/42
self.invalidate_cache(c[2], c[3])
else: # Invalidate all the cached data
for path in list(self.cache.entries.keys()):
self.invalidate_cache(path)
elif c[1] == 'md':
if c[2]:
self.delete_cache(c[2])
self.delete_cache(c[3])
elif c[1] == 'reset':
if len(c) <= 2 or not c[2] or c[2] == '/':
with self.cache.lock:
self.flush_all_cache()
self.cache.reset_all() # Completely reset the cache
else:
# c[2] exists and is not the root directory
for path in list(self.cache.entries.keys()):
# If the reset path is a directory and it matches
# the directory in the cache, it will delete the
# parent directory cache as well.
if path.startswith(c[2]):
self.delete_cache(path)
elif c[1] == 'url':
with self.cache.lock:
self.flush_all_cache()
self.cache.reset_all() # Completely reset the cache
s3url = urlparse(c[2])
if s3url.scheme != 's3':
error_and_exit("The S3 path to mount must be in URL format: s3://BUCKET/PATH")
self.s3_bucket_name = s3url.netloc
logger.info("S3 bucket: '%s'" % self.s3_bucket_name)
self.s3_prefix = s3url.path.strip('/')
logger.info("S3 prefix: '%s'" % self.s3_prefix)
try:
self.s3_bucket = self.s3.get_bucket(self.s3_bucket_name, headers=self.default_headers, validate=False)
self.s3_bucket.key_class = UTF8DecodingKey
except boto.exception.S3ResponseError as e:
error_and_exit("S3 bucket not found:" + str(e))
elif c[1] == 'cache':
if c[2] == 'entries' and c[3] > 0:
self.cache_entries = int(c[3])
elif c[2] == 'mem' and c[3] > 0:
self.cache_mem_size = int(c[3]) * (1024 * 1024) # MB
elif c[2] == 'disk' and c[3] > 0:
self.cache_disk_size = int(c[3]) * (1024 * 1024) # MB
elif c[1] == 'buffer' and c[3] >= 0:
if c[2] == 'size':
self.buffer_size = int(c[3]) * 1024 # KB
elif c[2] == 'prefetch':
self.buffer_prefetch = int(c[3])
elif c[1] == 'prefetch':
if c[2] == 'on':
self.full_prefetch = True
elif c[2] == 'off':
self.full_prefetch = False
elif c[1] == 'multipart':
if c[2] == 'size' and c[3] >= 5120:
self.multipart_size = c[3] * 1024
elif c[2] == 'num' and c[3] >= 0:
self.multipart_num = c[3]
elif c[2] == 'retries' and c[3] >= 1:
self.multipart_retries = c[3]
elif c[1] == 'ping':
self.publish_status()
def process_native_s3_event(self, event):
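# Handle a native S3 bucket notification by invalidating the cache for
# the object's path, unless the event was generated by this node's own
# IAM principal.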
event_kind = event['eventName']
path = '/'+event['s3']['object']['key'].strip('/') # want '/abc/folder' while on s3 it's 'abc/folder/'
user_id = event['userIdentity']['principalId']
if user_id == self.current_user_principalId:
logger.debug("Native S3 event %s on %s from current yas3fs user %s discarded" % (event_kind, path, user_id))
return
logger.info("Native S3 event %s on %s by %s. Deleting cache for %s" % (event_kind, path, user_id, path))
self.delete_cache(path)
def publish_status(self):
hostname = socket.getfqdn()
num_entries, mem_size, disk_size = self.cache.get_memory_usage()
dq = self.download_queue.qsize()
pq = self.prefetch_queue.qsize()
s3q = 0
for i in range(0, self.s3_num):
s3q += self.s3_queue[i].qsize()
message = [ 'status', hostname, num_entries, mem_size, disk_size, dq, pq, s3q ]
self.publish(message)
def publish_messages(self):
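# Publisher thread: drain publish_queue and send each message to the SNS
# topic as JSON, prefixed with this node's unique_id so that receivers
# can discard their own messages.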
while self.sns_topic_arn:
try:
message = self.publish_queue.get(True, 1) # 1 second time-out
message = copy.copy(message)
message.insert(0, self.unique_id)
full_message = json.dumps(message)
self.sns.publish(self.sns_topic_arn, full_message.encode('ascii'))
self.publish_queue.task_done()
except QueueEmpty:
pass
except Exception as e:
logger.exception(e)
logger.error("publish exception: " + full_message.encode('ascii'))
raise e
def publish(self, message):
if self.sns_topic_arn:
logger.debug("publish '%s'" % (message))
self.publish_queue.put(message)
def check_status(self):
logger.debug("check_status")
while self.cache_entries:
num_entries, mem_size, disk_size = self.cache.get_memory_usage()
s3q = 0 ### Remove duplicate code
for i in range(0, self.s3_num):
s3q += self.s3_queue[i].qsize()
logger.info("entries, mem_size, disk_size, download_queue, prefetch_queue, s3_queue: %i, %i, %i, %i, %i, %i"
% (num_entries, mem_size, disk_size,
self.download_queue.qsize(), self.prefetch_queue.qsize(), s3q))
logger.info("multipart_uploads_in_progress = " + str(self.multipart_uploads_in_progress))
if debug:
logger.debug("new_locks, unused_locks: %i, %i"
% (len(self.cache.new_locks), len(self.cache.unused_locks)))
(threshold0, threshold1, threshold2) = gc.get_threshold()
(count0, count1, count2) = gc.get_count()
logger.debug("gc count0/threshold0, count1/threshold1, count2/threshold2: %i/%i, %i/%i, %i/%i"
% (count0,threshold0,count1,threshold1,count2,threshold2))
self.check_threads()
time.sleep(self.check_status_interval)
def check_cache_size(self):
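# Cache eviction loop: while the configured limits are exceeded (number
# of entries, memory size or disk size) pop the least-recently-used path
# and purge it, skipping entries that are open or have unflushed changes;
# it also removes path locks that stayed unused for a full interval.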
logger.debug("check_cache_size")
while self.cache_entries:
num_entries, mem_size, disk_size = self.cache.get_memory_usage()
logger.debug("check_cache_size get_memory_usage() -> num_entries=%r mem_size=%r disk_size=%r", num_entries, mem_size, disk_size)
purge = False
if num_entries > self.cache_entries:
purge = True
store = ''
elif mem_size > self.cache_mem_size:
purge = True
store = 'mem'
elif disk_size > self.cache_disk_size:
purge = True
store = 'disk'
if purge:
# Need to purge something
path = self.cache.lru.popleft() # Take a path on top of the LRU (least used)
with self.cache.get_lock(path):
if self.cache.has(path): # Path may be deleted before I acquire the lock
logger.debug("check_cache_size purge: '%s' '%s' ?" % (store, path))
data = self.cache.get(path, 'data')
full_delete = False
if (not data) or (data and (store == '' or data.store == store) and (not data.has('open')) and (not data.has('change'))):
if store == '':
logger.debug("check_cache_size purge: '%s' '%s' OK full" % (store, path))
self.cache.delete(path) # Remove completely from cache
full_delete = True
elif data:
logger.debug("check_cache_size purge: '%s' '%s' OK data" % (store, path))
self.cache.delete(path, 'data') # Just remove data
else:
logger.debug("check_cache_size purge: '%s' '%s' KO no data" % (store, path))
else:
logger.debug("check_cache_size purge: '%s' '%s' KO data? %s open? %s change? %s"
% (store, path, data != None, data and data.has('open'), data and data.has('change')))
if not full_delete:
# The entry is still there, let's append it again at the end of the LRU list
self.cache.lru.append(path)
else:
# Check for unused locks to be removed
for path in list(self.cache.unused_locks.keys()):
logger.debug("check_cache_size purge unused lock: '%s'" % (path))
try:
with self.cache.lock and self.cache.new_locks[path]:
del self.cache.new_locks[path]
logger.debug("check_cache_size purge unused lock: '%s' deleted" % (path))
except KeyError:
pass
try:
del self.cache.unused_locks[path]
logger.debug("check_cache_size purge unused lock: '%s' removed from list" % (path))
except KeyError:
pass
# Look for unused locks to be removed at next iteration (if still "new")
for path in self.cache.new_locks.keys():
logger.debug("check_cache_size purge unused lock: '%s' added to list" % (path))
self.cache.unused_locks[path] = True # Just a flag
# Sleep for some time
time.sleep(self.cache_check_interval)
def add_to_parent_readdir(self, path):
logger.debug("add_to_parent_readdir '%s'" % (path))
(parent_path, dir) = os.path.split(path)
logger.debug("add_to_parent_readdir '%s' parent_path '%s'" % (path, parent_path))
with self.cache.get_lock(parent_path):
dirs = self.cache.get(parent_path, 'readdir')
if dirs != None and dirs.count(dir) == 0:
dirs.append(dir)
def remove_from_parent_readdir(self, path):
logger.debug("remove_from_parent_readdir '%s'" % (path))
(parent_path, dir) = os.path.split(path)
logger.debug("remove_from_parent_readdir '%s' parent_path '%s'" % (path, parent_path))
with self.cache.get_lock(parent_path):
dirs = self.cache.get(parent_path, 'readdir')
if dirs != None:
try:
dirs.remove(dir)
except ValueError:
# not in the parent's readdir cache, no worries.
pass
def reset_parent_readdir(self, path):
logger.debug("reset_parent_readdir '%s'" % (path))
(parent_path, dir) = os.path.split(path)
logger.debug("reset_parent_readdir '%s' parent_path '%s'" % (path, parent_path))
self.cache.delete(parent_path, 'readdir')
def remove_prefix(self, keyname):
if self.s3_prefix == '':
return '/' + keyname
return keyname[len(self.s3_prefix):]
def join_prefix(self, path):
if self.s3_prefix == '':
if path != '/':
return path[1:] # Remove beginning '/'
else:
return '.' # To handle '/' with empty s3_prefix
else:
return self.s3_prefix + path
def has_elements(self, iter, num=1):
logger.debug("has_element '%s' %i" % (iter, num))
c = 0
for k in iter:
logger.debug("has_element '%s' -> '%r'" % (iter, k))
path = k.name[len(self.s3_prefix):]
if not self.cache.is_deleting(path):
c += 1
if c >= num:
logger.debug("has_element '%s' OK" % (iter))
return True
logger.debug("has_element '%s' KO" % (iter))
return False
def folder_has_contents(self, path, num=1):
logger.debug("folder_has_contents '%s' %i" % (path, num))
full_path = self.join_prefix(path + '/')
# encoding for https://github.com/danilop/yas3fs/issues/56
key_list = self.s3_bucket.list(full_path.encode('utf-8'), '/', headers = self.default_headers)
return self.has_elements(key_list, num)
def get_key(self, path, cache=True):
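# Return the boto key for the path, from the cache when possible,
# otherwise from S3 (trying both 'path' and 'path/' to cover directories).
# The result is cached and the parent readdir is refreshed if the entry
# was not listed there.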
if self.cache.is_deleting(path):
logger.debug("get_key path '%s' is deleting -- returning None" % (path))
return None
if cache and self.cache.is_ready(path):
key = self.cache.get(path, 'key')
if key:
logger.debug("get_key from cache '%s'" % (path))
return key
logger.debug("get_key %s", path)
look_on_S3 = True
refresh_readdir_cache_if_found = False
if path != '/':
(parent_path, file) = os.path.split(path)
dirs = self.cache.get(parent_path, 'readdir')
if dirs and file not in dirs:
refresh_readdir_cache_if_found = True
if not self.recheck_s3:
look_on_S3 = False
if look_on_S3:
logger.debug("get_key from S3 #1 '%s'" % (path))
# encoding for https://github.com/danilop/yas3fs/issues/56
key = self.s3_bucket.get_key(self.join_prefix(path).encode('utf-8'), headers=self.default_headers)
if not key and path != '/':
full_path = path + '/'
logger.debug("get_key from S3 #2 '%s' '%s'" % (path, full_path))
# encoding for https://github.com/danilop/yas3fs/issues/56
key = self.s3_bucket.get_key(self.join_prefix(full_path).encode('utf-8'), headers=self.default_headers)
if key:
key = UTF8DecodingKey(key)
key.name = key.name.decode('utf-8')
logger.debug("get_key to cache '%s'" % (path))
###self.cache.delete(path) ### ???
###self.cache.add(path)
self.cache.set(path, 'key', key)
if refresh_readdir_cache_if_found:
self.add_to_parent_readdir(path)
else:
logger.debug("get_key not on S3 '%s'" % (path))
if not key:
logger.debug("get_key no '%s'" % (path))
return key
def get_metadata(self, path, metadata_name, key=None):
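# Return the 'attr' or 'xattr' dictionary for a path. Values come from
# the cache or are decoded from the S3 key's user metadata (JSON, with a
# fallback for the legacy 'k=v;...' encoding); missing attributes are
# synthesized from the key itself (size, directory vs. file, last_modified).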
logger.debug("get_metadata -> '%s' '%s' '%r'" % (path, metadata_name, key))
with self.cache.get_lock(path): # To avoid consistency issues, e.g. with a concurrent purge
metadata_values = None
if self.cache.has(path, metadata_name):
metadata_values = self.cache.get(path, metadata_name)
if metadata_values is None:
metadata_values = {}
if not key:
key = self.get_key(path)
if not key:
if path == '/': # First time mount of a new file system
self.mkdir(path, 0o0755)
logger.debug("get_metadata -> '%s' '%s' First time mount"
% (path, metadata_name))
return self.cache.get(path, metadata_name)
else:
if not self.folder_has_contents(path):
self.cache.add(path) # It is empty to cache further checks
logger.debug("get_metadata '%s' '%s' no S3 return None"
% (path, metadata_name))
return None
else:
logger.debug("get_metadata '%s' '%s' '%r' S3 found"
% (path, metadata_name, key))
if key:
s = key.get_metadata(metadata_name)
else:
s = None
if s:
try:
metadata_values = json.loads(s)
except ValueError: # For legacy attribute encoding
for kv in s.split(';'):
k, v = kv.split('=')
if v.isdigit():
metadata_values[k] = int(v)
elif v.replace(".", "", 1).isdigit():
metadata_values[k] = float(v)
else:
metadata_values[k] = v
if metadata_name == 'attr': # Custom exception(s)
if key:
metadata_values['st_size'] = key.size
else:
metadata_values['st_size'] = 0
if not s: # Set default attr to browse any S3 bucket TODO directories
uid, gid = get_uid_gid()
metadata_values['st_uid'] = uid
metadata_values['st_gid'] = gid
if key is None:
### # no key, default to dir
metadata_values['st_mode'] = (stat.S_IFDIR | 0o0755)
elif key and key.name != '' and key.name[-1] != '/':
metadata_values['st_mode'] = (stat.S_IFREG | 0o0755)
else:
metadata_values['st_mode'] = (stat.S_IFDIR | 0o0755)
if key and key.last_modified:
now = time.mktime(time.strptime(key.last_modified, "%a, %d %b %Y %H:%M:%S %Z"))
else:
now = get_current_time()
metadata_values['st_mtime'] = now
metadata_values['st_atime'] = now
metadata_values['st_ctime'] = now
self.cache.add(path)
self.cache.set(path, metadata_name, metadata_values)
logger.debug("get_metadata <- '%s' '%s' '%r' '%s'" % (path, metadata_name, key, metadata_values))
return metadata_values
def set_metadata(self, path, metadata_name=None, metadata_values=None, key=None):
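# Store metadata in the cache and, when write_metadata is enabled and no
# upload is in progress, also on the S3 key (a new directory key is
# created with set_contents_from_string, an existing key is updated with
# an in-place copy); while a 'change' is in progress, writing 'attr' is
# deferred until the change completes.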
logger.debug("set_metadata '%s' '%s' '%r'" % (path, metadata_name, key))
with self.cache.get_lock(path):
if not metadata_values is None:
self.cache.set(path, metadata_name, metadata_values)
data = self.cache.get(path, 'data')
if self.write_metadata and (key or (not data) or (data and not data.has('change'))):
# No change in progress, I should write now
if not key:
key = self.get_key(path)
logger.debug("set_metadata '%s' '%s' '%r' Key" % (path, metadata_name, key))
new_key = False
if not key and self.folder_has_contents(path):
if path != '/' or self.write_metadata:
full_path = path + '/'
key = UTF8DecodingKey(self.s3_bucket)
key.key = self.join_prefix(full_path)
new_key = True
if key:
if metadata_name:
values = metadata_values
if values is None:
values = self.cache.get(path, metadata_name)
if values is None or not any(values):
try:
del key.metadata[metadata_name]
except KeyError:
pass
else:
try:
key.metadata[metadata_name] = json.dumps(values)
except UnicodeDecodeError:
logger.info("set_metadata '%s' '%s' '%r' cannot decode unicode, not written on S3"
% (path, metadata_name, key))
pass # Ignore the binary values - something better TODO ???
if (not data) or (data and (not data.has('change'))):
logger.debug("set_metadata '%s' '%r' S3" % (path, key))
pub = [ 'md', metadata_name, path ]
if new_key:
logger.debug("set_metadata '%s' '%r' S3 new key" % (path, key))
### key.set_contents_from_string('', headers={'Content-Type': 'application/x-directory'})
headers = { 'Content-Type': 'application/x-directory' }
headers.update(self.default_write_headers)
cmds = [ [ 'set_contents_from_string', [ '' ], { 'headers': headers } ] ]
self.do_on_s3(key, pub, cmds)
else:
### key.copy(key.bucket.name, key.name, key.metadata, preserve_acl=False)
if isinstance(key.name, bytes):
key_name = key.name.decode('utf-8')
else:
key_name = key.name
cmds = [ [ 'copy', [ key.bucket.name, key_name, key.metadata ],
{ 'preserve_acl': False, 'encrypt_key':self.aws_managed_encryption } ] ]
self.do_on_s3(key, pub, cmds)
###self.publish(['md', metadata_name, path])
# handle a request to set metadata, but we can't right now because the node is currently
# in the middle of a 'change' https://github.com/danilop/yas3fs/issues/52
elif self.write_metadata and data and data.has('change'):
if metadata_name == 'attr' and metadata_values is None:
logger.debug("set_metadata: 'change' already in progress, setting FSData.props[invoke_after_change] lambda for self.set_metadata("+path+",attr)")
data.set('invoke_after_change',(lambda path: self.set_metadata(path,'attr')))
def getattr(self, path, fh=None):
logger.debug("getattr -> '%s' '%s'" % (path, fh))
if self.cache.is_deleting(path):
logger.debug("getattr path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path): # To avoid consistency issues, e.g. with a concurrent purge
recheck_s3 = False
if self.cache.is_empty(path):
cache = True
logger.debug("getattr <- '%s' '%s' cache ENOENT" % (path, fh))
if self.recheck_s3:
cache = False
recheck_s3 = True
logger.debug("getattr rechecking on s3 <- '%s' '%s' cache ENOENT" % (path, fh))
else:
raise FuseOSError(errno.ENOENT)
attr = self.get_metadata(path, 'attr')
if attr is None:
logger.debug("getattr <- '%s' '%s' ENOENT" % (path, fh))
raise FuseOSError(errno.ENOENT)
if attr['st_size'] == 0 and stat.S_ISDIR(attr['st_mode']):
attr['st_size'] = 4096 # For compatibility...
attr['st_nlink'] = 1 # Something better TODO ???
if self.st_blksize:
attr['st_blksize'] = self.st_blksize
if self.full_prefetch: # Prefetch
if stat.S_ISDIR(attr['st_mode']):
self.readdir(path)
else:
self.check_data(path)
logger.debug("getattr <- '%s' '%s' '%s'" % (path, fh, attr))
return attr
def readdir(self, path, fh=None):
logger.debug("readdir '%s' '%s'" % (path, fh))
if self.cache.is_deleting(path):
logger.debug("readdir path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("readdir '%s' '%s' ENOENT" % (path, fh))
raise FuseOSError(errno.ENOENT)
self.cache.add(path)
dirs = self.cache.get(path, 'readdir')
if not dirs:
logger.debug("readdir '%s' '%s' no cache" % (path, fh))
full_path = self.join_prefix(path)
if full_path == '.':
full_path = ''
elif full_path != '' and full_path[-1] != '/':
full_path += '/'
logger.debug("readdir '%s' '%s' S3 list '%s'" % (path, fh, full_path))
# encoding for https://github.com/danilop/yas3fs/issues/56
key_list = self.s3_bucket.list(full_path.encode('utf-8'), '/', headers = self.default_headers, encoding_type='url')
dirs = ['.', '..']
logger.debug('key names %s' % (', '.join([k.name for k in key_list])))
for k in key_list:
# 'unquoting' for https://github.com/danilop/yas3fs/issues/56
if sys.version_info < (3,):
k.name = unquote_plus(str(k.name)).decode('utf-8')
else:
k.name = unquote_plus(k.name)
logger.debug("readdir '%s' '%s' S3 list key '%r'" % (path, fh, k))
d = k.name[len(full_path):]
if len(d) > 0:
if d == '.':
continue # I need this for whole S3 buckets mounted without a prefix, I use '.' for '/' metadata
d_path = k.name[len(self.s3_prefix):]
if d[-1] == '/':
d = d[:-1]
if self.cache.is_deleting(d_path):
continue
dirs.append(d)
# for https://github.com/danilop/yas3fs/issues/56
if sys.version_info < (3,):
convertedDirs = []
for dir in dirs:
convertedDirs.append(unicode(dir))
dirs = convertedDirs
self.cache.set(path, 'readdir', dirs)
logger.debug("readdir '%s' '%s' '%s'" % (path, fh, dirs))
return dirs
def mkdir(self, path, mode):
logger.debug("mkdir '%s' '%s'" % (path, mode))
with self.cache.get_lock(path):
if self.cache.is_not_empty(path):
logger.debug("mkdir cache '%s' EEXIST" % self.cache.get(path))
raise FuseOSError(errno.EEXIST)
k = self.get_key(path)
if k and path != '/':
logger.debug("mkdir key '%s' EEXIST" % self.cache.get(path))
raise FuseOSError(errno.EEXIST)
now = get_current_time()
uid, gid = get_uid_gid()
attr = { 'st_uid': uid,
'st_gid': gid,
'st_atime': now,
'st_mtime': now,
'st_ctime': now,
'st_size': 0,
'st_mode': (stat.S_IFDIR | mode)
}
self.cache.delete(path)
self.cache.add(path)
data = FSData(self.cache, 'mem', path)
self.cache.set(path, 'data', data)
data.set('change', True)
k = UTF8DecodingKey(self.s3_bucket)
self.set_metadata(path, 'attr', attr, k)
self.set_metadata(path, 'xattr', {}, k)
self.cache.set(path, 'key', k)
if path != '/':
full_path = path + '/'
self.cache.set(path, 'readdir', ['.', '..']) # the directory is empty
self.add_to_parent_readdir(path)
else:
full_path = path # To manage '/' with an empty s3_prefix
if path != '/' or self.write_metadata:
k.key = self.join_prefix(full_path)
logger.debug("mkdir '%s' '%s' '%r' S3" % (path, mode, k))
###k.set_contents_from_string('', headers={'Content-Type': 'application/x-directory'})
pub = [ 'mkdir', path ]
headers = { 'Content-Type': 'application/x-directory'}
headers.update(self.default_write_headers)
cmds = [ [ 'set_contents_from_string', [ '' ], { 'headers': headers } ] ]
self.do_on_s3(k, pub, cmds)
data.delete('change')
###if path != '/': ### Do I need this???
### self.publish(['mkdir', path])
return 0
def symlink(self, path, link):
logger.debug("symlink '%s' '%s'" % (path, link))
with self.cache.get_lock(path):
if self.cache.is_not_empty(path):
logger.debug("symlink cache '%s' '%s' EEXIST" % (path, link))
raise FuseOSError(errno.EEXIST)
k = self.get_key(path)
if k:
logger.debug("symlink key '%s' '%s' EEXIST" % (path, link))
raise FuseOSError(errno.EEXIST)
now = get_current_time()
uid, gid = get_uid_gid()
attr = {}
attr['st_uid'] = uid
attr['st_gid'] = gid
attr['st_atime'] = now
attr['st_mtime'] = now
attr['st_ctime'] = now
attr['st_size'] = 0
attr['st_mode'] = (stat.S_IFLNK | 0o0755)
self.cache.delete(path)
self.cache.add(path)
if self.cache_on_disk > 0:
data = FSData(self.cache, 'mem', path) # New files (almost) always cache in mem - is it ok ???
else:
data = FSData(self.cache, 'disk', path)
self.cache.set(path, 'data', data)
data.set('change', True)
k = UTF8DecodingKey(self.s3_bucket)
self.set_metadata(path, 'attr', attr, k)
self.set_metadata(path, 'xattr', {}, k)
data.open()
self.write(path, link, 0)
data.close()
k.key = self.join_prefix(path)
self.cache.set(path, 'key', k)
self.add_to_parent_readdir(path)
logger.debug("symlink '%s' '%s' '%r' S3" % (path, link, k))
###k.set_contents_from_string(link, headers={'Content-Type': 'application/x-symlink'})
pub = [ 'symlink', path ]
headers = { 'Content-Type': 'application/x-symlink' }
headers.update(self.default_write_headers)
cmds = [ [ 'set_contents_from_string', [ link ], { 'headers': headers } ] ]
self.do_on_s3(k, pub, cmds)
data.delete('change')
###self.publish(['symlink', path])
return 0
def check_data(self, path):
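# Make sure usable content is cached for the path: create the FSData
# (in memory or on disk depending on the key size), compare the cached
# ETag with the key's ETag, and either download the whole object at once
# (when buffers are disabled) or set up an empty FSRange so it can be
# fetched in buffer-sized chunks on demand. Returns False if there is no key.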
logger.debug("check_data '%s'" % (path))
with self.cache.get_lock(path):
#-- jazzl0ver: had to add path checking due to untraceable (by me) cache leaking (workaround for issue #174)
data = self.cache.get(path, 'data')
if data and not os.path.exists(self.cache.get_cache_filename(path)):
logger.debug("Cache leak found for '%s', cleaning up..." % (path))
self.cache.delete(path)
with self.cache.lock and self.cache.new_locks[path]:
del self.cache.new_locks[path]
data = None
self.getattr(path)
if not data or data.has('new'):
k = self.get_key(path)
if not k:
logger.debug("check_data '%s' no key" % (path))
return False
if not data:
if k.size < self.cache_on_disk:
data = FSData(self.cache, 'mem', path)
else:
data = FSData(self.cache, 'disk', path)
self.cache.set(path, 'data', data)
new_etag = data.get('new')
etag = k.etag[1:-1]
if not new_etag or new_etag == etag:
data.delete('new')
else: # I'm not sure I got the latest version
logger.debug("check_data '%s' etag is different" % (path))
self.cache.delete(path, 'key') # Next time get the key from S3
data.set('new', None) # Next time don't check the Etag
if data.etag == etag:
logger.debug("check_data '%s' etag is the same, data is usable" % (path))
return True
data.update_size()
if k.size == 0:
logger.debug("check_data '%s' nothing to download" % (path))
return True # No need to download anything
elif self.buffer_size > 0: # Use buffers
if not data.has('range'):
data.set('range', FSRange())
logger.debug("check_data '%s' created empty data object" % (path))
else: # Download at once
if data.content is None:
data.open()
k.get_contents_to_file(data.content, headers = self.default_headers)
data.update_size()
data.update_etag(k.etag[1:-1])
logger.debug("check_data '%s' data downloaded at once" % (path))
else:
logger.debug("check_data '%s' data already in place" % (path))
return True
def enqueue_download_data(self, path, starting_from=0, length=0, prefetch=False):
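# Split the requested byte range (up to the end of the file when
# length == 0) into buffer_size-aligned chunks and put each one on the
# download or prefetch queue.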
logger.debug("enqueue_download_data '%s' %i %i" % (path, starting_from, length))
start_buffer = int(starting_from / self.buffer_size)
if length == 0: # Means to the end of file
key = self.get_key(path)
number_of_buffers = 1 + int((key.size - 1 - starting_from) / self.buffer_size)
else:
end_buffer = int((starting_from + length - 1) / self.buffer_size)
number_of_buffers = 1 + (end_buffer - start_buffer)
for i in range(number_of_buffers):
start = (start_buffer + i) * self.buffer_size
end = start + self.buffer_size - 1
option_list = (path, start, end)
if prefetch:
self.prefetch_queue.put(option_list)
else:
self.download_queue.put(option_list)
def download(self, prefetch=False):
while self.running:
try:
if prefetch:
(path, start, end) = self.prefetch_queue.get(True, 1) # 1 second time-out
else:
(path, start, end) = self.download_queue.get(True, 1) # 1 second time-out
self.download_data(path, start, end)
if prefetch:
self.prefetch_queue.task_done()
else:
self.download_queue.task_done()
except QueueEmpty:
pass
def download_data(self, path, start, end):
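# Download one buffer-aligned range of the object (with bounded retries),
# write it into the cached content, mark the interval as available and
# wake up waiting readers; once the whole object is present the 'range'
# marker is removed and the ETag is recorded.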
thread_name = threading.current_thread().name
logger.debug("download_data '%s' %i-%i [thread '%s']" % (path, start, end, thread_name))
original_key = self.get_key(path)
if original_key is None:
logger.debug("download_data no key (before) '%s' [thread '%s']"
% (path, thread_name))
return
logger.debug("type k = '%r'" % type(original_key))
logger.debug(" dir k = '%r'" % dir(original_key))
logger.debug(" k = '%r'" % original_key)
key = copy.copy(original_key)
if start > (key.size - 1):
logger.debug("download_data EOF '%s' %i-%i [thread '%s']" % (path, start, end, thread_name))
return
with self.cache.get_lock(path):
data = self.cache.get(path, 'data')
if not data:
logger.debug("download_data no data (before) '%s' [thread '%s']" % (path, thread_name))
return
data_range = data.get('range')
if not data_range:
logger.debug("download_data no range (before) '%s' [thread '%s']"
% (path, thread_name))
return
new_interval = [start, min(end, key.size - 1)]
if data_range.interval.contains(new_interval): ### Can be removed ???
logger.debug("download_data '%s' %i-%i [thread '%s'] already downloaded"
% (path, start, end, thread_name))
return
else:
for i in data_range.ongoing_intervals.values():
if i[0] <= new_interval[0] and i[1] >= new_interval[1]:
logger.debug("download_data '%s' %i-%i [thread '%s'] already downloading"
% (path, start, end, thread_name))
return
data_range.ongoing_intervals[thread_name] = new_interval
if new_interval[0] == 0 and new_interval[1] == key.size -1:
range_headers = {}
else:
range_headers = { 'Range' : 'bytes=' + str(new_interval[0]) + '-' + str(new_interval[1]) }
range_headers.update(self.default_headers) ### Should I check self.requester_pays first?
retry = True
# for https://github.com/danilop/yas3fs/issues/46
retriesAttempted = 0
while retry:
# for https://github.com/danilop/yas3fs/issues/62
if key is None:
logger.warn("download_data 'key' is None!.. exiting retry loop")
break
retriesAttempted += 1
# for https://github.com/danilop/yas3fs/issues/46
if retriesAttempted > self.download_retries_num:
retry = False
logger.debug("download_data range '%s' '%s' [thread '%s'] max: %i sleep: %i retries: %i" % (path, range_headers, thread_name, self.download_retries_num, self.download_retries_sleep, retriesAttempted))
try:
if debug:
n1=dt.datetime.now()
if range_headers: # Use range headers only if necessary
bytes = key.get_contents_as_string(headers=range_headers)
else:
bytes = key.get_contents_as_string()
if debug:
n2=dt.datetime.now()
retry = False
except Exception as e:
logger.exception(e)
logger.info("download_data error '%s' %i-%i [thread '%s'] -> retrying max: %i sleep: %i retries: %i" % (path, start, end, thread_name, self.download_retries_num, self.download_retries_sleep, retriesAttempted))
time.sleep(self.download_retries_sleep) # for https://github.com/danilop/yas3fs/issues/46
key = copy.copy(self.get_key(path)) # Do I need this to overcome error "caching" ???
if debug:
elapsed = (n2-n1).total_seconds()
logger.debug("download_data done '%s' %i-%i [thread '%s'] elapsed %.6f" % (path, start, end, thread_name, elapsed))
with self.cache.get_lock(path):
data = self.cache.get(path, 'data')
if not data:
logger.debug("download_data no data (after) '%s' [thread '%s']" % (path, thread_name))
return
data_range = data.get('range')
if not data_range:
logger.debug("download_data no range (after) '%s' [thread '%s']" % (path, thread_name))
return
del data_range.ongoing_intervals[thread_name]
if not bytes:
length = 0
logger.debug("download_data no bytes '%s' [thread '%s']" % (path, thread_name))
else:
length = len(bytes)
logger.debug("download_data %i bytes '%s' [thread '%s']" % (length, path, thread_name))
if length > 0:
with data.get_lock():
no_content = False
if not data.content: # Usually for prefetches
no_content = True
data.open()
data.content.seek(start)
data.content.write(bytes)
new_interval = [start, start + length - 1]
data_range.interval.add(new_interval)
data.update_size()
if no_content:
data.close()
data_range.wake()
logger.debug("download_data end '%s' %i-%i [thread '%s']" % (path, start, end, thread_name))
with self.cache.get_lock(path):
data = self.cache.get(path, 'data')
data_range = data.get('range')
if data_range:
if data_range.interval.contains([0, key.size - 1]): # -1 ???
data.delete('range')
data.update_etag(key.etag[1:-1])
logger.debug("download_data all ended '%s' [thread '%s']" % (path, thread_name))
def get_to_do_on_s3(self, i):
while self.running:
try:
(key, pub, cmds) = self.s3_queue[i].get(True, 1) # 1 second time-out
# MUTABLE PROTECTION
# various sections of do_cmd_on_s3_now have the potential
# of mutating pub, this tries to keep the queue clean
# in case a retry happens.
pub = copy.copy(pub)
self.do_on_s3_now(key, pub, cmds)
self.s3_queue[i].task_done()
except QueueEmpty:
pass
@withplugin
def do_on_s3(self, key, pub, cmds):
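# Queue a set of S3 commands for asynchronous execution; the key name is
# hashed to a fixed per-key queue so that operations on the same key are
# executed in order. With s3_num == 0 the commands run synchronously.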
if self.s3_num == 0:
return self.do_on_s3_now(key, pub, cmds)
i = hash(key.name) % self.s3_num # To distribute files consistently across threads
self.s3_queue[i].put((key, pub, cmds))
@withplugin
def do_cmd_on_s3_now(self, key, pub, action, args, kargs):
logger.debug("do_cmd_on_s3_now action '%s' key '%r' args '%s' kargs '%s'" % (action, key, args, kargs))
# fuse/yas3fs is version unaware and all operation should
# happen to the current version
# also we don't track updated key.version_id in self.cache
# so it is likely that what was stored has been staled
key.version_id = None
try:
if action == 'delete':
path = pub[1]
key.delete()
del self.cache.entries[path]
elif action == 'copy':
# Otherwise we lose the Content-Type with S3 Copy
key.metadata['Content-Type'] = key.content_type
key.copy(*args, **kargs)
path = self.remove_prefix(args[1])
if path.endswith('/'):
# this is a directory, but internally it is stored
# without the trailing slash
path = path[:-1]
# renaming?
if path != key.name:
# del self.cache.entries[path]
if self.cache.has(path, 's3_busy'):
self.cache.entries[path]['s3_busy'] = 0
elif action == 'set_contents_from_string':
key.set_contents_from_string(*args,**kargs)
elif action == 'set_contents_from_file':
data = args[0] # First argument must be data
if data.cache.is_deleting(data.path):
return None
try:
# ignore deleting flag, though will fail w/ IOError
key.set_contents_from_file(data.get_content(wait_until_cleared_proplist = ['s3_busy']),**kargs)
except IOError as e:
logger.error("set_contents_from_file IOError on " + str(data))
raise e
etag = key.etag[1:-1]
# ignore deleting flag
with data.get_lock(wait_until_cleared_proplist = ['s3_busy']):
data.update_etag(etag, wait_until_cleared_proplist = ['s3_busy'])
data.delete('change', wait_until_cleared_proplist = ['s3_busy'])
pub.append(etag)
elif action == 'multipart_upload':
data = args[1] # Second argument must be data
if data.cache.is_deleting(data.path):
return None
full_size = args[2] # Third argument must be full_size
complete = self.multipart_upload(*args)
uploaded_key = self.s3_bucket.get_key(key.name.encode('utf-8'), headers=self.default_headers)
logger.debug("Multipart-upload Key Sizes '%r' local: %i remote: %i" %(key, full_size, uploaded_key.size))
if full_size != uploaded_key.size:
logger.error("Multipart-upload Key Sizes do not match for '%r' local: %i remote: %i" %(key, full_size, uploaded_key.size))
raise Exception("Multipart-upload KEY SIZES DO NOT MATCH")
etag = complete.etag[1:-1]
self.cache.delete(data.path, 'key')
# ignore deleting flag
with data.get_lock(wait_until_cleared_proplist = ['s3_busy']):
data.update_etag(etag, wait_until_cleared_proplist = ['s3_busy'])
data.delete('change', wait_until_cleared_proplist = ['s3_busy'])
pub.append(etag)
else:
logger.error("do_cmd_on_s3_now Unknown action '%s'" % action)
# SHOULD THROW EXCEPTION...
except Exception as e:
logger.exception(e)
raise e
logger.debug("do_cmd_on_s3_now action '%s' key '%r' args '%s' kargs '%s' done" % (action, key, args, kargs))
return pub
@withplugin
def do_cmd_on_s3_now_w_retries(self, key, pub, action, args, kargs, retries = 1):
last_exception = None
for tries in range(1, retries +1):
if tries > 1:
time.sleep(self.s3_retries_sleep) # Better wait N seconds before retrying
try:
logger.debug("do_cmd_on_s3_now_w_retries try %s action '%s' key '%r' args '%s' kargs '%s'" % (tries, action, key, args, kargs))
return self.do_cmd_on_s3_now(key, pub, action, args, kargs)
except Exception as e:
last_exception = e
logger.error("do_cmd_on_s3_now_w_retries FAILED '%s' key '%r' args '%s' kargs '%s'" % (action, key, args, kargs))
raise last_exception
@withplugin
def do_on_s3_now(self, key, pub, cmds):
for c in cmds:
action = c[0]
args = None
kargs = None
if len(c) > 1:
args = c[1]
if len(c) > 2:
kargs = c[2]
pub = self.do_cmd_on_s3_now_w_retries(key, pub, action, args, kargs, self.s3_retries)
if pub:
self.publish(pub)
def readlink(self, path):
logger.debug("readlink '%s'" % (path))
if self.cache.is_deleting(path):
logger.debug("readlink path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("readlink '%s' ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
self.cache.add(path)
if stat.S_ISLNK(self.getattr(path)['st_mode']):
if not self.check_data(path):
logger.debug("readlink '%s' ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
data = self.cache.get(path, 'data')
if data is None:
logger.error("readlink '%s' no data ENOENT" % (path))
raise FuseOSError(errno.ENOENT) # ??? That should not happen
data_range = data.get('range')
if data_range:
self.enqueue_download_data(path)
# self.download_data(path)
while True:
logger.debug("readlink wait '%s'" % (path))
data_range.wait()
logger.debug("readlink awake '%s'" % (path))
data_range = data.get('range')
if not data_range:
break
data.open()
link = data.get_content_as_string()
data.close()
return link.decode('utf-8')
def rmdir(self, path):
logger.debug("rmdir '%s'" % (path))
if self.cache.is_deleting(path):
logger.debug("rmdir path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("rmdir '%s' cache ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
k = self.get_key(path)
if not k and not self.cache.has(path) and not self.folder_has_contents(path):
logger.debug("rmdir '%s' S3 ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
dirs = self.cache.get(path, 'readdir')
if dirs is None:
if self.folder_has_contents(path, 2): # There is something inside the folder
logger.debug("rmdir '%s' S3 ENOTEMPTY" % (path))
raise FuseOSError(errno.ENOTEMPTY)
else:
if len(dirs) > 2:
logger.debug("rmdir '%s' cache ENOTEMPTY" % (path))
raise FuseOSError(errno.ENOTEMPTY)
###k.delete()
###self.publish(['rmdir', path])
self.cache.reset(path, with_deleting = bool(k)) # Cache invalidation
self.remove_from_parent_readdir(path)
if k:
logger.debug("rmdir '%s' '%s' S3" % (path, k))
pub = [ 'rmdir', path ]
cmds = [ [ 'delete', [] , { 'headers': self.default_headers } ] ]
self.do_on_s3(k, pub, cmds)
return 0
def truncate(self, path, size):
logger.debug("truncate '%s' '%i'" % (path, size))
if self.cache.is_deleting(path):
logger.debug("truncate path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("truncate '%s' '%i' ENOENT" % (path, size))
raise FuseOSError(errno.ENOENT)
self.cache.add(path)
if not self.check_data(path):
logger.debug("truncate '%s' '%i' ENOENT" % (path, size))
raise FuseOSError(errno.ENOENT)
while True:
data = self.cache.get(path, 'data')
if not data:
logger.error("truncate '%s' '%i' no data ENOENT" % (path, size))
raise FuseOSError(errno.ENOENT) # ??? That should not happen
data_range = data.get('range')
if not data_range:
break
if (size == 0) or (data_range.interval.contains([0, size - 1])):
data.delete('range')
break
self.enqueue_download_data(path, 0, size)
logger.debug("truncate wait '%s' '%i'" % (path, size))
data_range.wait()
logger.debug("truncate awake '%s' '%i'" % (path, size))
data.content.truncate(size)
now = get_current_time()
attr = self.get_metadata(path, 'attr')
old_size = attr['st_size']
data.set('change', True)
if size != old_size:
attr['st_size'] = size
data.update_size()
attr['st_mtime'] = now
attr['st_atime'] = now
return 0
### Should work for files in cache but not flushed to S3...
def rename(self, path, new_path):
logger.debug("rename '%s' '%s'" % (path, new_path))
if self.cache.is_deleting(path):
logger.debug("rename path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("rename '%s' '%s' ENOENT no '%s' from cache" % (path, new_path, path))
raise FuseOSError(errno.ENOENT)
key = self.get_key(path)
if not key and not self.cache.has(path):
logger.debug("rename '%s' '%s' ENOENT no '%s'" % (path, new_path, path))
raise FuseOSError(errno.ENOENT)
new_parent_path = os.path.dirname(new_path)
new_parent_key = self.get_key(new_parent_path)
if not new_parent_key and not self.folder_has_contents(new_parent_path):
logger.debug("rename '%s' '%s' ENOENT no parent path '%s'" % (path, new_path, new_parent_path))
raise FuseOSError(errno.ENOENT)
attr = self.getattr(path)
if stat.S_ISDIR(attr['st_mode']):
self.rename_path(path, new_path)
else:
self.rename_item(path, new_path)
self.remove_from_parent_readdir(path)
self.add_to_parent_readdir(new_path)
def rename_path(self, path, new_path):
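# Rename a directory: recurse depth-first into its entries, then rename
# the directory key itself.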
logger.debug("rename_path '%s' -> '%s'" % (path, new_path))
dirs = self.readdir(path)
for d in dirs:
if d in ['.', '..']:
continue
d_path = ''.join([path, '/', d])
d_new_path = ''.join([new_path, '/', d])
attr = self.getattr(d_path)
if stat.S_ISDIR(attr['st_mode']):
self.rename_path(d_path, d_new_path)
else:
self.rename_item(d_path, d_new_path)
self.rename_item(path, new_path, dir=True)
def rename_item(self, path, new_path, dir=False):
logger.debug("rename_item '%s' -> '%s' dir?%s" % (path, new_path, dir))
source_path = path
target_path = new_path
key = self.get_key(source_path)
self.cache.rename(source_path, target_path)
if key: # For files in cache or dir not on S3 but still not flushed to S3
self.rename_on_s3(key, source_path, target_path, dir)
def rename_on_s3(self, key, source_path, target_path, dir):
logger.debug("rename_on_s3 '%s' '%s' -> '%s' dir?%s" % (key, source_path, target_path, dir))
# Otherwise we lose the Content-Type with S3 Copy
key.metadata['Content-Type'] = key.content_type
### key.copy(key.bucket.name, target, key.metadata, preserve_acl=False)
target = self.join_prefix(target_path)
if dir:
target += '/'
pub = [ 'rename', source_path, target_path ]
if isinstance(target, bytes):
target_for_cmd = target.decode('utf-8')
else:
target_for_cmd = target
cmds = [ [ 'copy', [ key.bucket.name, target_for_cmd, key.metadata ],
{ 'preserve_acl': False , 'encrypt_key':self.aws_managed_encryption } ],
[ 'delete', [], { 'headers': self.default_headers } ] ]
self.do_on_s3(key, pub, cmds)
###key.delete()
###self.publish(['rename', source_path, target_path])
def mknod(self, path, mode, dev=None):
logger.debug("mknod '%s' '%i' '%s'" % (path, mode, dev))
with self.cache.get_lock(path):
if self.cache.is_not_empty(path):
logger.debug("mknod '%s' '%i' '%s' cache EEXIST" % (path, mode, dev))
raise FuseOSError(errno.EEXIST)
k = self.get_key(path)
if k:
logger.debug("mknod '%s' '%i' '%s' key EEXIST" % (path, mode, dev))
raise FuseOSError(errno.EEXIST)
self.cache.add(path)
now = get_current_time()
uid, gid = get_uid_gid()
attr = {}
attr['st_uid'] = uid
attr['st_gid'] = gid
attr['st_mode'] = int(stat.S_IFREG | mode)
attr['st_atime'] = now
attr['st_mtime'] = now
attr['st_ctime'] = now
attr['st_size'] = 0 # New file
if self.cache_on_disk > 0:
data = FSData(self.cache, 'mem', path) # New files (almost) always cache in mem - is it ok ???
else:
data = FSData(self.cache, 'disk', path)
self.cache.set(path, 'data', data)
data.set('change', True)
self.set_metadata(path, 'attr', attr)
self.set_metadata(path, 'xattr', {})
self.add_to_parent_readdir(path)
self.publish(['mknod', path])
return 0
def unlink(self, path):
logger.debug("unlink '%s'" % (path))
if self.cache.is_deleting(path):
logger.debug("unlink path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("unlink '%s' ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
k = self.get_key(path)
if not k and not self.cache.has(path):
logger.debug("unlink '%s' ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
self.cache.reset(path, with_deleting = bool(k)) # Cache invalidation
self.remove_from_parent_readdir(path)
if k:
logger.debug("unlink '%s' '%s' S3" % (path, k))
###k.delete()
###self.publish(['unlink', path])
pub = [ 'unlink', path ]
cmds = [ [ 'delete', [], { 'headers': self.default_headers } ] ]
self.do_on_s3(k, pub, cmds)
# self.do_on_s3_now(k, pub, cmds)
return 0
def create(self, path, mode, fi=None):
logger.debug("create '%s' '%i' '%s'" % (path, mode, fi))
return self.open(path, mode)
def open(self, path, flags):
logger.debug("open '%s' '%i'" % (path, flags))
with self.cache.get_lock(path):
self.cache.add(path)
if not self.check_data(path):
self.mknod(path, flags)
self.cache.get(path, 'data').open()
logger.debug("open '%s' '%i' '%s'" % (path, flags, self.cache.get(path, 'data').get('open')))
return 0
def release(self, path, flags):
logger.debug("release '%s' '%i'" % (path, flags))
if self.cache.is_deleting(path):
logger.debug("release path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("release '%s' '%i' ENOENT" % (path, flags))
raise FuseOSError(errno.ENOENT)
data = self.cache.get(path, 'data')
if data:
if data.has('change') and data.get('open') == 1: # Last one to release the file
self.upload_to_s3(path, data)
data.close() # Close after upload to have data.content populated for disk cache
logger.debug("release '%s' '%i' '%s'" % (path, flags, data.get('open')))
else:
logger.debug("release '%s' '%i'" % (path, flags))
return 0
def read(self, path, length, offset, fh=None):
logger.debug("read '%s' '%i' '%i' '%s'" % (path, length, offset, fh))
if self.cache.is_deleting(path):
logger.debug("read path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
if not self.cache.has(path) or self.cache.is_empty(path):
logger.debug("read '%s' '%i' '%i' '%s' ENOENT" % (path, length, offset, fh))
raise FuseOSError(errno.ENOENT)
retry = True
# for https://github.com/danilop/yas3fs/issues/46
retriesAttempted = 0
while retry:
retriesAttempted += 1
# for https://github.com/danilop/yas3fs/issues/46
if retriesAttempted > self.read_retries_num:
logger.error("read '%s' '%i' '%i' '%s' max read retries exceeded max: %i sleep: %i retries: %i, raising FuseOSError(errno.EIO) ''" % (path, length, offset, fh, self.read_retries_num, self.read_retries_sleep, retriesAttempted))
retry = False
self.invalidate_cache(path)
raise FuseOSError(errno.EIO)
data = self.cache.get(path, 'data')
if not data:
logger.debug("read '%s' '%i' '%i' '%s' no data" % (path, length, offset, fh))
return '' # Something better ???
data_range = data.get('range')
if data_range is None:
logger.debug("read '%s' '%i' '%i' '%s' no range" % (path, length, offset, fh))
break
attr = self.get_metadata(path, 'attr')
file_size = attr['st_size']
end_interval = min(offset + length, file_size) - 1
if offset > end_interval:
logger.debug("read '%s' '%i' '%i' '%s' offset=%i > end_interval=%i" %((path, length, offset, fh, offset, end_interval)))
return '' # Is this ok ???
read_interval = [offset, end_interval]
if data_range.interval.contains(read_interval):
if self.buffer_prefetch:
prefetch_start = end_interval + 1
prefetch_length = self.buffer_size * self.buffer_prefetch
logger.debug("download prefetch '%s' '%i' '%i'" % (path, prefetch_start, prefetch_length))
prefetch_end_interval = min(prefetch_start + prefetch_length, file_size) - 1
if prefetch_start < prefetch_end_interval:
prefetch_interval = [prefetch_start, prefetch_end_interval]
if not data_range.interval.contains(prefetch_interval):
self.enqueue_download_data(path, prefetch_start, prefetch_length, prefetch=True)
logger.debug("read '%s' '%i' '%i' '%s' in range" % (path, length, offset, fh))
break
else:
if retriesAttempted > 1:
logger.debug('%d retries' % (retriesAttempted))
time.sleep(self.read_retries_sleep)
# Note added max retries as this can go on forever... for https://github.com/danilop/yas3fs/issues/46
logger.debug("read '%s' '%i' '%i' '%s' out of range" % (path, length, offset, fh))
self.enqueue_download_data(path, offset, length)
logger.debug("read wait '%s' '%i' '%i' '%s'" % (path, length, offset, fh))
data_range.wait()
logger.debug("read awake '%s' '%i' '%i' '%s'" % (path, length, offset, fh))
# update atime just in the cache ???
with data.get_lock():
if not data.content:
logger.debug("read '%s' '%i' '%i' '%s' no content" % (path, length, offset, fh))
return '' # Something better ???
data.content.seek(offset)
return data.content.read(length)
def write(self, path, new_data, offset, fh=None):
logger.debug("write '%s' '%i' '%i' '%s'" % (path, len(new_data), offset, fh))
if self.cache.is_deleting(path):
logger.debug("write path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
if not self.cache.has(path) or self.cache.is_empty(path):
logger.debug("write '%s' '%i' '%i' '%s' ENOENT" % (path, len(new_data), offset, fh))
raise FuseOSError(errno.ENOENT)
if sys.version_info < (3, ):
if isinstance(new_data, unicode): # Fix for unicode
logger.debug("write '%s' '%i' '%i' '%s' unicode fix" % (path, len(new_data), offset, fh))
new_data = str(new_data.encode('utf-8'))
else:
if not isinstance(new_data, bytes):
new_data = new_data.encode('utf-8')
length = len(new_data)
data = self.cache.get(path, 'data')
data_range = data.get('range')
if data_range:
self.enqueue_download_data(path)
while data_range:
logger.debug("write wait '%s' '%i' '%i' '%s'" % (path, len(new_data), offset, fh))
data_range.wait()
logger.debug("write awake '%s' '%i' '%i' '%s'" % (path, len(new_data), offset, fh))
data_range = data.get('range')
with data.get_lock():
if not data.content:
logger.info("write awake '%s' '%i' '%i' '%s' no content" % (path, len(new_data), offset, fh))
return 0
if isinstance(data.content.name, bytes):
contentname = data.content.name.decode('utf-8')
else:
contentname = data.content.name
logger.debug("write '%s' '%i' '%i' '%s' '%s' content" % (path, len(new_data), offset, fh, contentname))
data.content.seek(offset)
data.content.write(new_data)
data.set('change', True)
now = get_current_time()
attr = self.get_metadata(path, 'attr')
old_size = attr['st_size']
new_size = max(old_size, offset + length)
if new_size != old_size:
attr['st_size'] = new_size
data.update_size()
attr['st_mtime'] = now
attr['st_atime'] = now
return length
def upload_to_s3(self, path, data):
logger.debug("upload_to_s3 '%s'" % path)
k = self.get_key(path)
if not k: # New key
k = UTF8DecodingKey(self.s3_bucket)
k.key = self.join_prefix(path)
self.cache.set(path, 'key', k)
now = get_current_time()
attr = self.get_metadata(path, 'attr', k)
attr['st_atime'] = now
attr['st_mtime'] = now
self.set_metadata(path, 'attr', None, k) # To update key metadata before upload to S3
self.set_metadata(path, 'xattr', None, k) # To update key metadata before upload to S3
mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
if k.size is None:
old_size = 0
else:
old_size = k.size
written = False
pub = [ 'upload', path ] # Add Etag before publish
headers = { 'Content-Type': mimetype }
headers.update(self.default_write_headers)
logger.debug("multipart test: key '%s' mp-num '%s' st_size '%s' mp-size '%s'" %(path, self.multipart_num, attr['st_size'], self.multipart_size))
if self.multipart_num > 0:
full_size = attr['st_size']
if full_size > self.multipart_size:
logger.debug("upload_to_s3 '%s' '%s' '%s' S3 multipart" % (path, k, mimetype))
cmds = [ [ 'multipart_upload', [ k.name, data, full_size, headers, k.metadata ] ] ]
written = True
if not written:
logger.debug("upload_to_s3 '%s' '%s' '%s' S3" % (path, k, mimetype))
###k.set_contents_from_file(data.content, headers=headers)
cmds = [ [ 'set_contents_from_file', [ data ], { 'headers': headers } ] ]
self.do_on_s3(k, pub, cmds)
###self.publish(['upload', path, etag])
logger.debug("upload_to_s3 '%s' done" % path)
def multipart_upload(self, key_path, data, full_size, headers, metadata):
logger.debug("multipart_upload '%s' '%s' '%s' '%s'" % (key_path, data, full_size, headers))
part_num = 0
part_pos = 0
part_queue = Queue()
        multipart_size = max(self.multipart_size, full_size // 100) # No more than 100 parts...
logger.debug("multipart_upload '%s' multipart_size '%s'" % (key_path, multipart_size))
while part_pos < full_size:
bytes_left = full_size - part_pos
            if bytes_left > multipart_size:
                part_size = multipart_size
else:
part_size = bytes_left
part_num += 1
part_queue.put([ part_num, PartOfFSData(data, part_pos, part_size) ])
part_pos += part_size
logger.debug("part from %i for %i" % (part_pos, part_size))
logger.debug("initiate_multipart_upload '%s' '%s'" % (key_path, headers))
num_threads = min(part_num, self.multipart_num)
logger.debug("multipart_upload '%s' num_threads '%s'" % (key_path, num_threads))
# encoding for https://github.com/danilop/yas3fs/issues/56
mpu = self.s3_bucket.initiate_multipart_upload(key_path.encode('utf-8'), headers=headers, metadata=metadata)
self.multipart_uploads_in_progress += 1
for i in range(num_threads):
t = TracebackLoggingThread(target=self.part_upload, args=(mpu, part_queue), name=("PartUpload-%04d" % i))
            t.daemon = True
t.start()
logger.debug("multipart_upload thread '%i' started" % i)
logger.debug("multipart_upload all threads started '%s' '%s' '%s'" % (key_path, data, headers))
part_queue.join()
logger.debug("multipart_upload all threads joined '%s' '%s' '%s'" % (key_path, data, headers))
if len(mpu.get_all_parts()) == part_num:
logger.debug("multipart_upload ok '%s' '%s' '%s'" % (key_path, data, headers))
new_key = mpu.complete_upload()
self.multipart_uploads_in_progress -= 1
else:
logger.debug("multipart_upload cancel '%s' '%s' '%s' '%i' != '%i'" % (key_path, data, headers, len(mpu.get_all_parts()), part_num))
mpu.cancel_upload()
new_key = None
self.multipart_uploads_in_progress -= 1
return new_key
def part_upload(self, mpu, part_queue):
logger.debug("new thread!")
try:
while (True):
logger.debug("trying to get a part from the queue")
[ num, part ] = part_queue.get(False)
for retry in range(self.multipart_retries):
logger.debug("begin upload of part %i retry %i part__ %s" % (num, retry, str(part.__dict__)))
try:
mpu.upload_part_from_file(fp=part, part_num=num)
break
except Exception as e:
# reset to initial position, before next retry
# this force fixes an issue where the position
# is off after an uncaught low-level connection
# exception is thrown
part.pos = 0
logger.exception(e)
logger.info("error during multipart upload part %i retry %i part__ %s : %s"
% (num, retry, str(part.__dict__), sys.exc_info()[0]))
time.sleep(self.s3_retries_sleep) # Better wait N seconds before retrying
logger.debug("end upload of part %i retry %i part__ %s" % (num, retry, str(part.__dict__)))
part_queue.task_done()
except QueueEmpty:
logger.debug("the queue is empty")
def chmod(self, path, mode):
logger.debug("chmod '%s' '%i'" % (path, mode))
if self.cache.is_deleting(path):
logger.debug("chmod path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("chmod '%s' '%i' ENOENT" % (path, mode))
raise FuseOSError(errno.ENOENT)
attr = self.get_metadata(path, 'attr')
if not attr:
return attr
if attr['st_mode'] != mode:
attr['st_mode'] = mode
self.set_metadata(path, 'attr')
return 0
def chown(self, path, uid, gid):
logger.debug("chown '%s' '%i' '%i'" % (path, uid, gid))
if self.cache.is_deleting(path):
logger.debug("chown path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("chown '%s' '%i' '%i' ENOENT" % (path, uid, gid))
raise FuseOSError(errno.ENOENT)
attr = self.get_metadata(path, 'attr')
if not attr:
return attr
changed = False
if uid != -1 and attr['st_uid'] != uid:
attr['st_uid'] = uid
changed = True
if gid != -1 and attr['st_gid'] != gid:
attr['st_gid'] = gid
changed = True
if changed:
self.set_metadata(path, 'attr')
return 0
def utimens(self, path, times=None):
logger.debug("utimens '%s' '%s'" % (path, times))
if self.cache.is_deleting(path):
logger.debug("utimens path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("utimens '%s' '%s' ENOENT" % (path, times))
raise FuseOSError(errno.ENOENT)
now = get_current_time()
atime, mtime = times if times else (now, now)
attr = self.get_metadata(path, 'attr')
logger.debug('utimens attr %s' % attr)
if not attr:
return attr
attr['st_atime'] = atime
attr['st_mtime'] = mtime
self.set_metadata(path, 'attr')
return 0
def getxattr(self, path, name, position=0):
logger.debug("getxattr '%s' '%s' '%i'" % (path, name, position))
if name in ['yas3fs.bucket', 'user.yas3fs.bucket']:
return self.s3_bucket_name
if self.cache.is_deleting(path):
logger.debug("getxattr path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
if self.cache.is_empty(path):
logger.debug("getxattr '%s' '%s' '%i' ENOENT" % (path, name, position))
raise FuseOSError(errno.ENOENT)
key = self.get_key(path)
if not key:
if self.darwin:
raise FuseOSError(errno.ENOENT) # Should return ENOATTR
else:
return '' # Should return ENOATTR
if name in ['yas3fs.key', 'user.yas3fs.key']:
return key.key
if name in ['yas3fs.URL', 'user.yas3fs.URL']:
tmp_key = copy.copy(key)
tmp_key.metadata = {} # To remove unnecessary metadata headers
tmp_key.version_id = None
return tmp_key.generate_url(expires_in=0, headers=self.default_headers, query_auth=False)
xattr = self.get_metadata(path, 'xattr')
if xattr is None:
logger.debug("getxattr <- '%s' '%s' '%i' ENOENT" % (path, name, position))
raise FuseOSError(errno.ENOENT)
if name in ['yas3fs.signedURL', 'user.yas3fs.signedURL']:
try:
seconds = int(xattr['user.yas3fs.expiration'])
except KeyError:
seconds = self.default_expiration
tmp_key = copy.copy(key)
tmp_key.metadata = {} # To remove unnecessary metadata headers
tmp_key.version_id = None
return tmp_key.generate_url(expires_in=seconds, headers=self.default_headers)
if name in ['yas3fs.expiration', 'user.yas3fs.expiration']:
if 'user.yas3fs.expiration' not in xattr:
return str(self.default_expiration) + ' (default)'
try:
return xattr[name]
except KeyError:
if self.darwin:
raise FuseOSError(errno.ENOENT) # Should return ENOATTR
else:
return '' # Should return ENOATTR
def listxattr(self, path):
logger.debug("listxattr '%s'" % (path))
if self.cache.is_deleting(path):
logger.debug("listxattr path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
if self.cache.is_empty(path):
logger.debug("listxattr '%s' ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
xattr = self.get_metadata(path, 'xattr')
if xattr is None:
logger.debug("listxattr <- '%s' '%s' '%i' ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
return set(self.yas3fs_xattrs + list(xattr.keys()))
def removexattr(self, path, name):
logger.debug("removexattr '%s''%s'" % (path, name))
if self.cache.is_deleting(path):
logger.debug("removexattr path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("removexattr '%s' '%s' ENOENT" % (path, name))
raise FuseOSError(errno.ENOENT)
if name in self.yas3fs_xattrs and name not in ['user.yas3fs.expiration']:
return 0 # Do nothing
xattr = self.get_metadata(path, 'xattr')
try:
del xattr[name]
self.set_metadata(path, 'xattr')
except KeyError:
if name not in self.yas3fs_xattrs:
logger.debug("removexattr '%s' '%s' should ENOATTR" % (path, name))
if self.darwin:
raise FuseOSError(errno.ENOENT) # Should return ENOATTR
else:
return '' # Should return ENOATTR
return 0
def setxattr(self, path, name, value, options, position=0):
logger.debug("setxattr '%s' '%s'" % (path, name))
if self.cache.is_deleting(path):
logger.debug("setxattr path '%s' is deleting -- throwing ENOENT" % (path))
raise FuseOSError(errno.ENOENT)
with self.cache.get_lock(path):
if self.cache.is_empty(path):
logger.debug("setxattr '%s' '%s' ENOENT" % (path, name))
raise FuseOSError(errno.ENOENT)
if name in self.yas3fs_xattrs and name not in ['user.yas3fs.expiration']:
return 0 # Do nothing
xattr = self.get_metadata(path, 'xattr')
if not xattr:
return xattr
if name not in xattr or xattr[name] != value:
xattr[name] = value
self.set_metadata(path, 'xattr')
return 0
def statfs(self, path):
logger.debug("statfs '%s'" % (path))
"""Returns a dictionary with keys identical to the statvfs C
structure of statvfs(3).
The 'f_frsize', 'f_favail', 'f_fsid' and 'f_flag' fields are ignored
On Mac OS X f_bsize and f_frsize must be a power of 2
(minimum 512)."""
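        # Illustrative note (not part of the original code): the values below
        # are fake but self-consistent; tools such as df compute the total
        # size as f_frsize * f_blocks = 2**30 * 2**30 bytes = 1 EiB, i.e.
        # effectively unlimited from the caller's point of view.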
return {
"f_bsize" : 1024 * 1024,
"f_frsize": 1024 * 1024 * 1024,
"f_blocks" : 1024 * 1024 * 1024,
"f_bfree" : 1024 * 1024 * 1024,
"f_bavail" : 1024 * 1024 * 1024,
"f_files" : 1024 * 1024 * 1024,
"f_ffree" : 1024 * 1024 * 1024,
"f_favail" : 1024 * 1024 * 1024,
# "f_fsid": 512,
# "f_flag" : 4096,
"f_namemax" : 512
}
class TracebackLoggingThread(threading.Thread):
def run(self):
try:
super(TracebackLoggingThread, self).run()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
logger.exception("Uncaught Exception in Thread")
raise
class CompressedRotatingFileHandler(logging.handlers.RotatingFileHandler):
""" compress old files
from http://roadtodistributed.blogspot.com/2011/04/compressed-rotatingfilehandler-for.html
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
logging.handlers.RotatingFileHandler.__init__(self, filename, mode, maxBytes, backupCount, encoding, delay)
def doRollover(self):
self.stream.close()
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d.gz" % (self.baseFilename, i)
dfn = "%s.%d.gz" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
#print "%s -> %s" % (sfn, dfn)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1.gz"
if os.path.exists(dfn):
os.remove(dfn)
import gzip
try:
f_in = open(self.baseFilename, 'rb')
f_out = gzip.open(dfn, 'wb')
f_out.writelines(f_in)
            except Exception:
if not os.path.exists(dfn):
if os.path.exists(self.baseFilename):
os.rename(self.baseFilename, dfn)
finally:
if "f_out" in dir() and f_out is not None:
f_out.close()
if "f_in" in dir() and f_in is not None:
f_in.close()
if os.path.exists(self.baseFilename):
os.remove(self.baseFilename)
#os.rename(self.baseFilename, dfn)
#print "%s -> %s" % (self.baseFilename, dfn)
self.mode = 'w'
self.stream = self._open()
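# Illustrative wiring (not part of the original module; the log path below is
# a made-up placeholder): main() attaches this handler when both --log and
# --log-backup-gzip are given, roughly equivalent to:
#   handler = CompressedRotatingFileHandler('/var/log/yas3fs.log',
#                                           maxBytes=100 * 1024 * 1024,
#                                           backupCount=10)
#   logging.getLogger('yas3fs').addHandler(handler)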
### Utility functions
def error_and_exit(error, exitCode=1):
logger.error(error + ", use -h for help.")
exit(exitCode)
def create_dirs(dirname):
logger.debug("create_dirs '%s'" % dirname)
try:
if not isinstance(dirname, bytes):
dirname = dirname.encode('utf-8')
os.makedirs(dirname)
logger.debug("create_dirs '%s' done" % dirname)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dirname):
logger.debug("create_dirs '%s' already there" % dirname)
pass
else:
raise
except Exception as exc: # Python >2.5
logger.debug("create_dirs '%s' ERROR %s" % (dirname, exc))
raise
def remove_empty_dirs(dirname):
logger.debug("remove_empty_dirs '%s'" % (dirname))
try:
if not isinstance(dirname, str):
# dirname must be a string for replace
dirname = dirname.decode('utf-8')
# fix for https://github.com/danilop/yas3fs/issues/150
# remove cache_path part from dirname to avoid accidental removal of /tmp (if empty)
os.chdir(yas3fsobj.cache_path)
dirname = dirname.replace(yas3fsobj.cache_path + '/', '')
dirname = dirname.encode('utf-8')
os.removedirs(dirname)
logger.debug("remove_empty_dirs '%s' done", dirname)
except OSError as exc: # Python >2.5
if exc.errno == errno.ENOTEMPTY:
logger.debug("remove_empty_dirs '%s' not empty", dirname)
pass
else:
raise
except Exception as e:
logger.exception(e)
logger.error("remove_empty_dirs exception: %s", dirname)
raise e
def create_dirs_for_file(filename):
logger.debug("create_dirs_for_file '%s'" % filename)
if not isinstance(filename, bytes):
filename = filename.encode('utf-8')
dirname = os.path.dirname(filename)
create_dirs(dirname)
def remove_empty_dirs_for_file(filename):
logger.debug("remove_empty_dirs_for_file '%s'" % filename)
if not isinstance(filename, bytes):
filename = filename.encode('utf-8')
dirname = os.path.dirname(filename)
remove_empty_dirs(dirname)
def get_current_time():
return time.mktime(time.gmtime())
def get_uid_gid():
uid, gid, pid = fuse_get_context()
return int(uid), int(gid)
def thread_is_not_alive(t):
return t is None or not t.is_alive()
def custom_sys_excepthook(type, value, tb):
logger.exception("Uncaught Exception: " + str(type) + " " + str(value) + " " + str(tb))
### Main
def main():
try:
default_aws_region = os.environ['AWS_DEFAULT_REGION']
except KeyError:
default_aws_region = 'us-east-1'
description = """
YAS3FS (Yet Another S3-backed File System) is a Filesystem in Userspace (FUSE) interface to Amazon S3.
It allows you to mount an S3 bucket (or a part of it, if you specify a path) as a local folder.
It works on Linux and Mac OS X.
For maximum speed all data read from S3 is cached locally on the node, in memory or on disk, depending on the file size.
Parallel multi-part downloads are used if there are reads in the middle of the file (e.g. for streaming).
Parallel multi-part uploads are used for files larger than a specified size.
With buffering enabled (the default) files can be accessed during the download from S3 (e.g. for streaming).
It can be used on more than one node to create a "shared" file system (i.e. a yas3fs "cluster").
SNS notifications are used to update other nodes in the cluster that something has changed on S3 and they need to invalidate their cache.
Notifications can be delivered to HTTP or SQS endpoints.
If the cache grows to its maximum size, the least recently accessed files are removed.
Signed URLs are provided through Extended file attributes (xattr).
AWS credentials can be passed using AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables.
In an EC2 instance an IAM role can be used to give access to S3/SNS/SQS resources.
AWS_DEFAULT_REGION environment variable can be used to set the default AWS region."""
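    # Illustrative invocation (the bucket, topic ARN and mount point are
    # made-up placeholders; every flag is defined by the parser built below):
    #   yas3fs s3://mybucket/media /mnt/media --region us-east-1 \
    #       --topic arn:aws:sns:us-east-1:123456789012:yas3fs --new-queue \
    #       --cache-path /var/cache/yas3fs --mkdir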
parser = argparse.ArgumentParser(description=description)
parser.add_argument('s3path', metavar='S3Path',
help='the S3 path to mount in s3://BUCKET/PATH format, ' +
'PATH can be empty, can contain subfolders and is created on first mount if not found in the BUCKET')
parser.add_argument('mountpoint', metavar='LocalPath',
help='the local mount point')
parser.add_argument('--region', default=default_aws_region,
help='AWS region to use for SNS and SQS (default is %(default)s)')
parser.add_argument('--topic', metavar='ARN',
help='SNS topic ARN')
parser.add_argument('--new-queue', action='store_true',
help='create a new SQS queue that is deleted on unmount to listen to SNS notifications, ' +
'overrides --queue, queue name is BUCKET-PATH-ID with alphanumeric characters only')
parser.add_argument('--new-queue-with-hostname', action='store_true',
help='create a new SQS queue with hostname in queuename, ' +
'overrides --queue, queue name is BUCKET-PATH-ID with alphanumeric characters only')
parser.add_argument('--queue', metavar='NAME',
help='SQS queue name to listen to SNS notifications, a new queue is created if it doesn\'t exist')
parser.add_argument('--queue-wait', metavar='N', type=int, default=20,
help='SQS queue wait time in seconds (using long polling, 0 to disable, default is %(default)s seconds)')
parser.add_argument('--queue-polling', metavar='N', type=int, default=0,
help='SQS queue polling interval in seconds (default is %(default)s seconds)')
parser.add_argument('--hostname',
help='public hostname to listen to SNS HTTP notifications')
parser.add_argument('--use-ec2-hostname', action='store_true',
help='get public hostname to listen to SNS HTTP notifications ' +
'from EC2 instance metadata (overrides --hostname)')
parser.add_argument('--port', metavar='N',
help='TCP port to listen to SNS HTTP notifications')
parser.add_argument('--cache-entries', metavar='N', type=int, default=100000,
help='max number of entries to cache (default is %(default)s entries)')
parser.add_argument('--cache-mem-size', metavar='N', type=int, default=128,
help='max size of the memory cache in MB (default is %(default)s MB)')
parser.add_argument('--cache-disk-size', metavar='N', type=int, default=1024,
help='max size of the disk cache in MB (default is %(default)s MB)')
parser.add_argument('--cache-path', metavar='PATH', default='',
help='local path to use for disk cache (default is /tmp/yas3fs-BUCKET-PATH-random)')
parser.add_argument('--recheck-s3', action='store_true',
help='Cached ENOENT (error no entry) rechecks S3 for new file/directory')
parser.add_argument('--cache-on-disk', metavar='N', type=int, default=0,
help='use disk (instead of memory) cache for files greater than the given size in bytes ' +
'(default is %(default)s bytes)')
parser.add_argument('--cache-check', metavar='N', type=int, default=5,
help='interval between cache size checks in seconds (default is %(default)s seconds)')
parser.add_argument('--s3-num', metavar='N', type=int, default=32,
help='number of parallel S3 calls (0 to disable writeback, default is %(default)s)')
parser.add_argument('--s3-retries', metavar='N', type=int, default=3,
                        help='number of times to retry any s3 write operation (default is %(default)s)')
parser.add_argument('--s3-retries-sleep', metavar='N', type=int, default=1,
help='retry sleep in seconds between s3 write operations (default is %(default)s)')
parser.add_argument('--s3-use-sigv4', action='store_true',
help='use AWS signature version 4 for authentication (required for some regions)')
parser.add_argument('--s3-endpoint',
help='endpoint of the s3 bucket, required with --s3-use-sigv4')
parser.add_argument('--download-num', metavar='N', type=int, default=4,
help='number of parallel downloads (default is %(default)s)')
parser.add_argument('--download-retries-num', metavar='N', type=int, default=60,
help='max number of retries when downloading (default is %(default)s)')
parser.add_argument('--download-retries-sleep', metavar='N', type=int, default=1,
help='how long to sleep in seconds between download retries (default is %(default)s seconds)')
parser.add_argument('--read-retries-num', metavar='N', type=int, default=10,
help='max number of retries when read() is invoked (default is %(default)s)')
parser.add_argument('--read-retries-sleep', metavar='N', type=int, default=1,
help='how long to sleep in seconds between read() retries (default is %(default)s seconds)')
parser.add_argument('--prefetch-num', metavar='N', type=int, default=2,
help='number of parallel prefetching downloads (default is %(default)s)')
parser.add_argument('--st-blksize', metavar='N', type=int, default=None,
help='st_blksize to return to getattr() callers in bytes, optional')
parser.add_argument('--buffer-size', metavar='N', type=int, default=10240,
help='download buffer size in KB (0 to disable buffering, default is %(default)s KB)')
parser.add_argument('--buffer-prefetch', metavar='N', type=int, default=0,
help='number of buffers to prefetch (default is %(default)s)')
parser.add_argument('--no-metadata', action='store_true',
help='don\'t write user metadata on S3 to persist file system attr/xattr')
parser.add_argument('--prefetch', action='store_true',
help='download file/directory content as soon as it is discovered ' +
'(doesn\'t download file content if download buffers are used)')
parser.add_argument('--mp-size',metavar='N', type=int, default=100,
help='size of parts to use for multipart upload in MB ' +
'(default value is %(default)s MB, the minimum allowed by S3 is 5 MB)')
parser.add_argument('--mp-num', metavar='N', type=int, default=4,
help='max number of parallel multipart uploads per file ' +
'(0 to disable multipart upload, default is %(default)s)')
parser.add_argument('--mp-retries', metavar='N', type=int, default=3,
help='max number of retries in uploading a part (default is %(default)s)')
parser.add_argument('--aws-managed-encryption', action='store_true',
help='Enable AWS managed encryption (sets header x-amz-server-side-encryption = AES256)')
parser.add_argument('--id',
help='a unique ID identifying this node in a cluster (default is a UUID)')
parser.add_argument('--mkdir', action='store_true',
help='create mountpoint if not found (and create intermediate directories as required)')
parser.add_argument('--nonempty', action='store_true',
help='allows mounts over a non-empty file or directory')
parser.add_argument('--uid', metavar='N',
help='default UID')
parser.add_argument('--gid', metavar='N',
help='default GID')
parser.add_argument('--umask', metavar='MASK',
help='default umask')
parser.add_argument('--read-only', action='store_true',
help='mount read only')
parser.add_argument('--expiration', metavar='N', type=int, default=30*24*60*60,
help='default expiration for signed URL via xattrs (in seconds, default is 30 days)')
parser.add_argument('--requester-pays', action='store_true',
help='requester pays for S3 interactions, the bucket must have Requester Pays enabled')
parser.add_argument('--no-allow-other', action='store_true',
help='Do not allow other users to access mounted directory')
parser.add_argument('--no-default-permissions', action='store_true',
help='do NOT honor file system permissions for non-root users')
parser.add_argument('--with-plugin-file', metavar='FILE',
help="YAS3FSPlugin file")
parser.add_argument('--with-plugin-class', metavar='CLASS',
help="YAS3FSPlugin class, if this is not set it will take the first child of YAS3FSPlugin from exception handler file")
parser.add_argument('-l', '--log', metavar='FILE',
help='filename for logs')
parser.add_argument('--log-mb-size', metavar='N', type=int, default=100,
                        help='max size of log file in MB (default is %(default)s MB)')
parser.add_argument('--log-backup-count', metavar='N', type=int, default=10,
                        help='number of backup log files to keep (default is %(default)s)')
parser.add_argument('--log-backup-gzip', action='store_true',
help='flag to gzip backup files')
parser.add_argument('-f', '--foreground', action='store_true',
help='run in foreground')
parser.add_argument('-d', '--debug', action='store_true',
help='show debug info')
parser.add_argument('-V', '--version', action='version', version='%(prog)s {version}'.format(version=__version__))
options = parser.parse_args()
global pp
pp = pprint.PrettyPrinter(indent=1)
global logger
logger = logging.getLogger('yas3fs')
formatter = ISO8601Formatter('%(threadName)s %(asctime)s %(levelname)s %(message)s')
    if options.log: # Rotate log files at the configured size
        log_size = options.log_mb_size * 1024 * 1024
if options.log_backup_gzip:
logHandler = CompressedRotatingFileHandler(options.log, maxBytes=log_size, backupCount=options.log_backup_count)
else:
logHandler = logging.handlers.RotatingFileHandler(options.log, maxBytes=log_size, backupCount=options.log_backup_count)
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
if options.foreground or not options.log:
logHandler = logging.StreamHandler()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
if options.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
sys.excepthook = custom_sys_excepthook # This is not working for new threads that start afterwards
logger.debug("options = %s" % options)
if options.mkdir:
create_dirs(options.mountpoint)
mount_options = {
'mountpoint':options.mountpoint,
'fsname':'yas3fs',
'foreground':options.foreground,
'allow_other':True,
'auto_cache':True,
'atime':False,
'max_read':131072,
'max_write':131072,
'max_readahead':131072,
'direct_io':True,
'default_permissions':True
}
if options.no_allow_other:
mount_options["allow_other"] = False
if options.uid:
mount_options['uid'] = options.uid
if options.gid:
mount_options['gid'] = options.gid
if options.umask:
mount_options['umask'] = options.umask
if options.read_only:
mount_options['ro'] = True
if options.no_default_permissions:
mount_options["default_permissions"] = False
if options.nonempty:
mount_options['nonempty'] = True
options.darwin = (sys.platform == "darwin")
if options.darwin:
mount_options['volname'] = os.path.basename(options.mountpoint)
mount_options['noappledouble'] = True
mount_options['daemon_timeout'] = 3600
# mount_options['auto_xattr'] = True # To use xattr
# mount_options['local'] = True # local option is quite unstable
else:
mount_options['big_writes'] = True # Not working on OSX
fuse = FUSE(YAS3FS(options), **mount_options)
if __name__ == '__main__':
main()
|
{
"content_hash": "0a2be20fd8639087821c2f5ae40be02a",
"timestamp": "",
"source": "github",
"line_count": 3397,
"max_line_length": 242,
"avg_line_length": 45.226964969090375,
"alnum_prop": 0.5354018589393111,
"repo_name": "danilop/yas3fs",
"id": "9aee26d71d0ffd4a07576a19ac21d46420a3e04e",
"size": "153659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yas3fs/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189397"
},
{
"name": "Shell",
"bytes": "3204"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('superlachaise_api', '0005_auto_20150622_1855'),
]
operations = [
migrations.CreateModel(
name='DeletedObject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('notes', models.TextField(verbose_name='notes', blank=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('target_object_class', models.CharField(max_length=255, verbose_name='target object class', choices=[(b'SuperLachaiseCategory', 'superlachaise category'), (b'OpenStreetMapElement', 'openstreetmap element'), (b'WikidataEntry', 'wikidata entry'), (b'WikimediaCommonsCategory', 'wikimedia commons category'), (b'SuperLachaisePOI', 'superlachaise POI')])),
('target_object_id', models.CharField(max_length=255, verbose_name='target object id')),
],
options={
'ordering': ['modified'],
'verbose_name': 'deleted object',
'verbose_name_plural': 'deleted objects',
},
),
migrations.AlterUniqueTogether(
name='deletedobject',
unique_together=set([('target_object_class', 'target_object_id')]),
),
]
|
{
"content_hash": "e4626c112c78d11d97e803aa259bd21d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 369,
"avg_line_length": 47.121212121212125,
"alnum_prop": 0.6064308681672026,
"repo_name": "MaximeLM/superlachaise_api",
"id": "973d0596628344187f0c259b78015f36dbffe441",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/0006_auto_20150622_1902.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5176"
},
{
"name": "Python",
"bytes": "296396"
}
],
"symlink_target": ""
}
|
"""The tests device sun light trigger component."""
# pylint: disable=too-many-public-methods,protected-access
import os
import unittest
import blumate.loader as loader
from blumate.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from blumate.components import (
device_tracker, light, sun, device_sun_light_trigger)
from blumate.helpers import event_decorators
from tests.common import (
get_test_config_dir, get_test_home_assistant, ensure_sun_risen,
ensure_sun_set)
KNOWN_DEV_YAML_PATH = os.path.join(get_test_config_dir(),
device_tracker.YAML_DEVICES)
def setUpModule(): # pylint: disable=invalid-name
"""Write a device tracker known devices file to be used."""
device_tracker.update_config(
KNOWN_DEV_YAML_PATH, 'device_1', device_tracker.Device(
None, None, None, True, 'device_1', 'DEV1',
picture='http://example.com/dev1.jpg'))
device_tracker.update_config(
KNOWN_DEV_YAML_PATH, 'device_2', device_tracker.Device(
None, None, None, True, 'device_2', 'DEV2',
picture='http://example.com/dev2.jpg'))
def tearDownModule(): # pylint: disable=invalid-name
"""Remove device tracker known devices file."""
os.remove(KNOWN_DEV_YAML_PATH)
class TestDeviceSunLightTrigger(unittest.TestCase):
"""Test the device sun light trigger module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
event_decorators.HASS = self.hass
self.scanner = loader.get_component(
'device_tracker.test').get_scanner(None, None)
self.scanner.reset()
self.scanner.come_home('DEV1')
loader.get_component('light.test').init()
self.assertTrue(device_tracker.setup(self.hass, {
device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
}))
self.assertTrue(light.setup(self.hass, {
light.DOMAIN: {CONF_PLATFORM: 'test'}
}))
self.assertTrue(sun.setup(
self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}))
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
event_decorators.HASS = None
def test_lights_on_when_sun_sets(self):
"""Test lights go on when there is someone home and the sun sets."""
self.assertTrue(device_sun_light_trigger.setup(
self.hass, {device_sun_light_trigger.DOMAIN: {}}))
ensure_sun_risen(self.hass)
light.turn_off(self.hass)
self.hass.pool.block_till_done()
ensure_sun_set(self.hass)
self.hass.pool.block_till_done()
self.assertTrue(light.is_on(self.hass))
def test_lights_turn_off_when_everyone_leaves(self):
"""Test lights turn off when everyone leaves the house."""
light.turn_on(self.hass)
self.hass.pool.block_till_done()
self.assertTrue(device_sun_light_trigger.setup(
self.hass, {device_sun_light_trigger.DOMAIN: {}}))
self.hass.states.set(device_tracker.ENTITY_ID_ALL_DEVICES,
STATE_NOT_HOME)
self.hass.pool.block_till_done()
self.assertFalse(light.is_on(self.hass))
def test_lights_turn_on_when_coming_home_after_sun_set(self):
"""Test lights turn on when coming home after sun set."""
light.turn_off(self.hass)
ensure_sun_set(self.hass)
self.hass.pool.block_till_done()
self.assertTrue(device_sun_light_trigger.setup(
self.hass, {device_sun_light_trigger.DOMAIN: {}}))
self.hass.states.set(
device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)
self.hass.pool.block_till_done()
self.assertTrue(light.is_on(self.hass))
|
{
"content_hash": "111143a867c05c6f9f0bb48e33ca79e2",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 76,
"avg_line_length": 33.577586206896555,
"alnum_prop": 0.6354300385109114,
"repo_name": "bdfoster/blumate",
"id": "f712f2808cc1f02b75bdc48bc70fd5b6e98890de",
"size": "3895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/test_device_sun_light_trigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1309487"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2460958"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6407"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ cookiecutter.package_name }}.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "506d3957630d52c0e64e1126a8d0fdfb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 95,
"avg_line_length": 28,
"alnum_prop": 0.7063492063492064,
"repo_name": "istrategylabs/mo-django",
"id": "aa64e972f3d0845d8f67529a47fbd24a03d9f751",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76"
},
{
"name": "HTML",
"bytes": "3194"
},
{
"name": "JavaScript",
"bytes": "4806"
},
{
"name": "Python",
"bytes": "13074"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from django.db import models
class Platform(models.Model):
name = models.CharField(max_length=200)
class Meta:
ordering = ['name']
def __str__(self):
return '{}'.format(self.name)
@staticmethod
def autocomplete_search_fields():
return ('name__exact', 'name__icontains')
class Game(models.Model):
# Metadata.
name = models.CharField(max_length=200)
platform = models.ForeignKey(Platform, null=True, related_name='games')
# Imagery.
image_art = models.ImageField('art', blank=True, upload_to='games',
help_text='16:9 art. Used for backgrounds, etc. Minimum size should be 1280x720.')
image_boxart = models.ImageField('boxart', blank=True, upload_to='games',
help_text='8:11 art akin to Twitch. Used for supplimentary display, lists, etc.')
# Statuses.
is_abandoned = models.BooleanField('is abandoned?', default=False,
help_text='Has this game been abandoned for good?')
is_completed = models.BooleanField('is completed?', default=False,
help_text='Has this game been completed (if applicable).' )
def __str__(self):
return '{}'.format(self.name)
@staticmethod
def autocomplete_search_fields():
return ('name__exact', 'name__icontains')
|
{
"content_hash": "ab932c4e98f452f8221b98552e967be9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 90,
"avg_line_length": 32.15,
"alnum_prop": 0.6516329704510109,
"repo_name": "bryanveloso/avalonstar-tv",
"id": "56e57a16954bf0e1fb8314590417ffa5ebaf19f9",
"size": "1310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/games/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103019"
},
{
"name": "CoffeeScript",
"bytes": "8505"
},
{
"name": "HTML",
"bytes": "8917"
},
{
"name": "JavaScript",
"bytes": "7461"
},
{
"name": "Python",
"bytes": "78004"
}
],
"symlink_target": ""
}
|
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import fields
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import volume_types
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.PortOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center'),
cfg.BoolOpt('dell_sc_verify_cert',
default=False,
help='Enable HTTPS SC certificate verification'),
cfg.StrOpt('secondary_san_ip',
default='',
help='IP address of secondary DSM controller'),
cfg.StrOpt('secondary_san_login',
default='Admin',
help='Secondary DSM user name'),
cfg.StrOpt('secondary_san_password',
default='',
               help='Secondary DSM user password',
secret=True),
cfg.PortOpt('secondary_sc_api_port',
default=3033,
help='Secondary Dell API port')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
driver.ExtendVD, driver.ManageableSnapshotsVD,
driver.SnapshotVD, driver.BaseVD):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common_opts)
self.configuration.append_config_values(san_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
self.backends = self.configuration.safe_get('replication_device')
self.replication_enabled = True if self.backends else False
self.is_direct_connect = False
self.active_backend_id = kwargs.get('active_backend_id', None)
self.failed_over = (self.active_backend_id is not None)
self.storage_protocol = 'iSCSI'
self.failback_timeout = 30
def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ...
7.38197504E8 Bytes
Need to split that apart and convert to GB.
:returns: gbs in int form
"""
try:
n = spacestring.split(' ', 1)
fgbs = float(n[0]) / 1073741824.0
igbs = int(fgbs)
return igbs
except Exception:
# If any of that blew up it isn't in the format we
# thought so eat our error and return None
return None
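    # Illustrative example (not part of the original driver): the REST API
    # reports space as a string such as '1.073741824E10 Bytes'; splitting on
    # the first space and dividing by 2**30 yields 10 GB, truncated to int:
    #   >>> int(float('1.073741824E10 Bytes'.split(' ', 1)[0]) / 1073741824.0)
    #   10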
def do_setup(self, context):
"""One time driver setup.
Called once by the manager after the driver is loaded.
Sets up clients, check licenses, sets up protocol
specific helpers.
"""
self._client = dell_storagecenter_api.StorageCenterApiHelper(
self.configuration, self.active_backend_id, self.storage_protocol)
def check_for_setup_error(self):
"""Validates the configuration information."""
with self._client.open_connection() as api:
api.find_sc()
self.is_direct_connect = api.is_direct_connect
if self.is_direct_connect and self.replication_enabled:
msg = _('Dell Cinder driver configuration error replication '
'not supported with direct connect.')
raise exception.InvalidHost(reason=msg)
# If we are a healthy replicated system make sure our backend
# is alive.
if self.replication_enabled and not self.failed_over:
# Check that our replication destinations are available.
for backend in self.backends:
replssn = backend['target_device_id']
try:
# Just do a find_sc on it. If it raises we catch
# that and raise with a correct exception.
api.find_sc(int(replssn))
except exception.VolumeBackendAPIException:
msg = _('Dell Cinder driver configuration error '
'replication_device %s not found') % replssn
raise exception.InvalidHost(reason=msg)
def _get_volume_extra_specs(self, volume):
"""Gets extra specs for the given volume."""
type_id = volume.get('volume_type_id')
if type_id:
return volume_types.get_volume_type_extra_specs(type_id)
return {}
def _add_volume_to_consistency_group(self, api, scvolume, volume):
"""Just a helper to add a volume to a consistency group.
        :param api: Dell SC API object.
:param scvolume: Dell SC Volume object.
:param volume: Cinder Volume object.
:returns: Nothing.
"""
if scvolume and volume.get('consistencygroup_id'):
profile = api.find_replay_profile(
volume.get('consistencygroup_id'))
if profile:
api.update_cg_volumes(profile, [volume])
def _do_repl(self, api, volume):
"""Checks if we can do replication.
Need the extra spec set and we have to be talking to EM.
:param api: Dell REST API object.
:param volume: Cinder Volume object.
:return: Boolean (True if replication enabled), Boolean (True if
                 replication type is sync).
"""
do_repl = False
sync = False
# Repl does not work with direct connect.
if not self.failed_over and not self.is_direct_connect:
specs = self._get_volume_extra_specs(volume)
do_repl = specs.get('replication_enabled') == '<is> True'
sync = specs.get('replication_type') == '<in> sync'
return do_repl, sync
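    # Illustrative example (not part of the original driver): a volume type
    # with extra specs
    #   {'replication_enabled': '<is> True', 'replication_type': '<in> sync'}
    # makes _do_repl return (True, True) on a healthy backend, and
    # (False, False) once the driver is failed over or using a direct
    # connection.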
def _create_replications(self, api, volume, scvolume):
"""Creates any appropriate replications for a given volume.
:param api: Dell REST API object.
:param volume: Cinder volume object.
:param scvolume: Dell Storage Center Volume object.
:return: model_update
"""
# Replication V2
# for now we assume we have an array named backends.
replication_driver_data = None
# Replicate if we are supposed to.
do_repl, sync = self._do_repl(api, volume)
if do_repl:
for backend in self.backends:
# Check if we are to replicate the active replay or not.
specs = self._get_volume_extra_specs(volume)
replact = specs.get('replication:activereplay') == '<is> True'
if not api.create_replication(scvolume,
backend['target_device_id'],
backend.get('qosnode',
'cinderqos'),
sync,
backend.get('diskfolder', None),
replact):
# Create replication will have printed a better error.
msg = _('Replication %(name)s to %(ssn)s failed.') % {
'name': volume['id'],
'ssn': backend['target_device_id']}
raise exception.VolumeBackendAPIException(data=msg)
if not replication_driver_data:
replication_driver_data = backend['target_device_id']
else:
replication_driver_data += ','
replication_driver_data += backend['target_device_id']
# If we did something return model update.
model_update = {}
if replication_driver_data:
model_update = {'replication_status': 'enabled',
'replication_driver_data': replication_driver_data}
return model_update
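    # Illustrative example (not part of the original driver): with two
    # replication_device backends whose target_device_id values are 65495 and
    # 65496, a successful pass returns roughly
    #   {'replication_status': 'enabled',
    #    'replication_driver_data': '65495,65496'}
    # and _delete_replications later reads back that same comma-separated
    # string.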
@staticmethod
def _cleanup_failed_create_volume(api, volumename):
try:
api.delete_volume(volumename)
except exception.VolumeBackendAPIException as ex:
LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg)
def create_volume(self, volume):
"""Create a volume."""
model_update = {}
# We use id as our name as it is unique.
volume_name = volume.get('id')
# Look for our volume
volume_size = volume.get('size')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
storage_profile = specs.get('storagetype:storageprofile')
replay_profile_string = specs.get('storagetype:replayprofiles')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name,
'size': volume_size})
scvolume = None
with self._client.open_connection() as api:
try:
scvolume = api.create_volume(volume_name,
volume_size,
storage_profile,
replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume %s') %
volume_name)
# Update Consistency Group
self._add_volume_to_consistency_group(api, scvolume, volume)
# Create replications. (Or not. It checks.)
model_update = self._create_replications(api, volume, scvolume)
# Save our provider_id.
model_update['provider_id'] = scvolume['instanceId']
except Exception:
# if we actually created a volume but failed elsewhere
# clean up the volume now.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
data=_('Unable to create volume. Backend down.'))
return model_update
def _split_driver_data(self, replication_driver_data):
"""Splits the replication_driver_data into an array of ssn strings.
:param replication_driver_data: A string of comma separated SSNs.
:returns: SSNs in an array of strings.
"""
ssnstrings = []
# We have any replication_driver_data.
if replication_driver_data:
# Split the array and wiffle through the entries.
            for ssn in replication_driver_data.split(','):
                # Strip any junk from the string.
                ssnstring = ssn.strip()
# Anything left?
if ssnstring:
# Add it to our array.
ssnstrings.append(ssnstring)
return ssnstrings
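    # Illustrative example (not part of the original driver): stray whitespace
    # and empty entries are dropped, so
    #   self._split_driver_data(' 65495, 65496,, ')
    # returns ['65495', '65496'].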
def _delete_replications(self, api, volume):
"""Delete replications associated with a given volume.
We should be able to roll through the replication_driver_data list
of SSNs and delete replication objects between them and the source
volume.
:param api: Dell REST API object.
:param volume: Cinder Volume object
:return:
"""
do_repl, sync = self._do_repl(api, volume)
if do_repl:
replication_driver_data = volume.get('replication_driver_data')
if replication_driver_data:
ssnstrings = self._split_driver_data(replication_driver_data)
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
scvol = api.find_volume(volume_name, provider_id)
# This is just a string of ssns separated by commas.
# Trundle through these and delete them all.
for ssnstring in ssnstrings:
ssn = int(ssnstring)
if not api.delete_replication(scvol, ssn):
LOG.warning(_LW('Unable to delete replication of '
'Volume %(vname)s to Storage Center '
'%(sc)s.'),
{'vname': volume_name,
'sc': ssnstring})
# If none of that worked or there was nothing to do doesn't matter.
# Just move on.
def delete_volume(self, volume):
deleted = False
# We use id as our name as it is unique.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
LOG.debug('Deleting volume %s', volume_name)
with self._client.open_connection() as api:
try:
self._delete_replications(api, volume)
deleted = api.delete_volume(volume_name, provider_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
volume_name)
# if there was an error we will have raised an
# exception. If it failed to delete it is because
# the conditions to delete a volume were not met.
if deleted is False:
raise exception.VolumeIsBusy(volume_name=volume_name)
def create_snapshot(self, snapshot):
"""Create snapshot"""
# our volume name is the volume id
volume_name = snapshot.get('volume_id')
# TODO(tswanson): Is there any reason to think this will be set
# before I create the snapshot? Doesn't hurt to try to get it.
provider_id = snapshot.get('provider_id')
snapshot_id = snapshot.get('id')
LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
scvolume = api.find_volume(volume_name, provider_id)
if scvolume is not None:
replay = api.create_replay(scvolume, snapshot_id, 0)
if replay:
return {'status': 'available',
'provider_id': scvolume['instanceId']}
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
snapshot['status'] = 'error_creating'
msg = _('Failed to create snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
model_update = {}
scvolume = None
volume_name = volume.get('id')
src_provider_id = snapshot.get('provider_id')
src_volume_name = snapshot.get('volume_id')
# This snapshot could have been created on its own or as part of a
# cgsnapshot. If it was a cgsnapshot it will be identified on the Dell
# backend under cgsnapshot_id. Given the volume ID and the
# cgsnapshot_id we can find the appropriate snapshot.
# So first we look for cgsnapshot_id. If that is blank then it must
# have been a normal snapshot which will be found under snapshot_id.
snapshot_id = snapshot.get('cgsnapshot_id')
if not snapshot_id:
snapshot_id = snapshot.get('id')
LOG.debug(
'Creating new volume %(vol)s from snapshot %(snap)s '
'from vol %(src)s',
{'vol': volume_name,
'snap': snapshot_id,
'src': src_volume_name})
with self._client.open_connection() as api:
try:
srcvol = api.find_volume(src_volume_name, src_provider_id)
if srcvol is not None:
replay = api.find_replay(srcvol, snapshot_id)
if replay is not None:
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
replay_profile_string = specs.get(
'storagetype:replayprofiles')
scvolume = api.create_view_volume(
volume_name, replay, replay_profile_string)
# Extend Volume
if scvolume and (volume['size'] >
snapshot["volume_size"]):
LOG.debug('Resize the new volume to %s.',
volume['size'])
scvolume = api.expand_volume(scvolume,
volume['size'])
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(snap)s.') %
{'name': volume_name,
'snap': snapshot_id})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
# Save our instanceid.
model_update['provider_id'] = (
scvolume['instanceId'])
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s created from %(snap)s',
{'vol': volume_name,
'snap': snapshot_id})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
model_update = {}
scvolume = None
src_volume_name = src_vref.get('id')
src_provider_id = src_vref.get('provider_id')
volume_name = volume.get('id')
LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
{'clone': volume_name,
'vol': src_volume_name})
with self._client.open_connection() as api:
try:
srcvol = api.find_volume(src_volume_name, src_provider_id)
if srcvol is not None:
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
replay_profile_string = specs.get(
'storagetype:replayprofiles')
# Create our volume
scvolume = api.create_cloned_volume(
volume_name, srcvol, replay_profile_string)
# Extend Volume
if scvolume and volume['size'] > src_vref['size']:
LOG.debug('Resize the volume to %s.', volume['size'])
scvolume = api.expand_volume(scvolume, volume['size'])
# If either of those didn't work we bail.
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(vol)s.') %
{'name': volume_name,
'vol': src_volume_name})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
# Save our provider_id.
model_update['provider_id'] = scvolume['instanceId']
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s cloned from %(src)s',
{'vol': volume_name,
'src': src_volume_name})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def delete_snapshot(self, snapshot):
"""delete_snapshot"""
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
provider_id = snapshot.get('provider_id')
LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
scvolume = api.find_volume(volume_name, provider_id)
if scvolume and api.delete_replay(scvolume, snapshot_id):
return
# if we are here things went poorly.
snapshot['status'] = 'error_deleting'
msg = _('Failed to delete snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_export(self, context, volume, connector):
"""Create an export of a volume.
The volume exists on creation and will be visible on
initialize connection. So nothing to do here.
"""
pass
def ensure_export(self, context, volume):
"""Ensure an export of a volume.
Per the eqlx driver we just make sure that the volume actually
exists where we think it does.
"""
scvolume = None
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
LOG.debug('Checking existence of volume %s', volume_name)
with self._client.open_connection() as api:
try:
scvolume = api.find_volume(volume_name, provider_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
msg = _('Unable to find volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def remove_export(self, context, volume):
"""Remove an export of a volume.
We do nothing here to match the nothing we do in create export. Again
we do everything in initialize and terminate connection.
"""
pass
def extend_volume(self, volume, new_size):
"""Extend the size of the volume."""
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
LOG.debug('Extending volume %(vol)s to %(size)s',
{'vol': volume_name,
'size': new_size})
if volume is not None:
with self._client.open_connection() as api:
scvolume = api.find_volume(volume_name, provider_id)
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
msg = _('Unable to extend volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
with self._client.open_connection() as api:
storageusage = api.get_storage_usage()
if not storageusage:
msg = _('Unable to retrieve volume stats.')
raise exception.VolumeBackendAPIException(message=msg)
# all of this is basically static for now
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.storage_protocol
data['reserved_percentage'] = 0
data['consistencygroup_support'] = True
data['thin_provisioning_support'] = True
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
data['QoS_support'] = False
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication_type'] = ['async', 'sync']
data['replication_count'] = len(self.backends)
replication_targets = []
# Trundle through our backends.
for backend in self.backends:
target_device_id = backend.get('target_device_id')
if target_device_id:
replication_targets.append(target_device_id)
data['replication_targets'] = replication_targets
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
'free': data['free_capacity_gb']})
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
# We use id as our volume name so we need to rename the backend
# volume to the original volume name.
original_volume_name = volume.get('id')
current_name = new_volume.get('id')
# We should have this. If we don't we'll set it below.
provider_id = new_volume.get('provider_id')
LOG.debug('update_migrated_volume: %(current)s to %(original)s',
{'current': current_name,
'original': original_volume_name})
if original_volume_name:
with self._client.open_connection() as api:
scvolume = api.find_volume(current_name, provider_id)
if (scvolume and
api.rename_volume(scvolume, original_volume_name)):
# Replicate if we are supposed to.
model_update = self._create_replications(api,
new_volume,
scvolume)
model_update['_name_id'] = None
model_update['provider_id'] = scvolume['instanceId']
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
original_volume_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
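    # Editor's note: a hedged illustration of the shape of the model_update
    # returned above; the provider_id value is hypothetical. On success it
    # looks roughly like
    #     {'_name_id': None, 'provider_id': '12345.101',
    #      'replication_status': 'enabled',       # only when replication is on
    #      'replication_driver_data': '12345'}    # only when replication is on
    # and on failure it falls back to {'_name_id': <new volume's name id>}.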
def create_consistencygroup(self, context, group):
"""This creates a replay profile on the storage backend.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:returns: Nothing on success.
:raises: VolumeBackendAPIException
"""
gid = group['id']
with self._client.open_connection() as api:
cgroup = api.create_replay_profile(gid)
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
return
msg = _('Unable to create consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group, volumes):
"""Delete the Dell SC profile associated with this consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:returns: Updated model_update, volumes.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if profile:
api.delete_replay_profile(profile)
# If we are here because we found no profile that should be fine
# as we are trying to delete it anyway.
# Trundle through the list deleting the volumes.
for volume in volumes:
self.delete_volume(volume)
volume['status'] = 'deleted'
model_update = {'status': group['status']}
return model_update, volumes
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
model_update is a dictionary that the driver wants the manager
to update upon a successful return. If None is returned, the manager
will set the status to 'available'.
add_volumes_update and remove_volumes_update are lists of dictionaries
that the driver wants the manager to update upon a successful return.
Note that each entry requires a {'id': xxx} so that the correct
volume entry can be updated. If None is returned, the volume will
        retain its original status. Also note that you cannot directly
assign add_volumes to add_volumes_update as add_volumes is a list of
cinder.db.sqlalchemy.models.Volume objects and cannot be used for
db update directly. Same with remove_volumes.
If the driver throws an exception, the status of the group as well as
those of the volumes to be added/removed will be set to 'error'.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if not profile:
LOG.error(_LE('Cannot find Consistency Group %s'), gid)
elif api.update_cg_volumes(profile,
add_volumes,
remove_volumes):
LOG.info(_LI('Updated Consistency Group %s'), gid)
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
msg = _('Unable to update consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
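    # Editor's note: illustrative only. A successful update returns
    # (None, None, None) so the manager marks the group 'available'. If a
    # driver did want per-volume updates, the docstring above implies lists of
    # dicts keyed by id, e.g. (hypothetical values):
    #     add_volumes_update = [{'id': 'volume-uuid', 'status': 'available'}]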
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Takes a snapshot of the consistency group.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to take.
:param snapshots: List of snapshots for this cgsnapshot.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.debug('profile %s replayid %s', profile, snapshotid)
if api.snap_cg_replay(profile, snapshotid, 0):
snapshot_updates = []
for snapshot in snapshots:
snapshot_updates.append({
'id': snapshot.id,
'status': fields.SnapshotStatus.AVAILABLE
})
model_update = {'status': 'available'}
return model_update, snapshot_updates
# That didn't go well. Tell them why. Then bomb out.
LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
msg = _('Unable to snap Consistency Group %s') % cgid
raise exception.VolumeBackendAPIException(data=msg)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot.
        If the profile isn't found, return success. If the replay (the
        snapshot) fails to delete, raise an exception.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to delete.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
msg = (_('Unable to delete Consistency Group snapshot %s')
% snapshotid)
raise exception.VolumeBackendAPIException(data=msg)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.DELETED
model_update = {'status': 'deleted'}
return model_update, snapshots
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
        1. Rename the backend storage object so that it matches the
           volume['name'], which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
api.manage_existing(volume['id'], existing_ref)
# Replicate if we are supposed to.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
scvolume = api.find_volume(volume_name, provider_id)
model_update = self._create_replications(api, volume, scvolume)
if model_update:
return model_update
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Only return a model_update if we have replication info to add.
return None
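    # Editor's note: a sketch of the existing_ref accepted above; the values
    # are hypothetical. Either key is sufficient:
    #     {'source-name': 'my-preexisting-volume'}
    #     {'source-id': '64702.4721'}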
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
return api.get_unmanaged_volume_size(existing_ref)
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param volume: Cinder volume to unmanage
"""
with self._client.open_connection() as api:
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
scvolume = api.find_volume(volume_name, provider_id)
if scvolume:
api.unmanage(scvolume)
def _get_retype_spec(self, diff, volume_name, specname, spectype):
"""Helper function to get current and requested spec.
:param diff: A difference dictionary.
:param volume_name: The volume name we are working with.
:param specname: The pretty name of the parameter.
:param spectype: The actual spec string.
:return: current, requested spec.
:raises: VolumeBackendAPIException
"""
spec = (diff['extra_specs'].get(spectype))
if spec:
if len(spec) != 2:
msg = _('Unable to retype %(specname)s, expected to receive '
'current and requested %(spectype)s values. Value '
'received: %(spec)s') % {'specname': specname,
'spectype': spectype,
'spec': spec}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
current = spec[0]
requested = spec[1]
if current != requested:
LOG.debug('Retyping volume %(vol)s to use %(specname)s '
'%(spec)s.',
{'vol': volume_name,
'specname': specname,
'spec': requested})
return current, requested
else:
LOG.info(_LI('Retype was to same Storage Profile.'))
return None, None
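    # Editor's note: illustrative call, values hypothetical. For a storage
    # profile change the retype diff carries a (current, requested) pair:
    #     diff = {'extra_specs':
    #             {'storagetype:storageprofile': ('Low', 'High')}}
    #     self._get_retype_spec(diff, 'vol-uuid', 'Storage Profile',
    #                           'storagetype:storageprofile')
    # returns ('Low', 'High'); if both entries match, the helper logs and
    # returns (None, None).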
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s '
'diff: %(diff)s host: %(host)s'),
{'name': volume.get('id'), 'newtype': new_type,
'diff': diff, 'host': host})
model_update = None
# Any spec changes?
if diff['extra_specs']:
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
with self._client.open_connection() as api:
try:
# Get our volume
scvolume = api.find_volume(volume_name, provider_id)
if scvolume is None:
LOG.error(_LE('Retype unable to find volume %s.'),
volume_name)
return False
# Check our specs.
# Storage profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Storage Profile',
'storagetype:storageprofile'))
# if there is a change and it didn't work fast fail.
if (current != requested and not
api.update_storage_profile(scvolume, requested)):
LOG.error(_LE('Failed to update storage profile'))
return False
# Replay profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replay Profiles',
'storagetype:replayprofiles'))
# if there is a change and it didn't work fast fail.
if requested and not api.update_replay_profiles(scvolume,
requested):
LOG.error(_LE('Failed to update replay profiles'))
return False
# Replication_enabled.
current, requested = (
self._get_retype_spec(diff,
volume_name,
'replication_enabled',
'replication_enabled'))
# if there is a change and it didn't work fast fail.
if current != requested:
if requested == '<is> True':
model_update = self._create_replications(api,
volume,
scvolume)
elif current == '<is> True':
self._delete_replications(api, volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': ''}
# Active Replay
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replicate Active Replay',
'replication:activereplay'))
if current != requested and not (
api.update_replicate_active_replay(
scvolume, requested == '<is> True')):
LOG.error(_LE('Failed to apply '
'replication:activereplay setting'))
return False
# TODO(tswanson): replaytype once it actually works.
except exception.VolumeBackendAPIException:
# We do nothing with this. We simply return failure.
return False
# If we have something to send down...
if model_update:
return model_update
return True
def _parse_secondary(self, api, secondary):
"""Find the replication destination associated with secondary.
:param api: Dell StorageCenterApi
:param secondary: String indicating the secondary to failover to.
:return: Destination SSN for the given secondary.
"""
LOG.debug('_parse_secondary. Looking for %s.', secondary)
destssn = None
# Trundle through these looking for our secondary.
for backend in self.backends:
ssnstring = backend['target_device_id']
# If they list a secondary it has to match.
# If they do not list a secondary we return the first
# replication on a working system.
if not secondary or secondary == ssnstring:
# Is a string. Need an int.
ssn = int(ssnstring)
# Without the source being up we have no good
# way to pick a destination to failover to. So just
# look for one that is just up.
try:
# If the SC ssn exists use it.
if api.find_sc(ssn):
destssn = ssn
break
except exception.VolumeBackendAPIException:
LOG.warning(_LW('SSN %s appears to be down.'), ssn)
LOG.info(_LI('replication failover secondary is %(ssn)s'),
{'ssn': destssn})
return destssn
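    # Editor's note: hypothetical configuration, shown only to illustrate how
    # secondaries are matched. With backends such as
    #     [{'target_device_id': '65495', 'qosnode': 'cinderqos'},
    #      {'target_device_id': '65496'}]
    # passing secondary='65495' (or no secondary at all) resolves to 65495,
    # provided that Storage Center answers find_sc().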
def _update_backend(self, active_backend_id):
# Mark for failover or undo failover.
LOG.debug('active_backend_id: %s', active_backend_id)
if active_backend_id:
self.active_backend_id = six.text_type(active_backend_id)
self.failed_over = True
else:
self.active_backend_id = None
self.failed_over = False
self._client.active_backend_id = self.active_backend_id
def _get_qos(self, targetssn):
# Find our QOS.
qosnode = None
for backend in self.backends:
if int(backend['target_device_id']) == targetssn:
qosnode = backend.get('qosnode', 'cinderqos')
return qosnode
def _parse_extraspecs(self, volume):
# Digest our extra specs.
extraspecs = {}
specs = self._get_volume_extra_specs(volume)
if specs.get('replication_type') == '<in> sync':
extraspecs['replicationtype'] = 'Synchronous'
else:
extraspecs['replicationtype'] = 'Asynchronous'
if specs.get('replication:activereplay') == '<is> True':
extraspecs['activereplay'] = True
else:
extraspecs['activereplay'] = False
extraspecs['storage_profile'] = specs.get('storagetype:storageprofile')
extraspecs['replay_profile_string'] = (
specs.get('storagetype:replayprofiles'))
return extraspecs
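    # Editor's note: an illustrative mapping; the profile name is
    # hypothetical. Extra specs such as
    #     {'replication_type': '<in> sync',
    #      'replication:activereplay': '<is> True',
    #      'storagetype:storageprofile': 'HighPriority'}
    # come back as
    #     {'replicationtype': 'Synchronous', 'activereplay': True,
    #      'storage_profile': 'HighPriority', 'replay_profile_string': None}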
def _wait_for_replication(self, api, items):
# Wait for our replications to resync with their original volumes.
# We wait for completion, errors or timeout.
deadcount = 5
lastremain = 0.0
# The big wait loop.
while True:
# We run until all volumes are synced or in error.
done = True
currentremain = 0.0
# Run the list.
for item in items:
# If we have one cooking.
if item['status'] == 'inprogress':
# Is it done?
synced, remain = api.replication_progress(item['screpl'])
currentremain += remain
if synced:
# It is! Get our volumes.
cvol = api.get_volume(item['cvol'])
nvol = api.get_volume(item['nvol'])
# Flip replication.
if (cvol and nvol and api.flip_replication(
cvol, nvol, item['volume']['id'],
item['specs']['replicationtype'],
item['qosnode'],
item['specs']['activereplay'])):
# rename the original. Doesn't matter if it
# succeeded as we should have the provider_id
# of the new volume.
ovol = api.get_volume(item['ovol'])
if not ovol or not api.rename_volume(
ovol, 'org:' + ovol['name']):
# Not a reason to fail but will possibly
# cause confusion so warn.
LOG.warning(_LW('Unable to locate and rename '
'original volume: %s'),
item['ovol'])
item['status'] = 'synced'
else:
item['status'] = 'error'
elif synced is None:
# Couldn't get info on this one. Call it baked.
item['status'] = 'error'
else:
# Miles to go before we're done.
done = False
# done? then leave.
if done:
break
# Confirm we are or are not still making progress.
if lastremain == currentremain:
# One chance down. Warn user.
deadcount -= 1
LOG.warning(_LW('Waiting for replications to complete. '
'No progress for 30 seconds. deadcount = %d'),
deadcount)
else:
# Reset
lastremain = currentremain
deadcount = 5
            # If we've used up our 5 chances we error and log.
if deadcount == 0:
LOG.error(_LE('Replication progress has stopped.'))
for item in items:
if item['status'] == 'inprogress':
LOG.error(_LE('Failback failed for volume: %s. '
'Timeout waiting for replication to '
'sync with original volume.'),
item['volume']['id'])
item['status'] = 'error'
break
# This is part of an async call so we should be good sleeping here.
# Have to balance hammering the backend for no good reason with
# the max timeout for the unit tests. Yeah, silly.
eventlet.sleep(self.failback_timeout)
def _reattach_remaining_replications(self, api, items):
# Wiffle through our backends and reattach any remaining replication
# targets.
for item in items:
if item['status'] == 'synced':
svol = api.get_volume(item['nvol'])
# assume it went well. Will error out if not.
item['status'] = 'reattached'
# wiffle through our backends and kick off replications.
for backend in self.backends:
rssn = int(backend['target_device_id'])
if rssn != api.ssn:
rvol = api.find_repl_volume(item['volume']['id'],
rssn, None)
# if there is an old replication whack it.
api.delete_replication(svol, rssn, False)
if api.start_replication(
svol, rvol,
item['specs']['replicationtype'],
self._get_qos(rssn),
item['specs']['activereplay']):
# Save our replication_driver_data.
item['rdd'] += ','
item['rdd'] += backend['target_device_id']
else:
# No joy. Bail
item['status'] = 'error'
def _fixup_types(self, api, items):
# Update our replay profiles.
for item in items:
if item['status'] == 'reattached':
# Re-apply any appropriate replay profiles.
item['status'] = 'available'
rps = item['specs']['replay_profile_string']
if rps:
svol = api.get_volume(item['nvol'])
if not api.update_replay_profiles(svol, rps):
item['status'] = 'error'
def _volume_updates(self, items):
# Update our volume updates.
volume_updates = []
for item in items:
# Set our status for our replicated volumes
model_update = {'provider_id': item['nvol'],
'replication_driver_data': item['rdd']}
# These are simple. If the volume reaches available then,
# since we were replicating it, replication status must
# be good. Else error/error.
if item['status'] == 'available':
model_update['status'] = 'available'
model_update['replication_status'] = 'enabled'
else:
model_update['status'] = 'error'
model_update['replication_status'] = 'error'
volume_updates.append({'volume_id': item['volume']['id'],
'updates': model_update})
return volume_updates
def failback_volumes(self, volumes):
"""This is a generic volume failback.
:param volumes: List of volumes that need to be failed back.
:return: volume_updates for the list of volumes.
"""
LOG.info(_LI('failback_volumes'))
with self._client.open_connection() as api:
# Get our qosnode. This is a good way to make sure the backend
# is still setup so that we can do this.
qosnode = self._get_qos(api.ssn)
if not qosnode:
raise exception.VolumeBackendAPIException(
message=_('Unable to failback. Backend is misconfigured.'))
volume_updates = []
replitems = []
screplid = None
status = ''
# Trundle through the volumes. Update non replicated to alive again
# and reverse the replications for the remaining volumes.
for volume in volumes:
LOG.info(_LI('failback_volumes: starting volume: %s'), volume)
model_update = {}
if volume.get('replication_driver_data'):
LOG.info(_LI('failback_volumes: replicated volume'))
# Get our current volume.
cvol = api.find_volume(volume['id'], volume['provider_id'])
# Original volume on the primary.
ovol = api.find_repl_volume(volume['id'], api.primaryssn,
None, True, False)
# Delete our current mappings.
api.remove_mappings(cvol)
# If there is a replication to delete do so.
api.delete_replication(ovol, api.ssn, False)
# Replicate to a common replay.
screpl = api.replicate_to_common(cvol, ovol, 'tempqos')
# We made it this far. Update our status.
if screpl:
screplid = screpl['instanceId']
nvolid = screpl['destinationVolume']['instanceId']
status = 'inprogress'
else:
LOG.error(_LE('Unable to restore %s'), volume['id'])
screplid = None
nvolid = None
status = 'error'
# Save some information for the next step.
# nvol is the new volume created by replicate_to_common.
# We also grab our extra specs here.
replitems.append(
{'volume': volume,
'specs': self._parse_extraspecs(volume),
'qosnode': qosnode,
'screpl': screplid,
'cvol': cvol['instanceId'],
'ovol': ovol['instanceId'],
'nvol': nvolid,
'rdd': six.text_type(api.ssn),
'status': status})
else:
# Not replicated. Just set it to available.
model_update = {'status': 'available'}
# Either we are failed over or our status is now error.
volume_updates.append({'volume_id': volume['id'],
'updates': model_update})
if replitems:
# Wait for replication to complete.
# This will also flip replication.
self._wait_for_replication(api, replitems)
# Replications are done. Attach to any additional replication
# backends.
self._reattach_remaining_replications(api, replitems)
self._fixup_types(api, replitems)
volume_updates += self._volume_updates(replitems)
# Set us back to a happy state.
# The only way this doesn't happen is if the primary is down.
self._update_backend(None)
return volume_updates
def failover_host(self, context, volumes, secondary_id=None):
"""Failover to secondary.
:param context: security context
:param secondary_id: Specifies rep target to fail over to
:param volumes: List of volumes serviced by this backend.
:returns: destssn, volume_updates data structure
Example volume_updates data structure:
.. code-block:: json
[{'volume_id': <cinder-uuid>,
'updates': {'provider_id': 8,
'replication_status': 'failed-over',
'replication_extended_status': 'whatever',...}},]
"""
LOG.debug('failover-host')
LOG.debug(self.failed_over)
LOG.debug(self.active_backend_id)
LOG.debug(self.replication_enabled)
if self.failed_over:
if secondary_id == 'default':
LOG.debug('failing back')
return 'default', self.failback_volumes(volumes)
raise exception.VolumeBackendAPIException(
message='Already failed over.')
LOG.info(_LI('Failing backend to %s'), secondary_id)
# basic check
if self.replication_enabled:
with self._client.open_connection() as api:
# Look for the specified secondary.
destssn = self._parse_secondary(api, secondary_id)
if destssn:
# We roll through trying to break replications.
# Is failing here a complete failure of failover?
volume_updates = []
for volume in volumes:
model_update = {}
if volume.get('replication_driver_data'):
rvol = api.break_replication(
volume['id'], volume.get('provider_id'),
destssn)
if rvol:
LOG.info(_LI('Success failing over volume %s'),
volume['id'])
else:
LOG.info(_LI('Failed failing over volume %s'),
volume['id'])
                            # We should note that we are now failed over
                            # and that we have a new instanceId. Guard against
                            # break_replication returning nothing.
                            model_update = {
                                'replication_status': 'failed-over'}
                            if rvol:
                                model_update['provider_id'] = (
                                    rvol['instanceId'])
else:
# Not a replicated volume. Try to unmap it.
scvolume = api.find_volume(
volume['id'], volume.get('provider_id'))
api.remove_mappings(scvolume)
model_update = {'status': 'error'}
# Either we are failed over or our status is now error.
volume_updates.append({'volume_id': volume['id'],
'updates': model_update})
# this is it.
self._update_backend(destssn)
LOG.debug('after update backend')
LOG.debug(self.failed_over)
LOG.debug(self.active_backend_id)
LOG.debug(self.replication_enabled)
return destssn, volume_updates
else:
raise exception.InvalidInput(message=(
_('replication_failover failed. %s not found.') %
secondary_id))
# I don't think we should ever get here.
raise exception.VolumeBackendAPIException(message=(
_('replication_failover failed. '
'Backend not configured for failover')))
def _get_unmanaged_replay(self, api, volume_name, provider_id,
existing_ref):
replay_name = None
if existing_ref:
replay_name = existing_ref.get('source-name')
if not replay_name:
msg = _('_get_unmanaged_replay: Must specify source-name.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Find our volume.
scvolume = api.find_volume(volume_name, provider_id)
if not scvolume:
# Didn't find it.
msg = (_('_get_unmanaged_replay: Cannot find volume id %s')
% volume_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Find our replay.
screplay = api.find_replay(scvolume, replay_name)
if not screplay:
# Didn't find it. Reference must be invalid.
msg = (_('_get_unmanaged_replay: Cannot '
'find snapshot named %s') % replay_name)
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
return screplay
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
snapshot structure.
There are two ways to do this:
1. Rename the backend storage object so that it matches the
           snapshot['name'], which is how drivers traditionally map between a
cinder snapshot and the associated backend storage object.
2. Place some metadata on the snapshot, or somewhere in the backend,
that allows other driver requests (e.g. delete) to locate the
backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
"""
with self._client.open_connection() as api:
# Find our unmanaged snapshot. This will raise on error.
volume_name = snapshot.get('volume_id')
provider_id = snapshot.get('provider_id')
snapshot_id = snapshot.get('id')
screplay = self._get_unmanaged_replay(api, volume_name,
provider_id, existing_ref)
# Manage means update description and update expiration.
if not api.manage_replay(screplay, snapshot_id):
# That didn't work. Error.
msg = (_('manage_existing_snapshot: Error managing '
'existing replay %(ss)s on volume %(vol)s') %
{'ss': screplay.get('description'),
'vol': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Life is good. Let the world know what we've done.
LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on '
'volume %(volume)s has been renamed to %(id)s and is '
'now managed by Cinder.'),
{'exist': screplay.get('description'),
'volume': volume_name,
'id': snapshot_id})
return {'provider_id': screplay['createVolume']['instanceId']}
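    # Editor's note: illustrative only; the replay name is hypothetical. The
    # existing_ref for snapshot manage calls names the replay on the backend:
    #     {'source-name': 'daily-replay-20160601'}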
# NOTE: Can't use abstractmethod before all drivers implement it
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
volume_name = snapshot.get('volume_id')
provider_id = snapshot.get('provider_id')
with self._client.open_connection() as api:
screplay = self._get_unmanaged_replay(api, volume_name,
provider_id, existing_ref)
sz, rem = dell_storagecenter_api.StorageCenterApi.size_to_gb(
screplay['size'])
if rem > 0:
raise exception.VolumeBackendAPIException(
data=_('Volume size must be a multiple of 1 GB.'))
return sz
# NOTE: Can't use abstractmethod before all drivers implement it
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management.
Does not delete the underlying backend storage object.
NOTE: We do set the expire countdown to 1 day. Once a snapshot is
unmanaged it will expire 24 hours later.
"""
with self._client.open_connection() as api:
snapshot_id = snapshot.get('id')
# provider_id is the snapshot's parent volume's instanceId.
provider_id = snapshot.get('provider_id')
volume_name = snapshot.get('volume_id')
# Find our volume.
scvolume = api.find_volume(volume_name, provider_id)
if not scvolume:
# Didn't find it.
msg = (_('unmanage_snapshot: Cannot find volume id %s')
% volume_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Find our replay.
screplay = api.find_replay(scvolume, snapshot_id)
if not screplay:
# Didn't find it. Reference must be invalid.
msg = (_('unmanage_snapshot: Cannot find snapshot named %s')
% snapshot_id)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Free our snapshot.
api.unmanage_replay(screplay)
# Do not check our result.
|
{
"content_hash": "9150accc13a24d759f2800e3b661ba19",
"timestamp": "",
"source": "github",
"line_count": 1524,
"max_line_length": 79,
"avg_line_length": 46.02165354330709,
"alnum_prop": 0.5278098578496372,
"repo_name": "bswartz/cinder",
"id": "0997a5cdb6502373b521d23a603fd8a6af339b0c",
"size": "70742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/dell/dell_storagecenter_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16345375"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import doctest
import os
import unittest
import sys
try:
from babel import Locale
locale_en = Locale.parse('en_US')
except ImportError:
locale_en = None
from trac.config import Configuration
from trac.core import Component, ComponentManager
from trac.env import Environment
from trac.db.api import _parse_db_str, DatabaseManager
from trac.db.sqlite_backend import SQLiteConnection
from trac.db.util import ConnectionWrapper
import trac.db.postgres_backend
import trac.db.mysql_backend
from trac.ticket.default_workflow import load_workflow_config_snippet
from trac.util import translation
def Mock(bases=(), *initargs, **kw):
"""
Simple factory for dummy classes that can be used as replacement for the
real implementation in tests.
Base classes for the mock can be specified using the first parameter, which
must be either a tuple of class objects or a single class object. If the
bases parameter is omitted, the base class of the mock will be object.
So to create a mock that is derived from the builtin dict type, you can do:
>>> mock = Mock(dict)
>>> mock['foo'] = 'bar'
>>> mock['foo']
'bar'
Attributes of the class are provided by any additional keyword parameters.
>>> mock = Mock(foo='bar')
>>> mock.foo
'bar'
    Objects produced by this function have the special feature of not requiring
the 'self' parameter on methods, because you should keep data at the scope
of the test function. So you can just do:
>>> mock = Mock(add=lambda x,y: x+y)
>>> mock.add(1, 1)
2
To access attributes from the mock object from inside a lambda function,
just access the mock itself:
>>> mock = Mock(dict, do=lambda x: 'going to the %s' % mock[x])
>>> mock['foo'] = 'bar'
>>> mock.do('foo')
'going to the bar'
Because assignments or other types of statements don't work in lambda
functions, assigning to a local variable from a mock function requires some
extra work:
>>> myvar = [None]
>>> mock = Mock(set=lambda x: myvar.__setitem__(0, x))
>>> mock.set(1)
>>> myvar[0]
1
"""
if not isinstance(bases, tuple):
bases = (bases,)
cls = type('Mock', bases, {})
mock = cls(*initargs)
for k, v in kw.items():
setattr(mock, k, v)
return mock
class MockPerm(object):
"""Fake permission class. Necessary as Mock can not be used with operator
overloading."""
username = ''
def has_permission(self, action, realm_or_resource=None, id=False,
version=False):
return True
__contains__ = has_permission
def __call__(self, realm_or_resource, id=False, version=False):
return self
def require(self, action, realm_or_resource=None, id=False, version=False):
pass
assert_permission = require
class TestSetup(unittest.TestSuite):
"""
Test suite decorator that allows a fixture to be setup for a complete
suite of test cases.
"""
def setUp(self):
"""Sets up the fixture, and sets self.fixture if needed"""
pass
def tearDown(self):
"""Tears down the fixture"""
pass
def run(self, result):
"""Setup the fixture (self.setUp), call .setFixture on all the tests,
and tear down the fixture (self.tearDown)."""
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite.run(self, result)
self.tearDown()
return result
def _wrapped_run(self, *args, **kwargs):
"Python 2.7 / unittest2 compatibility - there must be a better way..."
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite._wrapped_run(self, *args, **kwargs)
self.tearDown()
class TestCaseSetup(unittest.TestCase):
def setFixture(self, fixture):
self.fixture = fixture
# -- Database utilities
def get_dburi():
dburi = os.environ.get('TRAC_TEST_DB_URI')
if dburi:
scheme, db_prop = _parse_db_str(dburi)
# Assume the schema 'tractest' for Postgres
if scheme == 'postgres' and \
not db_prop.get('params', {}).get('schema'):
if '?' in dburi:
dburi += "&schema=tractest"
else:
dburi += "?schema=tractest"
return dburi
return 'sqlite::memory:'
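# Editor's note: illustrative TRAC_TEST_DB_URI values only; hosts and
# credentials are made up. get_dburi() appends "?schema=tractest" (or
# "&schema=tractest") to a Postgres URI that does not already name a schema.
#     sqlite::memory:
#     postgres://tracuser:password@localhost/trac
#     mysql://tracuser:password@localhost/trac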
def reset_sqlite_db(env, db_prop):
dbname = os.path.basename(db_prop['path'])
with env.db_transaction as db:
tables = db("SELECT name FROM sqlite_master WHERE type='table'")
for table in tables:
db("DELETE FROM %s" % table)
return tables
def reset_postgres_db(env, db_prop):
with env.db_transaction as db:
dbname = db.schema
if dbname:
# reset sequences
            # The information_schema.sequences view is only available in
            # PostgreSQL 8.2+, but Trac supports PostgreSQL 8.0+, so use
            # pg_get_serial_sequence() instead.
for seq in db("""
SELECT sequence_name FROM (
SELECT pg_get_serial_sequence(%s||table_name,
column_name)
AS sequence_name
FROM information_schema.columns
WHERE table_schema=%s) AS tab
WHERE sequence_name IS NOT NULL""",
(dbname + '.', dbname)):
db("ALTER SEQUENCE %s RESTART WITH 1" % seq)
# clear tables
tables = db("""SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (dbname,))
for table in tables:
db("DELETE FROM %s" % table)
# PostgreSQL supports TRUNCATE TABLE as well
# (see http://www.postgresql.org/docs/8.1/static/sql-truncate.html)
# but on the small tables used here, DELETE is actually much faster
return tables
def reset_mysql_db(env, db_prop):
dbname = os.path.basename(db_prop['path'])
if dbname:
with env.db_transaction as db:
tables = db("""SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (dbname,))
for table in tables:
                # TRUNCATE TABLE is preferred to DELETE FROM, as we need to reset
# the auto_increment in MySQL.
db("TRUNCATE TABLE %s" % table)
return tables
# -- Environment stub
class EnvironmentStub(Environment):
"""A stub of the trac.env.Environment object for testing."""
href = abs_href = None
global_databasemanager = None
def __init__(self, default_data=False, enable=None, disable=None,
path=None, destroying=False):
"""Construct a new Environment stub object.
:param default_data: If True, populate the database with some
defaults.
:param enable: A list of component classes or name globs to
activate in the stub environment.
"""
ComponentManager.__init__(self)
Component.__init__(self)
self.systeminfo = []
import trac
self.path = path
if self.path is None:
self.path = os.path.dirname(trac.__file__)
if not os.path.isabs(self.path):
self.path = os.path.join(os.getcwd(), self.path)
# -- configuration
self.config = Configuration(None)
# We have to have a ticket-workflow config for ''lots'' of things to
# work. So insert the basic-workflow config here. There may be a
# better solution than this.
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.set('logging', 'log_level', 'DEBUG')
self.config.set('logging', 'log_type', 'stderr')
if enable is not None:
self.config.set('components', 'trac.*', 'disabled')
else:
self.config.set('components', 'tracopt.versioncontrol.*',
'enabled')
for name_or_class in enable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'enabled')
for name_or_class in disable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'disabled')
# -- logging
from trac.log import logger_handler_factory
self.log, self._log_handler = logger_handler_factory('test')
# -- database
self.config.set('components', 'trac.db.*', 'enabled')
self.dburi = get_dburi()
init_global = False
if self.global_databasemanager:
            self.components[DatabaseManager] = self.global_databasemanager
else:
self.config.set('trac', 'database', self.dburi)
self.global_databasemanager = DatabaseManager(self)
self.config.set('trac', 'debug_sql', True)
self.config.set('logging', 'log_type', 'stderr')
self.config.set('logging', 'log_level', 'DEBUG')
init_global = not destroying
if default_data or init_global:
self.reset_db(default_data)
from trac.web.href import Href
self.href = Href('/trac.cgi')
self.abs_href = Href('http://example.org/trac.cgi')
self.known_users = []
translation.activate(locale_en)
def reset_db(self, default_data=None):
"""Remove all data from Trac tables, keeping the tables themselves.
:param default_data: after clean-up, initialize with default data
:return: True upon success
"""
from trac import db_default
scheme, db_prop = _parse_db_str(self.dburi)
tables = []
remove_sqlite_db = False
try:
with self.db_transaction as db:
db.rollback() # make sure there's no transaction in progress
# check the database version
database_version = db(
"SELECT value FROM system WHERE name='database_version'")
if database_version:
database_version = int(database_version[0][0])
if database_version == db_default.db_version:
# same version, simply clear the tables (faster)
m = sys.modules[__name__]
reset_fn = 'reset_%s_db' % scheme
if hasattr(m, reset_fn):
tables = getattr(m, reset_fn)(self, db_prop)
else:
# different version or version unknown, drop the tables
remove_sqlite_db = True
self.destroy_db(scheme, db_prop)
except Exception, e:
# "Database not found ...",
# "OperationalError: no such table: system" or the like
pass
db = None # as we might shutdown the pool FIXME no longer needed!
if scheme == 'sqlite' and remove_sqlite_db:
path = db_prop['path']
if path != ':memory:':
if not os.path.isabs(path):
path = os.path.join(self.path, path)
self.global_databasemanager.shutdown()
os.remove(path)
if not tables:
self.global_databasemanager.init_db()
# we need to make sure the next get_db_cnx() will re-create
# a new connection aware of the new data model - see #8518.
if self.dburi != 'sqlite::memory:':
self.global_databasemanager.shutdown()
with self.db_transaction as db:
if default_data:
for table, cols, vals in db_default.get_data(db):
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (table, ','.join(cols),
','.join(['%s' for c in cols])),
vals)
else:
db("INSERT INTO system (name, value) VALUES (%s, %s)",
('database_version', str(db_default.db_version)))
def destroy_db(self, scheme=None, db_prop=None):
if not (scheme and db_prop):
scheme, db_prop = _parse_db_str(self.dburi)
try:
with self.db_transaction as db:
if scheme == 'postgres' and db.schema:
db('DROP SCHEMA "%s" CASCADE' % db.schema)
elif scheme == 'mysql':
dbname = os.path.basename(db_prop['path'])
for table in db("""
SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (dbname,)):
db("DROP TABLE IF EXISTS `%s`" % table)
except Exception:
# "TracError: Database not found...",
# psycopg2.ProgrammingError: schema "tractest" does not exist
pass
return False
    # overridden
def is_component_enabled(self, cls):
if self._component_name(cls).startswith('__main__.'):
return True
return Environment.is_component_enabled(self, cls)
def get_known_users(self, cnx=None):
return self.known_users
def locate(fn):
"""Locates a binary on the path.
Returns the fully-qualified path, or None.
"""
exec_suffix = '.exe' if os.name == 'nt' else ''
for p in ["."] + os.environ['PATH'].split(os.pathsep):
f = os.path.join(p, fn + exec_suffix)
if os.path.exists(f):
return f
return None
INCLUDE_FUNCTIONAL_TESTS = True
def suite():
import trac.tests
import trac.admin.tests
import trac.db.tests
import trac.mimeview.tests
import trac.ticket.tests
import trac.util.tests
import trac.versioncontrol.tests
import trac.versioncontrol.web_ui.tests
import trac.web.tests
import trac.wiki.tests
import tracopt.mimeview.tests
import tracopt.perm.tests
import tracopt.versioncontrol.git.tests
import tracopt.versioncontrol.svn.tests
suite = unittest.TestSuite()
suite.addTest(trac.tests.basicSuite())
if INCLUDE_FUNCTIONAL_TESTS:
suite.addTest(trac.tests.functionalSuite())
suite.addTest(trac.admin.tests.suite())
suite.addTest(trac.db.tests.suite())
suite.addTest(trac.mimeview.tests.suite())
suite.addTest(trac.ticket.tests.suite())
suite.addTest(trac.util.tests.suite())
suite.addTest(trac.versioncontrol.tests.suite())
suite.addTest(trac.versioncontrol.web_ui.tests.suite())
suite.addTest(trac.web.tests.suite())
suite.addTest(trac.wiki.tests.suite())
suite.addTest(tracopt.mimeview.tests.suite())
suite.addTest(tracopt.perm.tests.suite())
suite.addTest(tracopt.versioncontrol.git.tests.suite())
suite.addTest(tracopt.versioncontrol.svn.tests.suite())
suite.addTest(doctest.DocTestSuite(sys.modules[__name__]))
return suite
if __name__ == '__main__':
#FIXME: this is a bit inelegant
if '--skip-functional-tests' in sys.argv:
sys.argv.remove('--skip-functional-tests')
INCLUDE_FUNCTIONAL_TESTS = False
unittest.main(defaultTest='suite')
|
{
"content_hash": "1fbcfe54416e109536c95028fc948b05",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 80,
"avg_line_length": 35.81651376146789,
"alnum_prop": 0.5830558401639344,
"repo_name": "dafrito/trac-mirror",
"id": "93bd8f55f2e31f307ee6d3c002fa7d080dbc648b",
"size": "16346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trac/test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78549"
},
{
"name": "JavaScript",
"bytes": "80640"
},
{
"name": "Perl",
"bytes": "2616"
},
{
"name": "Python",
"bytes": "2978350"
},
{
"name": "Shell",
"bytes": "11207"
}
],
"symlink_target": ""
}
|
import numpy as np
from onnx import numpy_helper
def get_node_index(model, node):
i = 0
while i < len(model.graph.node):
if model.graph.node[i] == node:
break
i += 1
return i if i < len(model.graph.node) else None
def add_const(model, name, output, t_value=None, f_value=None):
const_node = model.graph.node.add()
const_node.op_type = "Constant"
const_node.name = name
const_node.output.extend([output])
attr = const_node.attribute.add()
attr.name = "value"
if t_value is not None:
attr.type = 4
attr.t.CopyFrom(t_value)
else:
attr.type = 1
attr.f = f_value
return const_node
def process_dropout(model):
dropouts = []
index = 0
for node in model.graph.node:
if node.op_type == "Dropout":
new_dropout = model.graph.node.add()
new_dropout.op_type = "TrainableDropout"
new_dropout.name = "TrainableDropout_%d" % index
# make ratio node
ratio = np.asarray([node.attribute[0].f], dtype=np.float32)
print(ratio.shape)
ratio_value = numpy_helper.from_array(ratio)
ratio_node = add_const(
model,
"dropout_node_ratio_%d" % index,
"dropout_node_ratio_%d" % index,
t_value=ratio_value,
)
print(ratio_node)
new_dropout.input.extend([node.input[0], ratio_node.output[0]])
new_dropout.output.extend(node.output)
dropouts.append(get_node_index(model, node))
index += 1
dropouts.sort(reverse=True)
for d in dropouts:
del model.graph.node[d]
model.opset_import[0].version = 10
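# Editor's note: a minimal usage sketch, not part of the original utility. The
# file names are hypothetical; process_dropout() mutates the ModelProto in
# place, so a typical call site just loads, rewrites and re-serializes.
def example_convert_dropout(path_in="model.onnx",
                            path_out="model_trainable.onnx"):
    import onnx

    model = onnx.load(path_in)   # parse the ONNX graph
    process_dropout(model)       # swap Dropout -> TrainableDropout in place
    onnx.save(model, path_out)   # write the rewritten model back out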
|
{
"content_hash": "dc6dcf30ba9d6c8894324b4877b1f054",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 31.178571428571427,
"alnum_prop": 0.5664375715922108,
"repo_name": "microsoft/onnxruntime",
"id": "3d3feca06a99bd6495b9d3efbc5b6062301e4bc3",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnxruntime/test/python/onnxruntime_test_training_unittest_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
}
|
"""Functions for discovering and clearing the build directory."""
import os
import sys
def IsFileNewerThanFile(file_a, file_b):
"""Returns True if file_a's mtime is newer than file_b's."""
def getmtime(f):
try:
return os.path.getmtime(f)
except os.error:
return 0
return getmtime(file_a) >= getmtime(file_b)
def AreNinjaFilesNewerThanXcodeFiles(src_dir=None):
"""Returns True if the generated ninja files are newer than the generated
xcode files.
Parameters:
src_dir: The path to the src directory. If None, it's assumed to be
at src/ relative to the current working directory.
"""
src_dir = src_dir or 'src'
ninja_path = os.path.join(src_dir, 'out', 'Release', 'build.ninja')
xcode_path = os.path.join(
src_dir, 'build', 'all.xcodeproj', 'project.pbxproj')
return IsFileNewerThanFile(ninja_path, xcode_path)
def GetBuildOutputDirectory(src_dir=None, cros_board=None):
"""Returns the path to the build directory, relative to the checkout root.
Assumes that the current working directory is the checkout root.
"""
  # src_dir is only needed for compiling v8, which uses compile.py (but none
  # of the other build scripts), and whose source root isn't "src"
  # -- crbug.com/315004
if src_dir is None:
src_dir = 'src'
if sys.platform.startswith('linux'):
out_dirname = 'out'
if cros_board:
# Simple chrome workflow output (e.g., "out_x86-generic")
out_dirname += '_%s' % (cros_board,)
return os.path.join(src_dir, out_dirname)
assert not cros_board, "'cros_board' not supported on this platform"
if sys.platform == 'cygwin' or sys.platform.startswith('win') or (
sys.platform == 'darwin'):
return os.path.join(src_dir, 'out')
raise NotImplementedError('Unexpected platform %s' % sys.platform)
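# Editor's note: hypothetical call sites, for illustration only.
#     GetBuildOutputDirectory()                          # -> 'src/out'
#     GetBuildOutputDirectory(cros_board='x86-generic')  # -> 'src/out_x86-generic'
#                                                        #    (Linux only)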
|
{
"content_hash": "918c5e833ce9eb00d22483d35e637e88",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 33.629629629629626,
"alnum_prop": 0.6850220264317181,
"repo_name": "chromium/chromium",
"id": "172109f21dd298772509d8e793ea546dc3191c11",
"size": "1957",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "infra/scripts/build_directory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from bokeh.core.properties import Any, Dict, Instance, String
from bokeh.models import ColumnDataSource, LayoutDOM
# This defines some default options for the Graph3d feature of vis.js
# See: http://visjs.org/graph3d_examples.html for more details. Note
# that we are fixing the size of this component, in ``options``, but
# with additional work it could be made more responsive.
DEFAULTS = {
'width': '600px',
'height': '600px',
'style': 'surface',
'showPerspective': True,
'showGrid': True,
'keepAspectRatio': True,
'verticalRatio': 1.0,
'legendLabel': 'stuff',
'cameraPosition': {
'horizontal': -0.35,
'vertical': 0.22,
'distance': 1.8,
}
}
# This custom extension model will have a DOM view that should layout-able in
# Bokeh layouts, so use ``LayoutDOM`` as the base class. If you wanted to create
# a custom tool, you could inherit from ``Tool``, or from ``Glyph`` if you
# wanted to create a custom glyph, etc.
class Surface3d(LayoutDOM):
# The special class attribute ``__implementation__`` should contain a string
# of JavaScript (or TypeScript) code that implements the JavaScript side
# of the custom extension model.
__implementation__ = "surface3d.ts"
# Below are all the "properties" for this model. Bokeh properties are
# class attributes that define the fields (and their types) that can be
# communicated automatically between Python and the browser. Properties
# also support type validation. More information about properties in
# can be found here:
#
# https://bokeh.pydata.org/en/latest/docs/reference/core.html#bokeh-core-properties
# This is a Bokeh ColumnDataSource that can be updated in the Bokeh
# server by Python code
data_source = Instance(ColumnDataSource)
# The vis.js library that we are wrapping expects data for x, y, and z.
# The data will actually be stored in the ColumnDataSource, but these
# properties let us specify the *name* of the column that should be
# used for each field.
x = String
y = String
z = String
# Any of the available vis.js options for Graph3d can be set by changing
# the contents of this dictionary.
options = Dict(String, Any, default=DEFAULTS)
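# Editor's note: a minimal sketch of how this extension might be wired into a
# Bokeh server document; the grid size and the sin/cos surface below are
# assumptions for illustration, not part of the model definition above.
def _example_surface3d_document():
    import numpy as np
    from bokeh.io import curdoc

    x = np.linspace(0, 6, 30)
    y = np.linspace(0, 6, 30)
    xx, yy = np.meshgrid(x, y)
    zz = np.sin(xx) * np.cos(yy)

    # Column names here must match the x/y/z properties passed to Surface3d.
    source = ColumnDataSource(
        data=dict(x=xx.ravel(), y=yy.ravel(), z=zz.ravel()))
    surface = Surface3d(x="x", y="y", z="z", data_source=source)
    curdoc().add_root(surface)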
|
{
"content_hash": "1730e9314a81480c2f381b027c09ae8c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 90,
"avg_line_length": 39.42372881355932,
"alnum_prop": 0.6831470335339639,
"repo_name": "stonebig/bokeh",
"id": "b5856aade99303cf76303cbf9ec1b8aa9ad00a85",
"size": "2326",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/app/surface3d/surface3d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
}
|