text stringlengths 4 1.02M | meta dict |
|---|---|
import sciunit
from sciunit.scores import BooleanScore
# import morphounit.capabilities as cap
import morphounit.plots as plots
import os
from subprocess import call
import shlex
import json
from datetime import datetime
import matplotlib.backends.backend_pdf
from neurom.apps.cut_plane_detection import find_cut_plane
from neurom import load_neuron
import numpy
#==============================================================================
class NeuroM_MorphoCheck(sciunit.Test):
"""
Tests morphologies using NeuroM's `morph_check` feature.
Returns `True` if all checks passed successfully; else `False`.
"""
score_type = BooleanScore
def __init__(self,
observation=None,
name="NeuroM MorphCheck",
base_directory=None):
description = ("Tests morphologies using NeuroM's `morph_check` feature")
# required_capabilities = (cap.HandlesNeuroM,)
self.observation = observation
if not base_directory:
base_directory = "."
self.base_directory = base_directory
self.figures = []
sciunit.Test.__init__(self, self.observation, name)
#----------------------------------------------------------------------
def generate_prediction(self, model, verbose=False):
"""Implementation of sciunit.Test.generate_prediction."""
self.model_version = model.model_version
self.path_test_output = os.path.join(self.base_directory, 'validation_results', 'neuroM_morph_hardChecks', self.model_version, datetime.now().strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(self.path_test_output):
os.makedirs(self.path_test_output)
# note: observation here is either the contents of the config file or a local path
# if local path load contents
if not isinstance(self.observation, dict):
with open(self.observation) as f:
self.observation = json.load(f)
# save morph_check config as local file
morph_check_config_file = os.path.join(self.path_test_output, "morph_check_config.json")
with open(morph_check_config_file,'w') as f:
json.dump(self.observation["morph_check"], f, indent=4)
cut_plane_config = self.observation["cut_plane"]
morhpcheck_output_file = os.path.join(self.path_test_output, "morph_check_output.json")
call(shlex.split(f"morph_check -C {morph_check_config_file} -o {morhpcheck_output_file} {model.morph_path}"))
with open(morhpcheck_output_file) as json_data:
prediction = json.load(json_data)
cut_plane_output_json = find_cut_plane(load_neuron(model.morph_path), bin_width=cut_plane_config["bin_width"], display=True)
cut_plane_figure_list = []
for key in cut_plane_output_json["figures"].keys():
cut_plane_figure_list.append(cut_plane_output_json["figures"][key][0])
cutplane_output_pdf = os.path.join(self.path_test_output, "cut_plane_figures.pdf")
cut_plane_pdf = matplotlib.backends.backend_pdf.PdfPages(cutplane_output_pdf)
for fig in range(1, len(cut_plane_figure_list)+1):
cut_plane_pdf.savefig(fig)
cut_plane_pdf.close()
cutplane_output_file = os.path.join(self.path_test_output, "cut_plane_output.json")
cut_plane_output_json.pop("figures")
cut_plane_output_json["cut_leaves"] = cut_plane_output_json["cut_leaves"].tolist()
def convert(o):
if isinstance(o, numpy.int64): return int(o)
raise TypeError
with open(cutplane_output_file, "w") as outfile:
json.dump(cut_plane_output_json, outfile, indent=4, default=convert)
self.figures.append(morhpcheck_output_file)
self.figures.append(cutplane_output_file)
self.figures.append(cutplane_output_pdf)
return prediction
#----------------------------------------------------------------------
def compute_score(self, observation, prediction):
"""Implementation of sciunit.Test.score_prediction."""
score_dict = {"PASS":True, "FAIL":False}
self.score = BooleanScore(score_dict[prediction["STATUS"]])
self.score.description = "Boolean: True = Pass / False = Fail"
return self.score
#----------------------------------------------------------------------
def bind_score(self, score, model, observation, prediction):
score.related_data["figures"] = self.figures
score.related_data["passed"] = score.score
return score
| {
"content_hash": "cbfa4be5a63488cbe043c62ac8332dc1",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 176,
"avg_line_length": 43.50476190476191,
"alnum_prop": 0.6155866900175131,
"repo_name": "pedroernesto/morphounit",
"id": "04e34460e65044d48179cc6c247d0ee72cdd69b1",
"size": "4568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morphounit/tests/morph_cells/test_NeuroM_MorphCheck.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "93390"
}
],
"symlink_target": ""
} |
"""
Flask-Uploads
-------------
Flask-Uploads provides flexible upload handling for Flask applications. It
lets you divide your uploads into sets that the application user can publish
separately.
Links
`````
* `documentation <http://packages.python.org/Flask-Uploads>`_
* `development version
<http://bitbucket.org/leafstorm/flask-uploads/get/tip.gz#egg=Flask-Uploads-dev>`_
"""
from setuptools import setup
setup(
name='Flask-Uploads',
version='0.1.4',
url='http://bitbucket.org/leafstorm/flask-uploads/',
license='MIT',
author='Matthew "LeafStorm" Frazier',
author_email='leafstormrush@gmail.com',
description='Flexible and efficient upload handling for Flask',
long_description=__doc__,
py_modules = ['flask_uploads'],
# packages=['flaskext'],
# namespace_packages=['flaskext'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask>=0.5'
],
tests_require='nose',
test_suite='nose.collector',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| {
"content_hash": "5aa44eb88bb8103a37c20f1de16f5922",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 83,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.6501742160278746,
"repo_name": "codecool/flask-uploads",
"id": "5d0e00cd2d0974542eb9b15066d0aa0d3c797065",
"size": "1435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48904"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SliderPlugin.show_ribbon'
db.delete_column('cmsplugin_sliderplugin', 'show_ribbon')
# Renaming field 'SliderPlugin.image_height'
db.rename_column('cmsplugin_sliderplugin', 'image_height', 'height')
# Renaming field 'SliderPlugin.image_width'
db.rename_column('cmsplugin_sliderplugin', 'image_width', 'width')
def backwards(self, orm):
# Adding field 'SliderPlugin.show_ribbon'
db.add_column('cmsplugin_sliderplugin', 'show_ribbon',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Renaming field 'SliderPlugin.image_height'
db.rename_column('cmsplugin_sliderplugin', 'height', 'image_height')
# Renaming field 'SliderPlugin.image_width'
db.rename_column('cmsplugin_sliderplugin', 'width', 'image_width')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_nivoslider.slideralbum': {
'Meta': {'object_name': 'SliderAlbum'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmsplugin_nivoslider.SliderImage']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'cmsplugin_nivoslider.sliderimage': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'SliderImage'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'})
},
'cmsplugin_nivoslider.sliderplugin': {
'Meta': {'object_name': 'SliderPlugin', 'db_table': "'cmsplugin_sliderplugin'", '_ormbases': ['cms.CMSPlugin']},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_nivoslider.SliderAlbum']"}),
'anim_speed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '500'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'default': "'random'", 'max_length': '50'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pause_time': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3000'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_nivoslider']
| {
"content_hash": "e0441fbadffe762f8c7731ba6c74db31",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 170,
"avg_line_length": 62.207317073170735,
"alnum_prop": 0.5796902568123897,
"repo_name": "samirasnoun/django_cms_gallery_image",
"id": "62a419cce501cc65926651451ca35e2ccf498857",
"size": "5126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_nivoslider/migrations/0006_rename_fields_image_height_width__del_field_show_ribbon.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245718"
},
{
"name": "JavaScript",
"bytes": "1060264"
},
{
"name": "Makefile",
"bytes": "2973"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3309714"
},
{
"name": "Ruby",
"bytes": "1980"
},
{
"name": "XSLT",
"bytes": "10244"
}
],
"symlink_target": ""
} |
import gzip
import logging
import os
import shutil
import time
from abc import ABC, abstractmethod
import numpy as np
import requests
from astropy.io import fits
from astropy.io.ascii import ecsv
from astropy.table import Table
from astropy.utils.data import clear_download_cache
from ..utils import makedirs_if_needed
from ..version import __version__
try:
FileExistsError # pylint: disable=used-before-assignment
except NameError:
FileExistsError = OSError # pylint: disable=redefined-builtin
# Hot fix on ECSV_DATATYPES for newer astropy versions
if hasattr(ecsv, "ECSV_DATATYPES") and "object" not in ecsv.ECSV_DATATYPES:
ecsv.ECSV_DATATYPES = ecsv.ECSV_DATATYPES + ("object",)
__all__ = [
"DownloadableBase",
"DataObject",
"FileObject",
"CsvTable",
"FastCsvTable",
"EcsvTable",
"GoogleSheets",
"FitsTableGeneric",
"FitsTable",
"NumpyBinary",
]
class DownloadableBase(ABC):
@abstractmethod
def download_as_file(self, file_path, overwrite=False, compress=False):
pass
class FileObject(DownloadableBase):
"""
A simple class for file reading (to astropy Table), writing (from astropy Table),
and download.
Parameters
----------
path : str
path or URL to the file
**kwargs :
other keyword arguments to pass to astropy.table.Table.read
"""
_read_default_kwargs = dict()
_write_default_kwargs = dict()
def __init__(self, path=None, **kwargs):
self.path = path
self.kwargs = kwargs
@staticmethod
def _gz_fallback(file_path):
file_path_str = str(file_path)
file_path_alt = file_path_str[:-3] if file_path_str.lower().endswith(".gz") else (file_path_str + ".gz")
if (not os.path.isfile(file_path)) and os.path.isfile(file_path_alt):
return file_path_alt
return file_path
def read(self):
kwargs_this = dict(self._read_default_kwargs, **self.kwargs)
path = self._gz_fallback(self.path)
return Table.read(path, **kwargs_this)
def write(self, table, **kwargs):
kwargs_this = dict(self._write_default_kwargs, **kwargs)
makedirs_if_needed(self.path)
return table.write(self.path, **kwargs_this)
def download_as_file(self, file_path, overwrite=False, compress=False):
makedirs_if_needed(file_path)
if overwrite or not os.path.isfile(file_path):
try:
r = requests.get(self.path, stream=True, timeout=(120, 3600))
except requests.exceptions.MissingSchema:
shutil.copy(self.path, file_path)
else:
file_open = gzip.open if (compress or file_path.endswith(".gz")) else open
try:
with file_open(file_path, "wb") as f:
for chunk in r.iter_content(chunk_size=(16 * 1024 * 1024)):
f.write(chunk)
except: # noqa: E722
if os.path.isfile(file_path):
os.unlink(file_path)
raise
finally:
r.close()
def isfile(self):
if self.path:
return os.path.isfile(self._gz_fallback(self.path))
return False
class CsvTable(FileObject):
_read_default_kwargs = dict(format="ascii.csv")
_write_default_kwargs = dict(format="ascii.csv", overwrite=True)
class FastCsvTable(FileObject):
_read_default_kwargs = dict(format="ascii.fast_csv")
_write_default_kwargs = dict(format="ascii.fast_csv", overwrite=True)
class EcsvTable(FileObject):
_read_default_kwargs = dict(format="ascii.ecsv")
_write_default_kwargs = dict(format="ascii.ecsv", overwrite=True)
class GoogleSheets(FastCsvTable):
def __init__(self, key, gid, **kwargs):
path = "https://docs.google.com/spreadsheets/d/{0}/export?format=csv&gid={1}".format(key, gid)
self.url = "https://docs.google.com/spreadsheets/d/{0}/edit#gid={1}".format(key, gid)
super(GoogleSheets, self).__init__(path, **kwargs)
def read(self):
clear_download_cache(self.path)
return super(GoogleSheets, self).read()
def write(self, table, **kwargs):
raise NotImplementedError
class FitsTableGeneric(FileObject):
_read_default_kwargs = dict(memmap=True)
_write_default_kwargs = dict(format="fits", overwrite=True)
class FitsTable(FileObject):
compress_after_write = False
_read_default_kwargs = dict(cache=False, memmap=True)
_write_default_kwargs = dict(format="fits", overwrite=True)
def read(self):
kwargs_this = dict(self._read_default_kwargs, **self.kwargs)
path = self._gz_fallback(self.path)
try:
hdu_list = fits.open(path, **kwargs_this)
except OSError:
# this helps fits.open guess the compression better
hdu_list = fits.open(open(path, "rb"), **kwargs_this)
try:
t = Table(hdu_list[1].data, masked=False)
finally:
try:
del hdu_list[1].data # pylint: disable=no-member
hdu_list.close()
del hdu_list
except: # pylint: disable=bare-except # noqa: E722
pass
return t
def write(self, table, **kwargs):
coord = None
if "coord" in table.columns and table["coord"].info.dtype.name == "object":
coord = table["coord"]
del table["coord"]
if kwargs.pop("add_meta", False):
if not getattr(table, "meta", None):
table.meta = {}
table.meta["mtime"] = time.time()
table.meta["version"] = __version__
compress = self.compress_after_write or self.path.endswith(".gz")
file_open = gzip.open if compress else open
makedirs_if_needed(self.path)
kwargs_this = dict(self._write_default_kwargs, **kwargs)
with file_open(self.path, "wb") as f_out:
table.write(f_out, **kwargs_this)
if coord is not None:
table["coord"] = coord
class NumpyBinary(FileObject):
def read(self):
path = self._gz_fallback(self.path)
return np.load(path, **self.kwargs)
def write(self, table, **kwargs):
makedirs_if_needed(self.path)
np.savez(self.path, **table)
class DataObject(object):
"""
DataObject provide an simple interface to retrive remote data and fall back
to local copy when necessary.
Parameters
----------
remote : FileObject or its subclass
remote FileObject
local : FileObject or its subclass
local FileObject
cache_in_memory : bool, optional
whether or not to store the table in memory
use_local_first : bool, optional
whether or not to try using local file first
Examples
--------
>>> dobj = DataObject(FitsTable('http://somewhere/file.fits'), FitsTable('data/file.fits'))
You can also do
>>> dobj = DataObject(FitsTable('http://somewhere/file.fits'))
>>> dobj.local = 'data/file.fits'
Or
>>> dobj.download('data/file.fits')
"""
def __init__(self, remote, local=None, cache_in_memory=False, use_local_first=False):
self._local = None
self.remote = remote
self.local = local
self.use_local_first = bool(use_local_first)
self.cache_in_memory = bool(cache_in_memory)
self._cached_table = None
if use_local_first and local is None:
raise ValueError("Must specify `local` when setting `use_local_first=True`.")
@property
def local(self):
return self._local
@local.setter
def local(self, value):
if value is None:
self._local = None
elif isinstance(value, FileObject):
self._local = value
elif isinstance(self._local, FileObject):
self._local = type(self._local)(value, **self._local.kwargs)
elif isinstance(self.remote, FileObject):
self._local = type(self.remote)(value, **self.remote.kwargs)
else:
self._local = FileObject(value)
def read(self, reload=False, **kwargs):
"""
Read in the data
Parameters
----------
reload : bool, optional
if set to true, ignore cache
Returns
-------
table : astropy.table.Table
"""
if not reload:
table = self.retrive_cache()
if table is not None:
return table
if self.use_local_first:
if not self.local.isfile():
logging.warning("Cannot find local file; attempt to download from remote...")
self.download()
try:
table = self.local.read()
except (IOError, OSError):
logging.warning("Failed to read local file; attempt to read remote file...")
table = self.remote.read(**kwargs)
else:
try:
table = self.remote.read(**kwargs)
except Exception as read_exception: # pylint: disable=W0703
if self.local is None:
raise read_exception
logging.warning("Failed to read remote; fall back to read local file...")
if not self.local.isfile():
logging.warning("Cannot find local file; attempt to download from remote...")
self.download()
table = self.local.read()
if self.cache_in_memory:
self.store_cache(table)
return table
def write(self, table, dest=None, overwrite=False, **kwargs):
"""
write the data to file
Parameters
----------
table : astropy.table.Table
data to write
dest : str
"remote" or "local"
overwrite : bool, optional
if set to true, overwrite existing file
"""
if dest is None:
dest = "local" if self.use_local_first else "remote"
if dest.lower() == "remote":
f = self.remote
elif dest.lower() == "local":
f = self.local
else:
raise KeyError('dest must be "remote" or "local"')
if f.isfile() and not overwrite:
raise FileExistsError("set overwrite to True to overwrite the file")
f.write(table, **kwargs)
def download(self, local_file_path=None, overwrite=False, compress=False, set_as_local=True):
"""
Download in the data as a file
Parameters
----------
local_file_path : str
local file path
overwrite : bool, optional
if set to true, overwrite existing file
set_as_local : bool, optional
if set to true (default), use the file at local_file_path as the
local file for this DataObject
"""
if local_file_path is None:
try:
local_file_path = self.local.path
except AttributeError:
pass
else:
set_as_local = False # no need to do this again
self.remote.download_as_file(local_file_path, overwrite=overwrite, compress=compress)
if set_as_local:
self.local = local_file_path
@staticmethod
def _copy_table(table):
if table is None:
return None
return table.copy()
def clear_cache(self):
self._cached_table = None
def retrive_cache(self):
return self._copy_table(self._cached_table)
def store_cache(self, table):
self._cached_table = self._copy_table(table)
@property
def path(self):
return self.local.path if self.use_local_first else self.remote.path
def isfile(self):
return self.local.isfile() if self.use_local_first else self.remote.isfile()
| {
"content_hash": "dbaea0c75a546a1b5a95c6c89bc0d27d",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 112,
"avg_line_length": 31.566137566137566,
"alnum_prop": 0.5852329869259135,
"repo_name": "sagasurvey/saga",
"id": "91a1cb7abc58d4c46c238b17b6aa34c1af89155d",
"size": "11932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SAGA/database/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "376969"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
} |
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import poplib
import time
from mailbox import Mailbox, Message
import mailpile.mailboxes
from mailpile.conn_brokers import Master as ConnBroker
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import UnorderedPicklable
from mailpile.util import *
class UnsupportedProtocolError(Exception):
pass
class POP3Mailbox(Mailbox):
"""
Basic implementation of POP3 Mailbox.
"""
def __init__(self, host,
user=None, password=None, use_ssl=True, port=None,
debug=False, conn_cls=None):
"""Initialize a Mailbox instance."""
Mailbox.__init__(self, '/')
self.host = host
self.user = user
self.password = password
self.use_ssl = use_ssl
self.port = port
self.debug = debug
self.conn_cls = conn_cls
self._lock = MboxRLock()
self._pop3 = None
self._connect()
def _connect(self):
with self._lock:
if self._pop3:
try:
self._pop3.noop()
return
except poplib.error_proto:
self._pop3 = None
with ConnBroker.context(need=[ConnBroker.OUTGOING_POP3]):
if self.conn_cls:
self._pop3 = self.conn_cls(self.host, self.port or 110)
self.secure = self.use_ssl
elif self.use_ssl:
self._pop3 = poplib.POP3_SSL(self.host, self.port or 995)
self.secure = True
else:
self._pop3 = poplib.POP3(self.host, self.port or 110)
self.secure = False
if self.debug:
self._pop3.set_debuglevel(self.debug)
self._keys = None
try:
self._pop3.user(self.user)
self._pop3.pass_(self.password)
except poplib.error_proto:
raise AccessError()
def _refresh(self):
with self._lock:
self._keys = None
self.iterkeys()
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def _get(self, key):
with self._lock:
if key not in self.iterkeys():
raise KeyError('Invalid key: %s' % key)
self._connect()
ok, lines, octets = self._pop3.retr(self._km[key])
if not ok.startswith('+OK'):
raise KeyError('Invalid key: %s' % key)
# poplib is stupid in that it loses the linefeeds, so we need to
# do some guesswork to bring them back to what the server provided.
# If we don't do this jiggering, then sizes don't match up, which
# could cause allocation bugs down the line.
have_octets = sum(len(l) for l in lines)
if octets == have_octets + len(lines):
lines.append('')
return '\n'.join(lines)
elif octets == have_octets + 2*len(lines):
lines.append('')
return '\r\n'.join(lines)
elif octets == have_octets + len(lines) - 1:
return '\n'.join(lines)
elif octets == have_octets + 2*len(lines) - 2:
return '\r\n'.join(lines)
else:
raise ValueError('Length mismatch in message %s' % key)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
return Message(self._get(key))
def get_bytes(self, key):
"""Return a byte string representation or raise a KeyError."""
return self._get(key)
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self._get(key))
def get_msg_size(self, key):
with self._lock:
self._connect()
if key not in self.iterkeys():
raise KeyError('Invalid key: %s' % key)
ok, info, octets = self._pop3.list(self._km[key]).split()
return int(octets)
def stat(self):
with self._lock:
self._connect()
return self._pop3.stat()
def iterkeys(self):
"""Return an iterator over keys."""
# Note: POP3 *without UIDL* is useless. We don't support it.
with self._lock:
if self._keys is None:
self._connect()
try:
stat, key_list, octets = self._pop3.uidl()
except poplib.error_proto:
raise UnsupportedProtocolError()
self._keys = [tuple(k.split(' ', 1)) for k in key_list]
self._km = dict([reversed(k) for k in self._keys])
return [k[1] for k in self._keys]
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
return key in self.iterkeys()
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(self.iterkeys())
def flush(self):
"""Write any pending changes to the disk."""
self.close()
def close(self):
"""Flush and close the mailbox."""
try:
if self._pop3:
self._pop3.quit()
finally:
self._pop3 = None
self._keys = None
class MailpileMailbox(UnorderedPicklable(POP3Mailbox)):
UNPICKLABLE = ['_pop3', '_debug']
@classmethod
def parse_path(cls, config, path, create=False):
path = path.split('/')
if path and path[0].lower() in ('pop:', 'pop3:',
'pop3_ssl:', 'pop3s:'):
proto = path[0][:-1].lower()
userpart, server = path[2].rsplit("@", 1)
user, password = userpart.rsplit(":", 1)
if ":" in server:
server, port = server.split(":", 1)
else:
port = 995 if ('s' in proto) else 110
# This is a hack for GMail
if 'recent' in path[3:]:
user = 'recent:' + user
if not config:
debug = False
elif 'pop3' in config.sys.debug:
debug = 99
elif 'rescan' in config.sys.debug:
debug = 1
else:
debug = False
# WARNING: Order must match POP3Mailbox.__init__(...)
return (server, user, password, 's' in proto, int(port), debug)
raise ValueError('Not a POP3 url: %s' % path)
def save(self, *args, **kwargs):
# Do not save state locally
pass
##[ Test code follows ]#######################################################
if __name__ == "__main__":
import doctest
import sys
class _MockPOP3(object):
"""
Base mock that pretends to be a poplib POP3 connection.
>>> pm = POP3Mailbox('localhost', user='bad', conn_cls=_MockPOP3)
Traceback (most recent call last):
...
AccessError
>>> pm = POP3Mailbox('localhost', user='a', password='b',
... conn_cls=_MockPOP3)
>>> pm.stat()
(2, 123456)
>>> pm.iterkeys()
['evil', 'good']
>>> 'evil' in pm, 'bogon' in pm
(True, False)
>>> [msg['subject'] for msg in pm]
['Msg 1', 'Msg 2']
>>> pm.get_msg_size('evil'), pm.get_msg_size('good')
(47, 51)
>>> pm.get_bytes('evil')
'From: test@mailpile.is\\nSubject: Msg 1\\n\\nOh, hi!\\n'
>>> pm['invalid-key']
Traceback (most recent call last):
...
KeyError: ...
"""
TEST_MSG = ('From: test@mailpile.is\r\n'
'Subject: Msg N\r\n'
'\r\n'
'Oh, hi!\r\n')
DEFAULT_RESULTS = {
'user': lambda s, u: '+OK' if (u == 'a') else '-ERR',
'pass_': lambda s, u: '+OK Logged in.' if (u == 'b') else '-ERR',
'stat': (2, 123456),
'noop': '+OK',
'list_': lambda s: ('+OK 2 messages:',
['1 %d' % len(s.TEST_MSG.replace('\r', '')),
'2 %d' % len(s.TEST_MSG)], 0),
'uidl': ('+OK', ['1 evil', '2 good'], 0),
'retr': lambda s, m: ('+OK',
s.TEST_MSG.replace('N', m).splitlines(),
len(s.TEST_MSG)
if m[0] == '2' else
len(s.TEST_MSG.replace('\r', ''))),
}
RESULTS = {}
def __init__(self, *args, **kwargs):
def mkcmd(rval):
def r(rv):
if isinstance(rv, (str, unicode)) and rv[0] != '+':
raise poplib.error_proto(rv)
return rv
def cmd(*args, **kwargs):
if isinstance(rval, (str, unicode, list, tuple, dict)):
return r(rval)
else:
return r(rval(self, *args, **kwargs))
return cmd
for cmd, rval in dict_merge(self.DEFAULT_RESULTS, self.RESULTS
).iteritems():
self.__setattr__(cmd, mkcmd(rval))
def list(self, which=None):
msgs = self.list_()
if which:
return '+OK ' + msgs[1][1-int(which)]
return msgs
def __getattr__(self, attr):
return self.__getattribute__(attr)
class _MockPOP3_Without_UIDL(_MockPOP3):
"""
Mock that lacks the UIDL command.
>>> pm = POP3Mailbox('localhost', user='a', password='b',
... conn_cls=_MockPOP3_Without_UIDL)
>>> pm.iterkeys()
Traceback (most recent call last):
...
UnsupportedProtocolError
"""
RESULTS = {'uidl': '-ERR'}
results = doctest.testmod(optionflags=doctest.ELLIPSIS,
extraglobs={})
print '%s' % (results, )
if results.failed:
sys.exit(1)
if len(sys.argv) > 1:
mbx = MailpileMailbox(*MailpileMailbox.parse_path(None, sys.argv[1]))
print 'Status is: %s' % (mbx.stat(), )
print 'Downloading mail and listing subjects, hit CTRL-C to quit'
for msg in mbx:
print msg['subject']
time.sleep(2)
else:
mailpile.mailboxes.register(10, MailpileMailbox)
| {
"content_hash": "4570d761e764a8772bb1241edc26f91c",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 78,
"avg_line_length": 32.86503067484663,
"alnum_prop": 0.4922531267500467,
"repo_name": "laborautonomo/Mailpile",
"id": "3e18093c818890e26888513ade116b89c3a3525f",
"size": "10714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mailpile/mailboxes/pop3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "131369"
},
{
"name": "JavaScript",
"bytes": "563983"
},
{
"name": "Makefile",
"bytes": "5962"
},
{
"name": "Python",
"bytes": "1261330"
},
{
"name": "Shell",
"bytes": "18103"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from rest_framework import permissions
class IsAssetRemovable(permissions.BasePermission):
"""
Object-level permission to delete an asset
only when its records_set is empty.
"""
message = _('Cannot delete an asset with existing records bound to it.')
def has_object_permission(self, request, view, obj):
if request.method == 'DELETE':
if obj.to_asset_records.exists() or obj.from_asset_records.exists():
return False
return True
| {
"content_hash": "98da75001b10c210ed22a74e8faf1f20",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 80,
"avg_line_length": 31,
"alnum_prop": 0.6774193548387096,
"repo_name": "BasicWolf/minicash",
"id": "96560e0ff417a94991095402d59add59530509d9",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/minicash/core/permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6064"
},
{
"name": "Emacs Lisp",
"bytes": "317"
},
{
"name": "Gherkin",
"bytes": "6864"
},
{
"name": "HTML",
"bytes": "23863"
},
{
"name": "JavaScript",
"bytes": "109423"
},
{
"name": "Makefile",
"bytes": "3700"
},
{
"name": "Python",
"bytes": "134267"
}
],
"symlink_target": ""
} |
class DoublyListNode(object):
def __init__(self, val):
self.val = val
self.next = None
self.prev = None
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def levelOrder(self, root):
if not root:
return None
res = [[root.val]]
queue = [root]
while queue:
level = len(queue)
temp = []
while level:
node = queue.pop(0)
if not node.left:
temp.append(".")
if node.left:
queue.append(node.left)
temp.append(node.left.val)
if not node.right:
temp.append(".")
if node.right:
queue.append(node.right)
temp.append(node.right.val)
level -= 1
res.append(list(temp)) # diff temp
return res[:-1]
m = Solution()
tree = TreeNode(1)
tree.left = TreeNode(2)
tree.right = TreeNode(3)
tree.right.left = TreeNode(4)
print m.levelOrder(tree) | {
"content_hash": "46c19adf5b8758eceba2f8e4ab8e9949",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 47,
"avg_line_length": 27.25581395348837,
"alnum_prop": 0.4735494880546075,
"repo_name": "youhusky/Facebook_Prepare",
"id": "1a1c1f890270a9028084a4797965c813dbaa401a",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wepay and OfferUp/printTreelevelOrder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187109"
}
],
"symlink_target": ""
} |
from subprocess import call, check_output#, run
import subprocess
import os
from multiprocessing import Pool
import time
import argparse
import logging
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="BAM file (can be the location on S3)", nargs='+')
parser.add_argument("-n", "--cores", help="Number of Cores to use")
parser.add_argument("-m", "--memory", help="RAM Memory to use in GB")
args = parser.parse_args()
bam_files = args.input
n_cores = int(args.cores)
memory = int(args.memory)
print(bam_files)
bam_groups = list(chunks(bam_files,n_cores))
print(bam_groups)
# original_bam = bam_file
# print(bam_file)
human_reference = "/home/ubuntu/projects/input/b37/human_g1k_v37.fasta" #84 features
human_reference = "/home/ubuntu/projects/input/grch37/d5/hs37d5.fa" #86 features
gtf_file = "/home/ubuntu/projects/input/gtf/Homo_sapiens.GRCh37.75.gtf"
fastqc_dir = "/home/ubuntu/projects/programs/fastqc/FastQC/"
samtools_dir = "/home/ubuntu/projects/programs/samtools-1.3.1"
bamtools_dir = "/home/ubuntu/projects/programs/bamtools/bin/bamtools"
programs_dir = "/home/ubuntu/projects/programs/"
picard_dir = "/home/ubuntu/projects/programs/picard"
gatk_dir = "/home/ubuntu/projects/programs/gatk"
qualimap_dir = "/home/ubuntu/projects/programs/qualimap/qualimap_v2.2"
featurecounts_dir = "/home/ubuntu/projects/programs/subread-1.5.1-Linux-x86_64/bin"
input_folder = '/home/ubuntu/projects/input/bam'
# print(base, base_name)
output_folder = '/home/ubuntu/projects/output/bam/'
# if not os.path.exists(output_folder):
# os.makedirs(output_folder)
logging.basicConfig(filename='bam_genome_metrics.run.log.txt',level=logging.DEBUG)
# print(base_name)
# print(bam_file)
for bam_group in bam_groups:
for bam_file in bam_group:
original_bam = bam_file
base=os.path.basename(bam_file)
base_name = os.path.splitext(base)[0]
if bam_file.startswith('s3://'):
#download file to input folder
command = "s3cmd get --continue %s %s/" % (bam_file, input_folder)
output = call(command, shell=True)
logging.info(output)
print(output)
# print(command)
bam_file = "%s/%s" % (input_folder, base)
print(bam_file)
if not os.path.exists(bam_file+'.bai'):
#Download index
command = "s3cmd get --continue %s.bai %s/" % (original_bam, input_folder)
output = call(command, shell=True)
logging.info(output)
print(output)
command = "%s/fastqc -t %s %s -o %s" % (fastqc_dir, n_cores, " ".join(bam_group), output_folder)
print(command)
output = call(command, shell=True)
print(output)
# #samtools flagstat
# print('Running samtools flagstat')
# command = """%s/samtools flagstat %s > %s/%s.samtools.flagstat.txt
# """ % (samtools_dir, bam_file, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
for bam_group in bam_group:
for bam_file in bam_group:
base=os.path.basename(bam_file)
base_name = os.path.splitext(base)[0]
output_folder = '/home/ubuntu/projects/output/bam/%s' % (base_name)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
#samtools flagstat
print('Running sambamba flagstat')
command = """%s/sambamba_v0.6.4 flagstat -t %s -p %s > %s/%s.samtools.flagstat.txt
""" % (programs_dir, n_cores, bam_file, output_folder, base_name)
output = call(command, shell=True)
print(output)
print('Running featureCounts')
#featureCounts
command = """%s/featureCounts --donotsort -T %s -p \
-a %s \
-o %s/%s.featureCounts.txt \
%s""" % (featurecounts_dir, n_cores, gtf_file, output_folder, base_name, bam_file)
output = call(command, shell=True)
print(output)
print('Running DepthOfCoverage')
#gatk DepthOfCoverage
command = """
java -Xmx%sg -jar %s/GenomeAnalysisTK.jar -T DepthOfCoverage \
-I %s \
-R %s \
-o %s/%s.DepthOfCoverage.txt \
-ct 15 -ct 50 -ct 100 -ct 150 -ct 200 \
-log %s/%s.DepthofCoverage.log \
--omitIntervalStatistics \
-nt %s
""" % (memory, gatk_dir, bam_file, human_reference, output_folder, base_name, output_folder, base_name, n_cores)
output = call(command, shell=True)
print(output)
#qualimap BamQC
print('Running qualimap BamQC')
command = """%s/qualimap bamqc \
--java-mem-size=%sG \
-bam %s \
-outdir %s \
-nt %s
""" % (qualimap_dir, memory, bam_file, output_folder, n_cores)
output = call(command, shell=True)
print(output)
os.remove(bam_file)
#bamtools do not run in parallel
# print('Running bamtools')
# command = """/home/ubuntu/projects/programs/bamtools/bin/bamtools stats -in %s > %s/%s.bamtools.stats.txt""" % (bam_file, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
# #picard
# print('Running CollectAlignmentSummaryMetrics')
# # #CollectAlignmentSummaryMetrics
# command = """
# java -jar -Xmx%sg %s/picard.jar CollectAlignmentSummaryMetrics \
# I=%s \
# O=%s/%s.AlignmentSummaryMetrics.txt \
# R=%s \
# VALIDATION_STRINGENCY=SILENT""" % (memory, picard_dir, bam_file, output_folder, base_name, human_reference)
# output = call(command, shell=True)
# print(output)
# print('Running CollectGcBiasMetrics')
# # #CollectGcBiasMetrics
# command = """
# java -jar -Xmx%sg %s/picard.jar CollectGcBiasMetrics \
# I=%s \
# O=%s/%s.gc_bias_metrics.txt \
# R=%s \
# CHART=%s/%s.gc_bias_metrics.pdf \
# S=%s/%s.gc_bias_summary_metrics.txt \
# VALIDATION_STRINGENCY=SILENT""" % (memory, picard_dir, bam_file, output_folder, base_name, human_reference, output_folder, base_name, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
# print('Running CollectInsertSizeMetrics')
# #CollectInsertSizeMetrics
# command = """java -Xmx%sg -jar %s/picard.jar CollectInsertSizeMetrics \
# I=%s \
# O=%s/%s.insert_size_metrics.txt \
# H=%s/%s.insert_size_histogram.pdf \
# M=0.5 \
# VALIDATION_STRINGENCY=SILENT""" % (memory, picard_dir, bam_file, output_folder, base_name, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
# # #MeanQualityByCycle
# print('Running MeanQualityByCycle')
# command = """java -Xmx%sg -jar %s/picard.jar MeanQualityByCycle \
# I=%s \
# O=%s/%s.mean_qual_by_cycle.txt \
# CHART=%s/%s.mean_qual_by_cycle.pdf \
# VALIDATION_STRINGENCY=SILENT """ % (memory, picard_dir, bam_file, output_folder, base_name, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
# print('Running QualityScoreDistribution')
# # #QualityScoreDistribution
# command = """java -Xmx%sg -jar %s/picard.jar QualityScoreDistribution \
# I=%s \
# O=%s/%s.qual_score_dist.txt \
# CHART=%s/%s.qual_score_dist.pdf \
# VALIDATION_STRINGENCY=SILENT """ % (memory, picard_dir, bam_file, output_folder, base_name, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
# print('Running BamIndexStats')
# # #BamIndexStats
# command = """java -Xmx%s -jar %s/picard.jar BamIndexStats \
# I=%s \
# O=%s/%s.BamIndexStats.output.txt \
# VALIDATION_STRINGENCY=SILENT""" % (memory_use, picard_dir, bam_file, output_folder, base_name)
# output = call(command, shell=True)
# print(output)
# print('Running CollectHsMetrics')
#CollectHsMetrics
# command = """java -Xmx%s -jar %s/picard.jar CollectHsMetrics \
# I=%s \
# O=%s/%s.hs_metrics.txt \
# R=%s \
# VALIDATION_STRINGENCY=SILENT """ % (memory_use, picard_dir, bam_file, output_folder, base_name, human_reference)
# output = call(command, shell=True)
# print(output) | {
"content_hash": "d83c4738a34ed5062467248a849cc692",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 161,
"avg_line_length": 33.652542372881356,
"alnum_prop": 0.6557542180810879,
"repo_name": "raonyguimaraes/ngs_metrics",
"id": "c0bbb7d4bef3bdcac2531a7bc4cc9ab3d678aa60",
"size": "7989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bam_genome_metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52071"
},
{
"name": "Shell",
"bytes": "445"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
from numpy import sqrt, squeeze, zeros_like
from numpy.random import randn, uniform
def init_unif(sz):
"""
Uniform intialization
Heuristic commonly used to initialize deep neural networks
"""
bnd = 1 / sqrt(sz[0])
p = uniform(low=-bnd, high=bnd, size=sz)
return squeeze(p)
def init_nunif(sz):
"""
Normalized uniform initialization
See Glorot X., Bengio Y.: "Understanding the difficulty of training
deep feedforward neural networks". AISTATS, 2010
"""
bnd = sqrt(6) / sqrt(sz[0] + sz[1])
p = uniform(low=-bnd, high=bnd, size=sz)
return squeeze(p)
def init_randn(sz):
return squeeze(randn(*sz))
class Parameter(np.ndarray):
def __new__(cls, *args, **kwargs):
# TODO: hackish, find better way to handle higher-order parameters
if len(args[0]) == 3:
sz = (args[0][1], args[0][2])
arr = np.array([Parameter._init_array(sz, args[1]) for _ in range(args[0][0])])
else:
arr = Parameter._init_array(args[0], args[1])
arr = arr.view(cls)
arr.name = kwargs.pop('name', None)
arr.post = kwargs.pop('post', None)
if arr.post is not None:
arr = arr.post(arr)
return arr
def __array_finalize__(self, obj):
if obj is None:
return
self.name = getattr(obj, 'name', None)
self.post = getattr(obj, 'post', None)
@staticmethod
def _init_array(shape, method):
mod = sys.modules[__name__]
method = 'init_%s' % method
if not hasattr(mod, method):
raise ValueError('Unknown initialization (%s)' % method)
elif len(shape) != 2:
raise ValueError('Shape must be of size 2')
return getattr(mod, method)(shape)
class ParameterUpdate(object):
def __init__(self, param, learning_rate):
self.param = param
self.learning_rate = learning_rate
def __call__(self, gradient, idx=None):
self._update(gradient, idx)
if self.param.post is not None:
self.param = self.param.post(self.param, idx)
def reset(self):
pass
class SGD(ParameterUpdate):
"""
Class to perform SGD updates on a parameter
"""
def _update(self, g, idx):
self.param[idx] -= self.learning_rate * g
class AdaGrad(ParameterUpdate):
def __init__(self, param, learning_rate):
super(AdaGrad, self).__init__(param, learning_rate)
self.p2 = zeros_like(param)
def _update(self, g, idx=None):
self.p2[idx] += g * g
H = np.maximum(np.sqrt(self.p2[idx]), 1e-7)
self.param[idx] -= self.learning_rate * g / H
def reset(self):
self.p2 = zeros_like(self.p2)
def normalize(M, idx=None):
if idx is None:
M = M / np.sqrt(np.sum(M ** 2, axis=1))[:, np.newaxis]
else:
nrm = np.sqrt(np.sum(M[idx, :] ** 2, axis=1))[:, np.newaxis]
M[idx, :] = M[idx, :] / nrm
return M
def normless1(M, idx=None):
nrm = np.sum(M[idx] ** 2, axis=1)[:, np.newaxis]
nrm[nrm < 1] = 1
M[idx] = M[idx] / nrm
return M
| {
"content_hash": "ea6f1a6459388967fd3dac0cae5435ae",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 95,
"avg_line_length": 26.68595041322314,
"alnum_prop": 0.5602353669866832,
"repo_name": "mnick/scikit-kge",
"id": "a844b04ad5b2c1c27d7e253b970f49be2c9c8f2e",
"size": "3229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skge/param.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32804"
}
],
"symlink_target": ""
} |
import copy
import multiprocessing
import random
from deap import creator, base, tools, algorithms
from train import Train
creator.create("FitnessMin", base.Fitness, weights=(-1.0, 1.0, -0.1, -0.1))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 1, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=81)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
t = Train(distance=3200)
print(t.control(t.ascii2list('')))
def eval_gem(individual):
tc = copy.deepcopy(t)
return tc.control(individual), tc.is_success(), tc.speed, abs(tc.distance - tc.position)
toolbox.register("evaluate", eval_gem)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=-2, up=2, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=30)
if __name__ == "__main__":
pool = multiprocessing.Pool(processes=8)
toolbox.register("map", pool.map)
population = toolbox.population(n=2400)
print(" Evaluated %i individuals" % len(population))
gen = 0
while True:
gen += 1
offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.2)
fits = toolbox.map(toolbox.evaluate, offspring)
for fit, ind in zip(fits, offspring):
ind.fitness.values = fit
population = toolbox.select(offspring, k=len(population))
if gen % 10 == 0:
best_ind = tools.selBest(population, 1)[0]
print('%d\t[%s]\t[%s]' % (gen, Train.list2ascii(best_ind), best_ind.fitness.values))
if gen > 1000:
break
pool.close()
print("-- End of (successful) evolution --")
best_ind = tools.selBest(population, 1)[0]
print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
| {
"content_hash": "f84f5289a798d7e6b7831e3340920cc3",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 96,
"avg_line_length": 30.19047619047619,
"alnum_prop": 0.6692954784437434,
"repo_name": "mikoim/funstuff",
"id": "cc59ff2422ddae04bd0b53c2d33b20509fbe975d",
"size": "1902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system simulation/2015/train/simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "26516"
},
{
"name": "C",
"bytes": "85616"
},
{
"name": "C++",
"bytes": "7221"
},
{
"name": "CMake",
"bytes": "2282"
},
{
"name": "CSS",
"bytes": "1661"
},
{
"name": "Elixir",
"bytes": "541"
},
{
"name": "Go",
"bytes": "8433"
},
{
"name": "HTML",
"bytes": "176956"
},
{
"name": "Haskell",
"bytes": "603"
},
{
"name": "Java",
"bytes": "3206"
},
{
"name": "JavaScript",
"bytes": "48109"
},
{
"name": "Jupyter Notebook",
"bytes": "9043"
},
{
"name": "Lex",
"bytes": "5512"
},
{
"name": "Makefile",
"bytes": "273"
},
{
"name": "Mathematica",
"bytes": "189020"
},
{
"name": "Nginx",
"bytes": "648"
},
{
"name": "PHP",
"bytes": "5962"
},
{
"name": "Protocol Buffer",
"bytes": "968"
},
{
"name": "Python",
"bytes": "147116"
},
{
"name": "Shell",
"bytes": "2192"
},
{
"name": "Yacc",
"bytes": "14693"
}
],
"symlink_target": ""
} |
from lookml.core import \
View,\
Dimension,Dimension_Group,Filter,Parameter,\
Measure,\
Explore,Model,Manifest, DENSE_LOOKML_THRESHOLD
from lookml.lib.project import *
from lookml import lkml
| {
"content_hash": "62620233825566fde5f5f2d7d3a749ea",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.7451923076923077,
"repo_name": "looker-open-source/pylookml",
"id": "d4321612d76938ebd8bbdb4788e93573c58b727d",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lookml/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "LookML",
"bytes": "415913"
},
{
"name": "Python",
"bytes": "634838"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import
from itertools import cycle
from operator import itemgetter
from toolz import unique, groupby
import bokeh.plotting as bp
from bokeh.io import _state
from bokeh.palettes import brewer
from bokeh.models import HoverTool, LinearAxis, Range1d
from ..dot import funcname
from ..core import istask
def pprint_task(task, keys, label_size=60):
"""Return a nicely formatted string for a task.
Parameters
----------
task:
Value within dask graph to render as text
keys: iterable
List of keys within dask graph
label_size: int (optional)
Maximum size of output label, defaults to 60
Examples
--------
>>> from operator import add, mul
>>> dsk = {'a': 1,
... 'b': 2,
... 'c': (add, 'a', 'b'),
... 'd': (add, (mul, 'a', 'b'), 'c'),
... 'e': (sum, ['a', 'b', 5])}
>>> pprint_task(dsk['c'], dsk)
'add(_, _)'
>>> pprint_task(dsk['d'], dsk)
'add(mul(_, _), _)'
>>> pprint_task(dsk['e'], dsk)
'sum([_, _, *])'
"""
if istask(task):
func = task[0]
if hasattr(func, 'funcs'):
head = '('.join(funcname(f) for f in func.funcs)
tail = ')'*len(func.funcs)
else:
head = funcname(task[0])
tail = ')'
label_size2 = int((label_size - len(head) - len(tail)) / len(task[1:]))
if label_size2 > 5:
args = ', '.join(pprint_task(t, keys, label_size2)
for t in task[1:])
else:
args = '...'
result = '{0}({1}{2}'.format(head, args, tail)
elif isinstance(task, list):
task2 = task[:3]
label_size2 = int((label_size - 2 - 2*len(task2)) / len(task2))
args = ', '.join(pprint_task(t, keys, label_size2) for t in task2)
if len(task) > 3:
result = '[{0}, ...]'.format(args)
else:
result = '[{0}]'.format(args)
else:
try:
if task in keys:
result = '_'
else:
result = '*'
except TypeError:
result = '*'
return result
def get_colors(palette, funcs):
"""Get a dict mapping funcs to colors from palette.
Parameters
----------
palette : string
Name of the palette. Must be a key in bokeh.palettes.brewer
funcs : iterable
Iterable of function names
"""
unique_funcs = list(sorted(unique(funcs)))
n_funcs = len(unique_funcs)
palette_lookup = brewer[palette]
keys = list(palette_lookup.keys())
low, high = min(keys), max(keys)
if n_funcs > high:
colors = cycle(palette_lookup[high])
elif n_funcs < low:
colors = palette_lookup[low]
else:
colors = palette_lookup[n_funcs]
color_lookup = dict(zip(unique_funcs, colors))
return [color_lookup[n] for n in funcs]
def visualize(profilers, file_path=None, show=True, save=True, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
If multiple profilers are passed in, the plots are stacked vertically.
Parameters
----------
profilers : profiler or list
Profiler or list of profilers.
file_path : string, optional
Name of the plot output file.
show : boolean, optional
If True (default), the plot is opened in a browser.
save : boolean, optional
If True (default), the plot is saved to disk.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
if not _state._notebook:
file_path = file_path or "profile.html"
bp.output_file(file_path)
if not isinstance(profilers, list):
profilers = [profilers]
figs = [prof._plot(**kwargs) for prof in profilers]
# Stack the plots
if len(figs) == 1:
p = figs[0]
else:
top = figs[0]
for f in figs[1:]:
f.x_range = top.x_range
f.title = None
f.min_border_top = 20
for f in figs[:1]:
f.xaxis.axis_label = None
f.min_border_bottom = 20
for f in figs:
f.min_border_left = 75
f.min_border_right = 75
p = bp.gridplot([[f] for f in figs])
if show:
bp.show(p)
if file_path and save:
bp.save(p)
return p
def plot_tasks(results, dsk, palette='GnBu', label_size=60, **kwargs):
"""Visualize the results of profiling in a bokeh plot.
Parameters
----------
results : sequence
Output of Profiler.results
dsk : dict
The dask graph being profiled.
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
label_size: int (optional)
Maximum size of output labels in plot, defaults to 60
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by visualize.
Returns
-------
The completed bokeh plot object.
"""
keys, tasks, starts, ends, ids = zip(*results)
id_group = groupby(itemgetter(4), results)
timings = dict((k, [i.end_time - i.start_time for i in v]) for (k, v) in
id_group.items())
id_lk = dict((t[0], n) for (n, t) in enumerate(sorted(timings.items(),
key=itemgetter(1), reverse=True)))
left = min(starts)
right = max(ends)
defaults = dict(title="Profile Results",
tools="hover,save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
p = bp.figure(y_range=[str(i) for i in range(len(id_lk))],
x_range=[0, right - left], **defaults)
data = {}
data['width'] = width = [e - s for (s, e) in zip(starts, ends)]
data['x'] = [w/2 + s - left for (w, s) in zip(width, starts)]
data['y'] = [id_lk[i] + 1 for i in ids]
data['function'] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]
data['color'] = get_colors(palette, funcs)
data['key'] = [str(i) for i in keys]
source = bp.ColumnDataSource(data=data)
p.rect(source=source, x='x', y='y', height=1, width='width',
color='color', line_color='gray')
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.yaxis.axis_label = "Worker ID"
p.xaxis.axis_label = "Time (s)"
hover = p.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Key:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@key</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Task:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@function</span>
</div>
"""
hover.point_policy = 'follow_mouse'
return p
def plot_resources(results, palette='GnBu', **kwargs):
"""Plot resource usage in a bokeh plot.
Parameters
----------
results : sequence
Output of ResourceProfiler.results
palette : string, optional
Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
**kwargs
Other keyword arguments, passed to bokeh.figure. These will override
all defaults set by plot_resources.
Returns
-------
The completed bokeh plot object.
"""
t, mem, cpu = zip(*results)
left, right = min(t), max(t)
t = [i - left for i in t]
defaults = dict(title="Profile Results",
tools="save,reset,resize,xwheel_zoom,xpan",
plot_width=800, plot_height=300)
defaults.update((k, v) for (k, v) in kwargs.items() if k in
bp.Figure.properties())
p = bp.figure(y_range=(0, max(cpu)), x_range=(0, right - left), **defaults)
colors = brewer[palette][6]
p.line(t, cpu, color=colors[0], line_width=4, legend='% CPU')
p.yaxis.axis_label = "% CPU"
p.extra_y_ranges = {'memory': Range1d(start=0, end=max(mem))}
p.line(t, mem, color=colors[2], y_range_name='memory', line_width=4,
legend='Memory')
p.add_layout(LinearAxis(y_range_name='memory', axis_label='Memory (MB)'),
'right')
p.xaxis.axis_label = "Time (s)"
return p
| {
"content_hash": "11e8e94b25c8fcfaad5abb8b1657efc1",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 87,
"avg_line_length": 31.84328358208955,
"alnum_prop": 0.5650339817201782,
"repo_name": "clarkfitzg/dask",
"id": "79c3d3bc6a0704966ee717b9a69f2fd5cd2a04a2",
"size": "8534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/diagnostics/profile_visualize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "908042"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
import os
import sys
import click
from alertaclient.api import Client
from alertaclient.auth.utils import get_token
from alertaclient.config import Config
CONTEXT_SETTINGS = dict(
auto_envvar_prefix='ALERTA',
default_map={'query': {'compact': True}},
help_option_names=['-h', '--help'],
)
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'commands'))
class AlertaCLI(click.MultiCommand):
def list_commands(self, ctx):
rv = []
for filename in os.listdir(cmd_folder):
if filename.endswith('.py') and \
filename.startswith('cmd_'):
rv.append(filename[4:-3])
rv.sort()
return rv
def get_command(self, ctx, name):
try:
if sys.version_info[0] == 2:
name = name.encode('ascii', 'replace')
mod = __import__('alertaclient.commands.cmd_' + name, None, None, ['cli'])
except ImportError:
return
return mod.cli
@click.command(cls=AlertaCLI, context_settings=CONTEXT_SETTINGS)
@click.option('--config-file', metavar='<FILE>', help='Configuration file.')
@click.option('--profile', metavar='<PROFILE>', help='Configuration profile.')
@click.option('--endpoint-url', metavar='<URL>', help='API endpoint URL.')
@click.option('--output', 'output', metavar='<FORMAT>', help='Output format. eg. plain, simple, grid, psql, presto, rst, html, json, json_lines')
@click.option('--json', 'output', flag_value='json', help='Output in JSON format. Shortcut for "--output json"')
@click.option('--color/--no-color', help='Color-coded output based on severity.')
@click.option('--debug', is_flag=True, help='Debug mode.')
@click.pass_context
def cli(ctx, config_file, profile, endpoint_url, output, color, debug):
"""
Alerta client unified command-line tool.
"""
config = Config(config_file)
config.get_config_for_profle(profile)
config.get_remote_config(endpoint_url)
ctx.obj = config.options
# override current options with command-line options or environment variables
ctx.obj['output'] = output or config.options['output']
ctx.obj['color'] = color or os.environ.get('CLICOLOR', None) or config.options['color']
endpoint = endpoint_url or config.options['endpoint']
ctx.obj['client'] = Client(
endpoint=endpoint,
key=config.options['key'],
token=get_token(endpoint),
username=config.options.get('username', None),
password=config.options.get('password', None),
timeout=float(config.options['timeout']),
ssl_verify=config.options['sslverify'],
ssl_cert=config.options.get('sslcert', None),
ssl_key=config.options.get('sslkey', None),
debug=debug or os.environ.get('DEBUG', None) or config.options['debug']
)
| {
"content_hash": "6a4b976ba77593ab6afe6566cef9c12b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 145,
"avg_line_length": 38.08108108108108,
"alnum_prop": 0.6462029808374734,
"repo_name": "alerta/python-alerta",
"id": "663f2e7f02e3031a1116cbc8128d31ad71dd314c",
"size": "2818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alertaclient/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106941"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class Usage(Model):
"""Describes network resource usage.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar unit: An enum describing the unit of measurement. Default value:
"Count" .
:vartype unit: str
:param current_value: The current value of the usage.
:type current_value: long
:param limit: The limit of usage.
:type limit: long
:param name: The name of the type of usage.
:type name: :class:`UsageName <azure.mgmt.network.models.UsageName>`
"""
_validation = {
'unit': {'required': True, 'constant': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'UsageName'},
}
unit = "Count"
def __init__(self, current_value, limit, name):
self.current_value = current_value
self.limit = limit
self.name = name
| {
"content_hash": "cfa1dfca78743da19f13cd3cd498317a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 30.15,
"alnum_prop": 0.587893864013267,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "dda586f17b6a7891410dd0e1e9aa1bc6c17a9cd2",
"size": "1680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/models/usage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
} |
"""
Base settings for Sigma Pi, Gamma Iota chapter website.
"""
import os
BASE_DIR = os.getcwd()
EC_EMAIL = "gr-sigmapi@wpi.edu"
ACTIVES_EMAIL = "sigmapiactives@wpi.edu"
ALUMNI_EMAIL = "sigmapialumni@wpi.edu"
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = "America/New_York"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = "/content/"
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"sass_processor.finders.CssFinder",
)
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/secure/"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"apps.PubSite.processors.menu_items",
],
},
}
]
MIDDLEWARE = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
X_FRAME_OPTIONS = "SAMEORIGIN"
DOWNLOADVIEW_RULES = [
{
"destination_dir": "lightpd-optimized-by-middleware",
},
]
ROOT_URLCONF = "common.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "common.wsgi.application"
PREREQ_APPS = (
"django",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
"sass_processor",
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
PROJECT_APPS = (
"common",
"apps.PubSite",
"apps.UserInfo",
"apps.Archive",
"apps.PartyList",
"apps.PartyListV2",
"apps.Secure",
"apps.Links",
"apps.Standards",
"apps.Scholarship",
"apps.Slack",
)
INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS
SESSION_SERIALIZER = "django.contrib.sessions.serializers.JSONSerializer"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
},
}
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
SASS_PROCESSOR_AUTO_INCLUDE = False
SASS_PRECISION = 8
PUBLIC_PAGES = [
{"title": "Home", "view": "pub-index"},
{"title": "About", "view": "pub-about"},
{"title": "Service & Activities", "view": "pub-service"},
# {"title": "Campaign", "view": "pub-campaign"},
{"title": "Rush Sigma Pi", "view": "pub-rush"},
{"title": "Brothers", "view": "userinfo-users"},
{
"title": "Donate",
"target": "_blank",
"url": ("https://paypal.me/sigmapigammaiota?locale.x=en_US"),
}, # reasons why I hate python, this single string looks like three
# This is hard-coded into login.scss
{"title": "Log In", "view": "pub-login"},
]
| {
"content_hash": "9d29740304bf5883ddbd5bc68ccbdc70",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 85,
"avg_line_length": 30,
"alnum_prop": 0.6604790419161677,
"repo_name": "sigmapi-gammaiota/sigmapi-web",
"id": "b23a971ffd4caf424c10400e958a91461bef473e",
"size": "5010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sigmapiweb/common/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47173"
},
{
"name": "HTML",
"bytes": "265883"
},
{
"name": "JavaScript",
"bytes": "1338629"
},
{
"name": "Python",
"bytes": "335952"
},
{
"name": "SCSS",
"bytes": "44203"
},
{
"name": "Shell",
"bytes": "3928"
}
],
"symlink_target": ""
} |
import copy
import datetime
import json
import logging
import os
import re
import time
import warnings
from collections import defaultdict
from typing import List
import pandas as pd
import requests
from pyshex import ShExEvaluator
from rdflib import Graph
from shexer.shaper import Shaper
from wikidataintegrator.wdi_backoff import wdi_backoff
from wikidataintegrator.wdi_config import config
from wikidataintegrator.wdi_fastrun import FastRunContainer
from wikidataintegrator.wdi_helpers import MappingRelationHelper
from wikidataintegrator import wdi_rdf
"""
Authors:
Andra Waagmeester (andra' at ' micelio.be)
Gregory Stupp (stuppie' at 'gmail.com )
Sebastian Burgstaller (sebastian.burgstaller' at 'gmail.com
This file is part of the WikidataIntegrator.
"""
"""
working examples:
https://www.wikidata.org/wiki/Special:EntityData/P279.json
https://commons.wikimedia.org/wiki/Special:EntityData/M574781.json
"""
__author__ = 'Andra Waagmeester, Gregory Stupp, Sebastian Burgstaller '
__license__ = 'MIT'
class WDFunctionsEngine(object):
def __init__(self, mediawiki_api_url=None, sparql_endpoint_url=None,):
self.mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
self.sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if sparql_endpoint_url is None else sparql_endpoint_url
@staticmethod
def get_rdf(wd_item_id='', format="turtle", mediawiki_api_url=None):
"""
function to get RDF of a Wikidata item
:param wd_item_id='': Wikidata identifier to extract the RDF of
:param format RDF from to return takes (turtle, ntriples, rdfxml,)
:param mediawiki_api_url: default to wikidata's api, but can be changed to any wikibase
:return:
"""
localcopy = Graph()
localcopy.parse(config["CONCEPT_BASE_URI"] + wd_item_id + ".ttl")
return localcopy.serialize(format=format)
@staticmethod
@wdi_backoff()
def execute_sparql_query(query, prefix=None, endpoint=None, user_agent=None, as_dataframe=False, max_retries=1000, retry_after=60):
"""
Static method which can be used to execute any SPARQL query
:param prefix: The URI prefixes required for an endpoint, default is the Wikidata specific prefixes
:param query: The actual SPARQL query string
:param endpoint: The URL string for the SPARQL endpoint. Default is the URL for the Wikidata SPARQL endpoint
:param user_agent: Set a user agent string for the HTTP header to let the WDQS know who you are.
:param as_dataframe: Return result as pandas dataframe
:type user_agent: str
:param max_retries: The number time this function should retry in case of header reports.
:param retry_after: the number of seconds should wait upon receiving either an error code or the WDQS is not reachable.
:return: The results of the query are returned in JSON format
"""
sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if endpoint is None else endpoint
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
if prefix:
query = prefix + '\n' + query
params = {
'query': '#Tool: wdi_core fastrun\n' + query,
'format': 'json'
}
headers = {
'Accept': 'application/sparql-results+json',
'User-Agent': user_agent
}
response = None
for n in range(max_retries):
try:
response = requests.post(sparql_endpoint_url, params=params, headers=headers)
except requests.exceptions.ConnectionError as e:
print("Connection error: {}. Sleeping for {} seconds.".format(e, retry_after))
time.sleep(retry_after)
continue
if response.status_code == 503:
print("service unavailable. sleeping for {} seconds".format(retry_after))
time.sleep(retry_after)
continue
if response.status_code == 429:
if "retry-after" in response.headers.keys():
retry_after = response.headers["retry-after"]
print("service unavailable. sleeping for {} seconds".format(retry_after))
time.sleep(retry_after)
continue
response.raise_for_status()
results = response.json()
if as_dataframe:
return WDItemEngine._sparql_query_result_to_df(results)
else:
return results
@staticmethod
def _sparql_query_result_to_df(results):
def parse_value(item):
if item.get("type") == "http://www.w3.org/2001/XMLSchema#decimal":
return float(item['value'])
if item.get("type") == "http://www.w3.org/2001/XMLSchema#integer":
return int(item['value'])
if item.get("type") == "http://www.w3.org/2001/XMLSchema#dateTime":
return datetime.datetime.strptime(item['value'], '%Y-%m-%dT%H:%M:%SZ')
return item['value']
results = results['results']['bindings']
results = [{k: parse_value(v) for k, v in item.items()} for item in results]
df = pd.DataFrame(results)
return df
@staticmethod
def delete_item(item, reason, login, mediawiki_api_url=None, user_agent=None):
"""
Takes a list of items and posts them for deletion by Wikidata moderators, appends at the end of the deletion
request page.
:param item: a QID which should be deleted
:type item: string
:param reason: short text about the reason for the deletion request
:type reason: str
:param login: A WDI login object which contains username and password the edit should be performed with.
:type login: wdi_login.WDLogin
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
params = {
'action': 'delete',
'title': 'Item:' + item,
'reason': reason,
'token': login.get_edit_token(),
'format': 'json'
}
headers = {
'User-Agent': user_agent
}
r = requests.post(url=mediawiki_api_url, data=params, cookies=login.get_edit_cookie(), headers=headers)
print(r.json())
@staticmethod
def delete_statement(statement_id, revision, login, mediawiki_api_url='https://www.wikidata.org/w/api.php',
user_agent=config['USER_AGENT_DEFAULT']):
params = {
'action': 'wbremoveclaims',
'claim': statement_id,
'token': login.get_edit_token(),
'baserevid': revision,
'bot': True,
'format': 'json'
}
headers = {
'User-Agent': user_agent
}
r = requests.post(url=mediawiki_api_url, data=params, cookies=login.get_edit_cookie(), headers=headers)
print(r.json())
## SHEX related functions
@staticmethod
def check_shex_conformance(qid=None,data=None, eid=None, entity_schema_repo=None, output='confirm'):
"""
Static method which can be used to check for conformance of a Wikidata item to an EntitySchema any SPARQL query
:param qid: The URI prefixes required for an endpoint, default is the Wikidata specific prefixes
:param eid: The EntitySchema identifier from Wikidata
:param sparql_endpoint_url: The URL string for the SPARQL endpoint. Default is the URL for the Wikidata SPARQL endpoint
:param output: results of a test of conformance on a given shape expression
:return: The results of the query are returned in string format
"""
if not bool(qid):
raise ValueError('Please provide a QID even with a json object of a Wikidata item')
rdfdata = Graph()
if not bool(data):
rdfdata.parse(config["CONCEPT_BASE_URI"] + qid + ".ttl")
else:
rdfdata.parse(data=data)
entity_schema_repo = config["ENTITY_SCHEMA_REPO"] if entity_schema_repo is None else entity_schema_repo
schema = requests.get(entity_schema_repo+eid).text
for result in ShExEvaluator(rdf=rdfdata, schema=schema, focus=config["CONCEPT_BASE_URI"] + qid).evaluate():
shex_result = dict()
if result.result:
shex_result["result"] = True
else:
shex_result["result"] = False
shex_result["reason"] = result.reason
shex_result["focus"] = result.focus
if output == "confirm":
return shex_result["result"]
elif output == "reason":
return shex_result["reason"]
else:
return shex_result
@staticmethod
def extract_shex(qid, extract_shape_of_qualifiers=False, just_direct_properties=True,
comments=False, endpoint="https://query.wikidata.org/sparql"):
"""
It extracts a shape tor the entity specified in qid. The shape is built w.r.t the outgoing
properties of the selected Wikidata entity.
Optionally, it generates as well a shape for each qualifier.
:param qid: Wikidata identifier to which other wikidata items link
:param extract_shape_of_qualifiers: It it is set to True, the result will contain the shape of the qid
selected but also the shapes of its qualifiers.
:param just_direct_properties: If it set to True, the shape obtained will just contain direct properties to other
Wikidata items. It will ignore qualifiers. Do not set to True if extract_shape_of_qualifiers is True
:param comments: If it is set to True, each triple constraint will have an associated comment that indicates
the trustworthiness of each triple constraint. This is usefull for shapes that have been extracted
w.r.t to the properties of more than one entity.
:param endpoint: The URL string for the SPARQL endpoint. Default is the URL for the Wikidata SPARQL endpoint
:return: shex content in String format
"""
namespaces_dict = {
"http://www.w3.org/2000/01/rdf-schema#": "rdfs",
"http://www.wikidata.org/prop/": "p",
"http://www.wikidata.org/prop/direct/": "wdt",
"http://www.wikidata.org/entity/": "wd",
"http://www.w3.org/2001/XMLSchema#": "xsd",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://www.w3.org/XML/1998/namespace": "xml",
"http://wikiba.se/ontology#": "wikibase",
"http://schema.org/": "schema",
"http://www.w3.org/2004/02/skos/core#": "skos"
}
namespaces_to_ignore = [ # Ignoring these namespaces, mainly just direct properties are considered.
"http://www.wikidata.org/prop/",
"http://www.wikidata.org/prop/direct-normalized/",
"http://schema.org/",
"http://www.w3.org/2004/02/skos/core#",
"http://wikiba.se/ontology#",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"http://www.w3.org/2000/01/rdf-schema#"
]
shape_map = "<http://www.wikidata.org/entity/{qid}>@<{qid}>".format(qid=qid)
shaper = Shaper(shape_map_raw=shape_map,
url_endpoint=endpoint,
disable_comments=not comments,
shape_qualifiers_mode=extract_shape_of_qualifiers,
namespaces_dict=namespaces_dict,
namespaces_to_ignore=namespaces_to_ignore if just_direct_properties else None,
namespaces_for_qualifier_props=["http://www.wikidata.org/prop/"],
depth_for_building_subgraph=2 if extract_shape_of_qualifiers else 1)
return shaper.shex_graph(string_output=True,
acceptance_threshold=0)
class WDItemEngine(object):
databases = {}
pmids = []
log_file_name = ''
fast_run_store = []
DISTINCT_VALUE_PROPS = dict()
logger = None
def __init__(self, wd_item_id='', new_item=False, data=None, mediawiki_api_url=None, sparql_endpoint_url=None,
wikibase_url=None, concept_base_uri=None, append_value=None, fast_run=False, fast_run_base_filter=None,
fast_run_use_refs=False, ref_handler=None, global_ref_mode='KEEP_GOOD', good_refs=None,
keep_good_ref_statements=False, search_only=False, item_data=None, user_agent=None, core_props=None,
core_prop_match_thresh=0.66, property_constraint_pid=None, distinct_values_constraint_qid=None,
fast_run_case_insensitive=False, debug=False):
"""
constructor
:param wd_item_id: Wikidata item id
:param new_item: This parameter lets the user indicate if a new item should be created
:type new_item: True or False
:param data: a dictionary with WD property strings as keys and the data which should be written to
a WD item as the property values
:type data: List[WDBaseDataType]
:param append_value: a list of properties where potential existing values should not be overwritten by the data
passed in the :parameter data.
:type append_value: list of property number strings
:param fast_run: True if this item should be run in fastrun mode, otherwise False. User setting this to True
should also specify the fast_run_base_filter for these item types
:type fast_run: bool
:param fast_run_base_filter: A property value dict determining the Wikidata property and the corresponding value
which should be used as a filter for this item type. Several filter criteria can be specified. The values
can be either Wikidata item QIDs, strings or empty strings if the value should be a variable in SPARQL.
Example: {'P352': '', 'P703': 'Q15978631'} if the basic common type of things this bot runs on is
human proteins (specified by Uniprot IDs (P352) and 'found in taxon' homo sapiens 'Q15978631').
:type fast_run_base_filter: dict
:param fast_run_use_refs: If `True`, fastrun mode will consider references in determining if a statement should
be updated and written to Wikidata. Otherwise, only the value and qualifiers are used. Default: False
:type fast_run_use_refs: bool
:param ref_handler: This parameter defines a function that will manage the reference handling in a custom
manner. This argument should be a function handle that accepts two arguments, the old/current statement
(first argument) and new/proposed/to be written statement (second argument), both of type: a subclass of
WDBaseDataType. The function should return an new item that is the item to be written. The item's values
properties or qualifiers should not be modified; only references. This function is also used in fastrun mode.
This will only be used if the ref_mode is set to "CUSTOM".
:type ref_handler: function
:param global_ref_mode: sets the reference handling mode for an item. Four modes are possible, 'STRICT_KEEP'
keeps all references as they are, 'STRICT_KEEP_APPEND' keeps the references as they are and appends
new ones. 'STRICT_OVERWRITE' overwrites all existing references for given. 'CUSTOM' will use the function
defined in ref_handler
:type global_ref_mode: str of value 'STRICT_KEEP', 'STRICT_KEEP_APPEND', 'STRICT_OVERWRITE', 'KEEP_GOOD', 'CUSTOM'
:param good_refs: This parameter lets the user define blocks of good references. It is a list of dictionaries.
One block is a dictionary with Wikidata properties as keys and potential values as the required value for
a property. There can be arbitrarily many key: value pairs in one reference block.
Example: [{'P248': 'Q905695', 'P352': None, 'P407': None, 'P1476': None, 'P813': None}]
This example contains one good reference block, stated in: Uniprot, Uniprot ID, title of Uniprot entry,
language of work and date when the information has been retrieved. A None type indicates that the value
varies from reference to reference. In this case, only the value for the Wikidata item for the
Uniprot database stays stable over all of these references. Key value pairs work here, as Wikidata
references can hold only one value for one property. The number of good reference blocks is not limited.
This parameter OVERRIDES any other reference mode set!!
:type good_refs: list containing dictionaries.
:param keep_good_ref_statements: Do not delete any statement which has a good reference, either defined in the
good_refs list or by any other referencing mode.
:type keep_good_ref_statements: bool
:param search_only: If this flag is set to True, the data provided will only be used to search for the
corresponding Wikidata item, but no actual data updates will performed. This is useful, if certain states or
values on the target item need to be checked before certain data is written to it. In order to write new
data to the item, the method update() will take data, modify the Wikidata item and a write() call will
then perform the actual write to Wikidata.
:type search_only: bool
:param item_data: A Python JSON object corresponding to the Wikidata item in wd_item_id. This can be used in
conjunction with wd_item_id in order to provide raw data.
:param user_agent: The user agent string to use when making http requests
:type user_agent: str
:param core_props: Core properties are used to retrieve a Wikidata item based on `data` if a `wd_item_id` is
not given. This is a set of PIDs to use. If None, all Wikidata properties with a distinct values
constraint will be used. (see: get_core_props)
:type core_props: set
:param core_prop_match_thresh: The proportion of core props that must match during retrieval of an item
when the wd_item_id is not specified.
:type core_prop_match_thresh: float
:param debug: Enable debug output.
:type debug: boolean
"""
self.core_prop_match_thresh = core_prop_match_thresh
self.wd_item_id = wd_item_id
self.new_item = new_item
self.mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
self.sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if sparql_endpoint_url is None else sparql_endpoint_url
self.wikibase_url = config['WIKIBASE_URL'] if wikibase_url is None else wikibase_url
self.concept_base_uri = config['CONCEPT_BASE_URI'] if concept_base_uri is None else concept_base_uri
self.property_constraint_pid = config['PROPERTY_CONSTRAINT_PID'] if property_constraint_pid is None else property_constraint_pid
self.distinct_values_constraint_qid = config['DISTINCT_VALUES_CONSTRAINT_QID'] if distinct_values_constraint_qid is None else distinct_values_constraint_qid
self.data = [] if data is None else data
self.append_value = [] if append_value is None else append_value
self.fast_run = fast_run
self.fast_run_base_filter = fast_run_base_filter
self.fast_run_use_refs = fast_run_use_refs
self.fast_run_case_insensitive = fast_run_case_insensitive
self.ref_handler = ref_handler
self.global_ref_mode = global_ref_mode
self.good_refs = good_refs
self.keep_good_ref_statements = keep_good_ref_statements
self.search_only = search_only
self.item_data = item_data
self.user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
self.create_new_item = False
self.wd_json_representation = {}
self.statements = []
self.original_statements = []
self.entity_metadata = {}
self.fast_run_container = None
self.require_write = True
self.sitelinks = dict()
self.lastrevid = None # stores last revisionid after a write occurs
self.debug = debug
if fast_run_case_insensitive and not search_only:
raise ValueError("If using fast run case insensitive, search_only must be set")
if self.ref_handler:
assert callable(self.ref_handler)
if self.global_ref_mode == "CUSTOM" and self.ref_handler is None:
raise ValueError("If using a custom ref mode, ref_handler must be set")
if (core_props is None) and (self.sparql_endpoint_url not in self.DISTINCT_VALUE_PROPS):
self.get_distinct_value_props(self.sparql_endpoint_url, self.wikibase_url, self.property_constraint_pid,
self.distinct_values_constraint_qid)
self.core_props = core_props if core_props is not None else self.DISTINCT_VALUE_PROPS[self.sparql_endpoint_url]
try:
self.mrh = MappingRelationHelper(self.sparql_endpoint_url)
except Exception as e:
# if the "equivalent property" and "mappingRelation" property are not found, we can't know what the
# QIDs for the mapping relation types are
self.mrh = None
if self.debug:
warnings.warn("mapping relation types are being ignored")
if self.fast_run:
self.init_fastrun()
if self.debug:
if self.require_write:
if search_only:
print('successful fastrun, search_only mode, we can\'t determine if data is up to date')
else:
print('successful fastrun, because no full data match you need to update the item...')
else:
print('successful fastrun, no write to Wikidata required')
if self.wd_item_id != '' and self.create_new_item == True:
raise IDMissingError('Cannot create a new item, when a wikidata identifier is given')
elif self.new_item == True and len(self.data) > 0:
self.create_new_item = True
self.__construct_claim_json()
elif self.require_write:
self.init_data_load()
@classmethod
def get_distinct_value_props(cls, sparql_endpoint_url=None, wikibase_url=None, property_constraint_pid=None,
distinct_values_constraint_qid=None):
"""
On wikidata, the default core IDs will be the properties with a distinct values constraint
select ?p where {?p wdt:P2302 wd:Q21502410}
See: https://www.wikidata.org/wiki/Help:Property_constraints_portal
https://www.wikidata.org/wiki/Help:Property_constraints_portal/Unique_value
"""
sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if sparql_endpoint_url is None else sparql_endpoint_url
wikibase_url = config['WIKIBASE_URL'] if wikibase_url is None else wikibase_url
property_constraint_pid = config[
'PROPERTY_CONSTRAINT_PID'] if property_constraint_pid is None else property_constraint_pid
distinct_values_constraint_qid = config[
'DISTINCT_VALUES_CONSTRAINT_QID'] if distinct_values_constraint_qid is None else distinct_values_constraint_qid
pcpid = property_constraint_pid
dvcqid = distinct_values_constraint_qid
query = '''
PREFIX wd: <{0}/entity/>
PREFIX wdt: <{0}/prop/direct/>
SELECT ?p WHERE {{
?p wdt:{1} wd:{2}
}}
'''.format(wikibase_url, pcpid, dvcqid)
df = cls.execute_sparql_query(query, endpoint=sparql_endpoint_url, as_dataframe=True)
if df.empty:
warnings.warn("Warning: No distinct value properties found\n" +
"Please set P2302 and Q21502410 in your wikibase or set `core_props` manually.\n" +
"Continuing with no core_props")
cls.DISTINCT_VALUE_PROPS[sparql_endpoint_url] = set()
return None
df.p = df.p.str.rsplit("/", 1).str[-1]
cls.DISTINCT_VALUE_PROPS[sparql_endpoint_url] = set(df.p)
def init_data_load(self):
if self.wd_item_id and self.item_data:
self.wd_json_representation = self.parse_wd_json(self.item_data)
elif self.wd_item_id:
self.wd_json_representation = self.get_wd_entity()
else:
qids_by_props = ''
try:
qids_by_props = self.__select_wd_item()
except WDSearchError as e:
self.log('ERROR', str(e))
if qids_by_props:
self.wd_item_id = qids_by_props
self.wd_json_representation = self.get_wd_entity()
self.__check_integrity()
if not self.search_only:
self.__construct_claim_json()
else:
self.data = []
def init_fastrun(self):
# We search if we already have a FastRunContainer with the same parameters to re-use it
for c in WDItemEngine.fast_run_store:
if (c.base_filter == self.fast_run_base_filter) and (c.use_refs == self.fast_run_use_refs) and \
(c.sparql_endpoint_url == self.sparql_endpoint_url):
self.fast_run_container = c
self.fast_run_container.ref_handler = self.ref_handler
if self.debug:
print('Found an already existing FastRunContainer')
if not self.fast_run_container:
self.fast_run_container = FastRunContainer(base_filter=self.fast_run_base_filter,
base_data_type=WDBaseDataType,
engine=self.__class__,
sparql_endpoint_url=self.sparql_endpoint_url,
mediawiki_api_url=self.mediawiki_api_url,
wikibase_url=self.wikibase_url,
concept_base_uri=self.concept_base_uri,
use_refs=self.fast_run_use_refs,
ref_handler=self.ref_handler,
case_insensitive=self.fast_run_case_insensitive,
debug=self.debug)
WDItemEngine.fast_run_store.append(self.fast_run_container)
if not self.search_only:
self.require_write = self.fast_run_container.write_required(self.data, append_props=self.append_value,
cqid=self.wd_item_id)
# set item id based on fast run data
if not self.require_write and not self.wd_item_id:
self.wd_item_id = self.fast_run_container.current_qid
else:
self.fast_run_container.load_item(self.data)
# set item id based on fast run data
if not self.wd_item_id:
self.wd_item_id = self.fast_run_container.current_qid
def rollback(self, login, bot_account=True, summary=""):
"""
:return:
"""
payload = {
'action': 'rollback',
'user': login.user,
'pageid': self.get_pageid(),
'token': login.rollback_token,
'markbot': bot_account,
'summary': summary,
'ids': self.wd_item_id,
'format': 'json'
}
headers = {
'User-Agent': self.user_agent
}
json_data = self.mediawiki_api_call("POST", self.mediawiki_api_url, session=login.get_session(), headers=headers, data=payload)
return json_data
def get_wd_entity(self):
"""
retrieve a WD item in json representation from Wikidata
:rtype: dict
:return: python complex dictionary represenation of a json
"""
params = {
'action': 'wbgetentities',
'sites': 'enwiki',
'ids': self.wd_item_id,
'format': 'json'
}
headers = {
'User-Agent': self.user_agent
}
json_data = self.mediawiki_api_call("GET", self.mediawiki_api_url, params=params, headers=headers)
return self.parse_wd_json(wd_json=json_data['entities'][self.wd_item_id])
def parse_wd_json(self, wd_json):
"""
Parses a WD entity json and generates the datatype objects, sets self.wd_json_representation
:param wd_json: the json of a WD entity
:type wd_json: A Python Json representation of a WD item
:return: returns the json representation containing 'labels', 'descriptions', 'statements', 'aliases', 'sitelinks'.
"""
wd_data = {x: wd_json[x] for x in ('pageid', 'lastrevid', 'modified', 'labels', 'descriptions', 'statements', 'aliases') if x in wd_json}
wd_data['sitelinks'] = dict()
self.entity_metadata = {x: wd_json[x] for x in wd_json if x not in
('labels', 'descriptions', 'statements', 'aliases', 'sitelinks')}
self.sitelinks = wd_json.get('sitelinks', dict())
self.pageid = wd_json.get('pageid')
self.statements = []
for prop in wd_data['statements']:
for z in wd_data['statements'][prop]:
print(z['mainsnak']['datavalue']['type'])
data_type = [x for x in WDBaseDataType.__subclasses__() if x.DTYPE == z['mainsnak']['datavalue']['type']][0]
statement = data_type.from_json(z)
self.statements.append(statement)
self.wd_json_representation = wd_data
self.original_statements = copy.deepcopy(self.statements)
return wd_data
@staticmethod
def get_wd_search_results(search_string='', mediawiki_api_url=None,
user_agent=None, max_results=500,
language='en', dict_id_label=False, dict_id_all_info=False):
"""
Performs a search in WD for a certain WD search string
:param search_string: a string which should be searched for in WD
:type search_string: str
:param mediawiki_api_url: Specify the mediawiki_api_url.
:type mediawiki_api_url: str
:param user_agent: The user agent string transmitted in the http header
:type user_agent: str
:param max_results: The maximum number of search results returned. Default 500
:type max_results: int
:param language: The language in which to perform the search. Default 'en'
:type language: str
:return: returns a list of QIDs found in the search and a list of labels complementary to the QIDs
:type dict_id_label: boolean
:return: returns a list of QIDs found in the search and a list of labels, descriptions, and wikidata urls complementary to the QIDs
:type dict_id_all_info: boolean
:return: function return a list with a dict of id, label, description, and url
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
params = {
'action': 'wbsearchentities',
'language': language,
'search': search_string,
'format': 'json',
'limit': 50
}
headers = {
'User-Agent': user_agent
}
cont_count = 1
results = []
while cont_count > 0:
params.update({'continue': 0 if cont_count == 1 else cont_count})
reply = requests.get(mediawiki_api_url, params=params, headers=headers)
reply.raise_for_status()
search_results = reply.json()
if search_results['success'] != 1:
raise WDSearchError('WD search failed')
else:
for i in search_results['search']:
if dict_id_all_info: # overrides dict_id_label if both are set to True
description = i['description'] if 'description' in i else ""
url = i['url'] if 'url' in i else ""
results.append({'id': i['id'], 'label': i['label'], 'description': description, 'url': url})
elif dict_id_label:
results.append({'id': i['id'], 'label': i['label']})
else:
results.append(i['id'])
if 'search-continue' not in search_results:
cont_count = 0
else:
cont_count = search_results['search-continue']
if cont_count > max_results:
break
return results
def get_property_list(self):
"""
List of properties on the current item
:return: a list of WD property ID strings (Pxxxx).
"""
property_list = set()
for x in self.statements:
property_list.add(x.get_prop_nr())
return list(property_list)
def __select_wd_item(self):
"""
The most likely WD item QID should be returned, after querying WDQ for all values in core_id properties
:return: Either a single WD QID is returned, or an empty string if no suitable item in WD
"""
qid_list = set()
conflict_source = {}
if self.mrh:
exact_qid = self.mrh.mrt_qids['http://www.w3.org/2004/02/skos/core#exactMatch']
mrt_pid = self.mrh.mrt_pid
else:
# This is a `hack` for if initializing the mapping relation helper fails. We can't determine the
# mapping relation type PID or the exact match QID. If we set mrt_pid to "Pxxx", then no qualifier will
# ever match it (and exact_qid will never get checked), and so what happens is exactly what would
# happen if the statement had no mapping relation qualifiers
exact_qid = "Q0"
mrt_pid = "PXXX"
for statement in self.data:
wd_property = statement.get_prop_nr()
# only use this statement if mapping relation type is exact, or mrt is not specified
mrt_qualifiers = [q for q in statement.get_qualifiers() if q.get_prop_nr() == mrt_pid]
if (len(mrt_qualifiers) == 1) and (mrt_qualifiers[0].get_value() != int(exact_qid[1:])):
continue
# TODO: implement special treatment when searching for date/coordinate values
data_point = statement.get_value()
if isinstance(data_point, tuple):
data_point = data_point[0]
core_props = self.core_props
if wd_property in core_props:
tmp_qids = set()
# if mrt_pid is "PXXX", this is fine, because the part of the SPARQL query using it is optional
query = statement.sparql_query.format(wb_url=self.wikibase_url, mrt_pid=mrt_pid, pid=wd_property,
value=data_point.replace("'", r"\'"))
results = WDItemEngine.execute_sparql_query(query=query, endpoint=self.sparql_endpoint_url)
for i in results['results']['bindings']:
qid = i['item_id']['value'].split('/')[-1]
if ('mrt' not in i) or ('mrt' in i and i['mrt']['value'].split('/')[-1] == exact_qid):
tmp_qids.add(qid)
qid_list.update(tmp_qids)
# Protocol in what property the conflict arises
if wd_property in conflict_source:
conflict_source[wd_property].append(tmp_qids)
else:
conflict_source[wd_property] = [tmp_qids]
if len(tmp_qids) > 1:
raise ManualInterventionReqException(
'More than one WD item has the same property value', wd_property, tmp_qids)
if len(qid_list) == 0:
self.create_new_item = True
return ''
if self.debug:
print(qid_list)
unique_qids = set(qid_list)
if len(unique_qids) > 1:
raise ManualInterventionReqException('More than one WD item has the same property value',
conflict_source, unique_qids)
elif len(unique_qids) == 1:
return list(unique_qids)[0]
def __construct_claim_json(self):
"""
Writes the properties from self.data to a new or existing json in self.wd_json_representation
:return: None
"""
def handle_qualifiers(old_item, new_item):
if not new_item.check_qualifier_equality:
old_item.set_qualifiers(new_item.get_qualifiers())
def is_good_ref(ref_block):
if len(WDItemEngine.databases) == 0:
WDItemEngine._init_ref_system()
prop_nrs = [x.get_prop_nr() for x in ref_block]
values = [x.get_value() for x in ref_block]
good_ref = True
prop_value_map = dict(zip(prop_nrs, values))
# if self.good_refs has content, use these to determine good references
if self.good_refs and len(self.good_refs) > 0:
found_good = True
for rblock in self.good_refs:
if not all([k in prop_value_map for k, v in rblock.items()]):
found_good = False
if not all([v in prop_value_map[k] for k, v in rblock.items() if v]):
found_good = False
if found_good:
return True
return False
# stated in, title, retrieved
ref_properties = ['P248', 'P1476', 'P813']
for v in values:
if prop_nrs[values.index(v)] == 'P248':
return True
elif v == 'P698':
return True
for p in ref_properties:
if p not in prop_nrs:
return False
for ref in ref_block:
pn = ref.get_prop_nr()
value = ref.get_value()
if pn == 'P248' and value not in WDItemEngine.databases and 'P854' not in prop_nrs:
return False
elif pn == 'P248' and value in WDItemEngine.databases:
db_props = WDItemEngine.databases[value]
if not any([False if x not in prop_nrs else True for x in db_props]) and 'P854' not in prop_nrs:
return False
return good_ref
def handle_references(old_item, new_item):
"""
Local function to handle references
:param old_item: An item containing the data as currently in WD
:type old_item: A child of WDBaseDataType
:param new_item: An item containing the new data which should be written to WD
:type new_item: A child of WDBaseDataType
"""
# stated in, title, language of work, retrieved, imported from
ref_properties = ['P248', 'P1476', 'P407', 'P813', 'P143']
new_references = new_item.get_references()
old_references = old_item.get_references()
if any([z.overwrite_references for y in new_references for z in y]) \
or sum(map(lambda z: len(z), old_references)) == 0 \
or self.global_ref_mode == 'STRICT_OVERWRITE':
old_item.set_references(new_references)
elif self.global_ref_mode == 'STRICT_KEEP' or new_item.statement_ref_mode == 'STRICT_KEEP':
pass
elif self.global_ref_mode == 'STRICT_KEEP_APPEND' or new_item.statement_ref_mode == 'STRICT_KEEP_APPEND':
old_references.extend(new_references)
old_item.set_references(old_references)
elif self.global_ref_mode == 'CUSTOM' or new_item.statement_ref_mode == 'CUSTOM':
self.ref_handler(old_item, new_item)
elif self.global_ref_mode == 'KEEP_GOOD' or new_item.statement_ref_mode == 'KEEP_GOOD':
keep_block = [False for x in old_references]
for count, ref_block in enumerate(old_references):
stated_in_value = [x.get_value() for x in ref_block if x.get_prop_nr() == 'P248']
if is_good_ref(ref_block):
keep_block[count] = True
new_ref_si_values = [x.get_value() if x.get_prop_nr() == 'P248' else None
for z in new_references for x in z]
for si in stated_in_value:
if si in new_ref_si_values:
keep_block[count] = False
refs = [x for c, x in enumerate(old_references) if keep_block[c]]
refs.extend(new_references)
old_item.set_references(refs)
# sort the incoming data according to the WD property number
self.data.sort(key=lambda z: z.get_prop_nr().lower())
# collect all statements which should be deleted
statements_for_deletion = []
for item in self.data:
if item.get_value() == '' and isinstance(item, WDBaseDataType):
statements_for_deletion.append(item.get_prop_nr())
if self.create_new_item:
self.statements = copy.copy(self.data)
else:
for stat in self.data:
prop_nr = stat.get_prop_nr()
prop_data = [x for x in self.statements if x.get_prop_nr() == prop_nr]
prop_pos = [x.get_prop_nr() == prop_nr for x in self.statements]
prop_pos.reverse()
insert_pos = len(prop_pos) - (prop_pos.index(True) if any(prop_pos) else 0)
# If value should be appended, check if values exists, if not, append
if prop_nr in self.append_value:
equal_items = [stat == x for x in prop_data]
if True not in equal_items:
self.statements.insert(insert_pos + 1, stat)
else:
# if item exists, modify rank
current_item = prop_data[equal_items.index(True)]
current_item.set_rank(stat.get_rank())
handle_references(old_item=current_item, new_item=stat)
handle_qualifiers(old_item=current_item, new_item=stat)
continue
# set all existing values of a property for removal
for x in prop_data:
# for deletion of single statements, do not set all others to delete
if hasattr(stat, 'remove'):
break
elif x.get_id() and not hasattr(x, 'retain'):
# keep statements with good references if keep_good_ref_statements is True
if self.keep_good_ref_statements:
if any([is_good_ref(r) for r in x.get_references()]):
setattr(x, 'retain', '')
else:
setattr(x, 'remove', '')
match = []
for i in prop_data:
if stat == i and hasattr(stat, 'remove'):
match.append(True)
setattr(i, 'remove', '')
elif stat == i:
match.append(True)
setattr(i, 'retain', '')
if hasattr(i, 'remove'):
delattr(i, 'remove')
handle_references(old_item=i, new_item=stat)
handle_qualifiers(old_item=i, new_item=stat)
i.set_rank(rank=stat.get_rank())
# if there is no value, do not add an element, this is also used to delete whole properties.
elif i.get_value():
match.append(False)
if True not in match and not hasattr(stat, 'remove'):
self.statements.insert(insert_pos + 1, stat)
# For whole property deletions, add remove flag to all statements which should be deleted
for item in copy.deepcopy(self.statements):
if item.get_prop_nr() in statements_for_deletion and item.get_id() != '':
setattr(item, 'remove', '')
elif item.get_prop_nr() in statements_for_deletion:
self.statements.remove(item)
# regenerate claim json
self.wd_json_representation['statements'] = {}
for stat in self.statements:
prop_nr = stat.get_prop_nr()
if prop_nr not in self.wd_json_representation['statements']:
self.wd_json_representation['statements'][prop_nr] = []
self.wd_json_representation['statements'][prop_nr].append(stat.get_json_representation())
def update(self, data, append_value=None):
"""
This method takes data, and modifies the Wikidata item. This works together with the data already provided via
the constructor or if the constructor is being instantiated with search_only=True. In the latter case, this
allows for checking the item data before deciding which new data should be written to the Wikidata item.
The actual write to Wikidata only happens on calling of the write() method. If data has been provided already
via the constructor, data provided via the update() method will be appended to these data.
:param data: A list of Wikidata statment items inheriting from WDBaseDataType
:type data: list
:param append_value: list with Wikidata property strings where the values should only be appended,
not overwritten.
:type: list
"""
assert type(data) == list
if append_value:
assert type(append_value) == list
self.append_value.extend(append_value)
self.data.extend(data)
self.statements = copy.deepcopy(self.original_statements)
if self.debug:
print(self.data)
if self.fast_run:
self.init_fastrun()
if self.require_write and self.fast_run:
self.init_data_load()
self.__construct_claim_json()
self.__check_integrity()
elif not self.fast_run:
self.__construct_claim_json()
self.__check_integrity()
def get_wd_json_representation(self):
"""
A method to access the internal json representation of the WD item, mainly for testing
:return: returns a Python json representation object of the WD item at the current state of the instance
"""
return self.wd_json_representation
def __check_integrity(self):
"""
A method to check if when invoking __select_wd_item() and the WD item does not exist yet, but another item
has a property of the current domain with a value like submitted in the data dict, this item does not get
selected but a ManualInterventionReqException() is raised. This check is dependent on the core identifiers
of a certain domain.
:return: boolean True if test passed
"""
# all core props
wdi_core_props = self.core_props
# core prop statements that exist on the item
cp_statements = [x for x in self.statements if x.get_prop_nr() in wdi_core_props]
item_core_props = set(x.get_prop_nr() for x in cp_statements)
# core prop statements we are loading
cp_data = [x for x in self.data if x.get_prop_nr() in wdi_core_props]
# compare the claim values of the currently loaded QIDs to the data provided in self.data
# this is the number of core_ids in self.data that are also on the item
count_existing_ids = len([x for x in self.data if x.get_prop_nr() in item_core_props])
core_prop_match_count = 0
for new_stat in self.data:
for stat in self.statements:
if (new_stat.get_prop_nr() == stat.get_prop_nr()) and (new_stat.get_value() == stat.get_value()) \
and (new_stat.get_prop_nr() in item_core_props):
core_prop_match_count += 1
if core_prop_match_count < count_existing_ids * self.core_prop_match_thresh:
existing_core_pv = defaultdict(set)
for s in cp_statements:
existing_core_pv[s.get_prop_nr()].add(s.get_value())
new_core_pv = defaultdict(set)
for s in cp_data:
new_core_pv[s.get_prop_nr()].add(s.get_value())
nomatch_existing = {k: v - new_core_pv[k] for k, v in existing_core_pv.items()}
nomatch_existing = {k: v for k, v in nomatch_existing.items() if v}
nomatch_new = {k: v - existing_core_pv[k] for k, v in new_core_pv.items()}
nomatch_new = {k: v for k, v in nomatch_new.items() if v}
raise CorePropIntegrityException('Retrieved item ({}) does not match provided core IDs. '
'Matching count {}, non-matching count {}. '
.format(self.wd_item_id, core_prop_match_count,
count_existing_ids - core_prop_match_count) +
'existing unmatched core props: {}. '.format(nomatch_existing) +
'statement unmatched core props: {}.'.format(nomatch_new))
else:
return True
def get_pageid(self):
"""
Returns the pageid of a Wikidata item
:return:
"""
return self.wd_json_representation['pageid']
def get_label(self, lang='en'):
"""
Returns the label for a certain language
:param lang:
:type lang: str
:return: returns the label in the specified language, an empty string if the label does not exist
"""
if self.fast_run:
return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, 'label'))[0]
try:
return self.wd_json_representation['labels'][lang]['value']
except KeyError:
return ''
def set_label(self, label, lang='en'):
"""
Set the label for a WD item in a certain language
:param label: The description of the item in a certain language
:type label: str
:param lang: The language a label should be set for.
:type lang: str
:return: None
"""
if self.fast_run and not self.require_write:
self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,
lang_data=[label], lang=lang,
lang_data_type='label')
if self.require_write:
self.init_data_load()
else:
return
if 'labels' not in self.wd_json_representation:
self.wd_json_representation['labels'] = {}
self.wd_json_representation['labels'][lang] = {
'language': lang,
'value': label
}
def get_aliases(self, lang='en'):
"""
Retrieve the aliases in a certain language
:param lang: The Wikidata language the description should be retrieved for
:return: Returns a list of aliases, an empty list if none exist for the specified language
"""
if self.fast_run:
return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, 'aliases'))
alias_list = []
if 'aliases' in self.wd_json_representation and lang in self.wd_json_representation['aliases']:
for alias in self.wd_json_representation['aliases'][lang]:
alias_list.append(alias['value'])
return alias_list
def set_aliases(self, aliases, lang='en', append=True):
"""
set the aliases for a WD item
:param aliases: a list of strings representing the aliases of a WD item
:param lang: The language a description should be set for
:param append: If true, append a new alias to the list of existing aliases, else, overwrite. Default: True
:return: None
"""
if self.fast_run and not self.require_write:
self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,
lang_data=aliases, lang=lang,
lang_data_type='aliases')
if self.require_write:
self.init_data_load()
else:
return
if 'aliases' not in self.wd_json_representation:
self.wd_json_representation['aliases'] = {}
if not append or lang not in self.wd_json_representation['aliases']:
self.wd_json_representation['aliases'][lang] = []
for alias in aliases:
found = False
for current_aliases in self.wd_json_representation['aliases'][lang]:
if alias.strip().lower() != current_aliases['value'].strip().lower():
continue
else:
found = True
break
if not found:
self.wd_json_representation['aliases'][lang].append({
'language': lang,
'value': alias
})
def get_description(self, lang='en'):
"""
Retrieve the description in a certain language
:param lang: The Wikidata language the description should be retrieved for
:return: Returns the description string
"""
if self.fast_run:
return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, 'description'))[0]
if 'descriptions' not in self.wd_json_representation or lang not in self.wd_json_representation['descriptions']:
return ''
else:
return self.wd_json_representation['descriptions'][lang]['value']
def set_description(self, description, lang='en'):
"""
Set the description for a WD item in a certain language
:param description: The description of the item in a certain language
:type description: str
:param lang: The language a description should be set for.
:type lang: str
:return: None
"""
if self.fast_run and not self.require_write:
self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,
lang_data=[description], lang=lang,
lang_data_type='description')
if self.require_write:
self.init_data_load()
else:
return
if 'descriptions' not in self.wd_json_representation:
self.wd_json_representation['descriptions'] = {}
self.wd_json_representation['descriptions'][lang] = {
'language': lang,
'value': description
}
def set_sitelink(self, site, title, badges=()):
"""
Set sitelinks to corresponding Wikipedia pages
:param site: The Wikipedia page a sitelink is directed to (e.g. 'enwiki')
:param title: The title of the Wikipedia page the sitelink is directed to
:param badges: An iterable containing Wikipedia badge strings.
:return:
"""
sitelink = {
'site': site,
'title': title,
'badges': badges
}
self.wd_json_representation['sitelinks'][site] = sitelink
self.sitelinks[site] = sitelink
def get_sitelink(self, site):
"""
A method to access the interwiki links in the json.model
:param site: The Wikipedia site the interwiki/sitelink should be returned for
:return: The interwiki/sitelink string for the specified Wikipedia will be returned.
"""
if site in self.sitelinks:
return self.sitelinks[site]
else:
return None
def write(self, login, bot_account=True, edit_summary='', entity_type='item', property_datatype='string',
max_retries=1000, retry_after=60):
"""
Writes the WD item Json to WD and after successful write, updates the object with new ids and hashes generated
by WD. For new items, also returns the new QIDs.
:param login: a instance of the class PBB_login which provides edit-cookies and edit-tokens
:param bot_account: Tell the Wikidata API whether the script should be run as part of a bot account or not.
:type bot_account: bool
:param edit_summary: A short (max 250 characters) summary of the purpose of the edit. This will be displayed as
the revision summary of the Wikidata item.
:type edit_summary: str
:param entity_type: Decides wether the object will become an item (default) or a property (with 'property')
:type entity_type: str
:param property_datatype: When payload_type is 'property' then this parameter set the datatype for the property
:type property_datatype: str
:param max_retries: If api request fails due to rate limiting, maxlag, or readonly mode, retry up to
`max_retries` times
:type max_retries: int
:param retry_after: Number of seconds to wait before retrying request (see max_retries)
:type retry_after: int
:return: the WD QID on sucessful write
"""
if not self.require_write:
return self.wd_item_id
if entity_type == 'property':
self.wd_json_representation['type'] = property_datatype
if 'sitelinks' in self.wd_json_representation:
del self.wd_json_representation['sitelinks']
payload = {
'action': 'wbeditentity',
'data': json.JSONEncoder().encode(self.wd_json_representation),
'format': 'json',
'token': login.get_edit_token(),
'summary': edit_summary,
'maxlag': config['MAXLAG']
}
headers = {
'content-type': 'application/x-www-form-urlencoded',
'charset': 'utf-8'
}
if bot_account:
payload.update({'bot': ''})
if self.create_new_item:
payload.update({u'new': entity_type})
else:
payload.update({u'id': self.wd_item_id})
try:
json_data = self.mediawiki_api_call('POST', self.mediawiki_api_url, session=login.get_session(),
max_retries=max_retries, retry_after=retry_after,
headers=headers, data=payload)
if 'error' in json_data and 'messages' in json_data['error']:
error_msg_names = set(x.get('name') for x in json_data["error"]['messages'])
if 'wikibase-validator-label-with-description-conflict' in error_msg_names:
raise NonUniqueLabelDescriptionPairError(json_data)
else:
raise WDApiError(json_data)
elif 'error' in json_data.keys():
raise WDApiError(json_data)
except Exception:
print('Error while writing to Wikidata')
raise
# after successful write, update this object with latest json, QID and parsed data types.
self.create_new_item = False
self.wd_item_id = json_data['entity']['id']
self.parse_wd_json(wd_json=json_data['entity'])
self.data = []
if "success" in json_data and "entity" in json_data and "lastrevid" in json_data["entity"]:
self.lastrevid = json_data["entity"]["lastrevid"]
if "success" in json_data and "entity" in json_data and "pageid" in json_data["entity"]:
self.pageid = json_data["entity"]["pageid"]
return self.wd_item_id
def check_entity_schema(self, eid=None, entity_schema_repo=None, output='confirm', max_steps=1):
"""
Static method which can be used to check for conformance of a Wikidata item to an EntitySchema any SPARQL query
:param qid: The URI prefixes required for an endpoint, default is the Wikidata specific prefixes
:param eid: The EntitySchema identifier from Wikidata
:param sparql_endpoint_url: The URL string for the SPARQL endpoint. Default is the URL for the Wikidata SPARQL endpoint
:param output: results of a test of conformance on a given shape expression
:return: The results of the query are returned in string format
"""
rdfdata = wdi_rdf.WDqidRDFEngine(qid=self.wd_item_id, json_data=self.get_wd_json_representation(), max_steps=max_steps, current_step = 0).rdf_item
entity_schema_repo = config["ENTITY_SCHEMA_REPO"] if entity_schema_repo is None else entity_schema_repo
schema = requests.get(entity_schema_repo+eid).text
for result in ShExEvaluator(rdf=rdfdata, schema=schema, focus=config["CONCEPT_BASE_URI"] + self.wd_item_id).evaluate():
shex_result = dict()
if result.result:
shex_result["result"] = True
else:
shex_result["result"] = False
shex_result["reason"] = result.reason
shex_result["focus"] = result.focus
if output == "confirm":
return shex_result["result"]
elif output == "reason":
return shex_result["reason"]
else:
return shex_result
@staticmethod
def mediawiki_api_call(method, mediawiki_api_url=None,
session=None, max_retries=1000, retry_after=60, **kwargs):
"""
:param method: 'GET' or 'POST'
:param mediawiki_api_url:
:param session: If a session is passed, it will be used. Otherwise a new requests session is created
:param max_retries: If api request fails due to rate limiting, maxlag, or readonly mode, retry up to
`max_retries` times
:type max_retries: int
:param retry_after: Number of seconds to wait before retrying request (see max_retries)
:type retry_after: int
:param kwargs: Passed to requests.request
:return:
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
response = None
session = session if session else requests.session()
for n in range(max_retries):
try:
response = session.request(method, mediawiki_api_url, **kwargs)
except requests.exceptions.ConnectionError as e:
print("Connection error: {}. Sleeping for {} seconds.".format(e, retry_after))
time.sleep(retry_after)
continue
if response.status_code == 503:
print("service unavailable. sleeping for {} seconds".format(retry_after))
time.sleep(retry_after)
continue
response.raise_for_status()
json_data = response.json()
"""
wikidata api response has code = 200 even if there are errors.
rate limit doesn't return HTTP 429 either. may in the future
https://phabricator.wikimedia.org/T172293
"""
if 'error' in json_data:
# rate limiting
error_msg_names = set()
if 'messages' in json_data['error']:
error_msg_names = set(x.get('name') for x in json_data["error"]['messages'])
if 'actionthrottledtext' in error_msg_names:
sleep_sec = int(response.headers.get('retry-after', retry_after))
print("{}: rate limited. sleeping for {} seconds".format(datetime.datetime.utcnow(), sleep_sec))
time.sleep(sleep_sec)
continue
# maxlag
if 'code' in json_data['error'] and json_data['error']['code'] == 'maxlag':
sleep_sec = json_data['error'].get('lag', retry_after)
print("{}: maxlag. sleeping for {} seconds".format(datetime.datetime.utcnow(), sleep_sec))
time.sleep(sleep_sec)
continue
# readonly
if 'code' in json_data['error'] and json_data['error']['code'] == 'readonly':
print('Wikidata currently is in readonly mode, waiting for {} seconds'.format(retry_after))
time.sleep(retry_after)
continue
# there is no error or waiting. break out of this loop and parse response
break
else:
# the first time I've ever used for - else!!
# else executes if the for loop completes normally. i.e. does not encouter a `break`
# in this case, that means it tried this api call 10 times
raise WDApiError(response.json() if response else dict())
return json_data
@classmethod
def setup_logging(cls, log_dir="./logs", log_name=None, header=None, names=None,
delimiter=";", logger_name='WD_logger'):
"""
A static method which initiates log files compatible to .csv format, allowing for easy further analysis.
:param log_dir: allows for setting relative or absolute path for logging, default is ./logs.
:type log_dir: str
:param log_name: File name of log file to be written. e.g. "WD_bot_run-20160204.log". Default is "WD_bot_run"
and a timestamp of the current time
:type log_name: str
:param header: Log file will be prepended with header if given
:type header: str
:param names: Column names for the log file
:type names: list
:param delimiter: Log file will be delimited with `delimiter`
:type delimiter: str
"""
names = ["level", "timestamp", "external_id", "external_id_prop", "wdid", "msg", "msg_type",
"revid"] if names is None else names
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not log_name:
run_id = time.strftime('%Y%m%d_%H:%M', time.localtime())
log_name = "WD_bot_run-{}.log".format(run_id)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
log_file_name = os.path.join(log_dir, log_name)
file_handler = logging.FileHandler(log_file_name, mode='a')
file_handler.setLevel(logging.DEBUG)
fmt = '%(levelname)s{delimiter}%(asctime)s{delimiter}%(message)s'.format(delimiter=delimiter)
if header:
header = header if header.startswith("#") else "#" + header
header += "\n" + delimiter.join(names)
formatter = FormatterWithHeader(header, fmt=fmt, datefmt='%m/%d/%Y %H:%M:%S')
else:
formatter = FormatterWithHeader(delimiter.join(names), fmt=fmt, datefmt='%m/%d/%Y %H:%M:%S')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
cls.logger = logger
@classmethod
def log(cls, level, message):
"""
:param level: The log level as in the Python logging documentation, 5 different possible values with increasing
severity
:type level: String of value 'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'.
:param message: The logging data which should be written to the log file. In order to achieve a csv-file
compatible format, all fields must be separated by a colon. Furthermore, all strings which could contain
colons, spaces or other special characters must be enclosed in double-quotes.
e.g. '{main_data_id}, "{exception_type}", "{message}", {wd_id}, {duration}'.format(
main_data_id=<main_id>,
exception_type=<excpetion type>,
message=<exception message>,
wd_id=<wikidata id>,
duration=<duration of action>
:type message: str
"""
if cls.logger is None:
cls.setup_logging()
log_levels = {'DEBUG': logging.DEBUG, 'ERROR': logging.ERROR, 'INFO': logging.INFO, 'WARNING': logging.WARNING,
'CRITICAL': logging.CRITICAL}
cls.logger.log(level=log_levels[level], msg=message)
@classmethod
def generate_item_instances(cls, items, mediawiki_api_url=None, login=None,
user_agent=None):
"""
A method which allows for retrieval of a list of Wikidata items or properties. The method generates a list of
tuples where the first value in the tuple is the QID or property ID, whereas the second is the new instance of
WDItemEngine containing all the data of the item. This is most useful for mass retrieval of WD items.
:param items: A list of QIDs or property IDs
:type items: list
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param login: An object of type WDLogin, which holds the credentials/session cookies required for >50 item bulk
retrieval of items.
:type login: wdi_login.WDLogin
:return: A list of tuples, first value in the tuple is the QID or property ID string, second value is the
instance of WDItemEngine with the corresponding item data.
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
assert type(items) == list
url = mediawiki_api_url
params = {
'action': 'wbgetentities',
'ids': '|'.join(items),
'format': 'json'
}
headers = {
'User-Agent': user_agent
}
if login:
reply = login.get_session().get(url, params=params, headers=headers)
else:
reply = requests.get(url, params=params)
item_instances = []
for qid, v in reply.json()['entities'].items():
ii = cls(wd_item_id=qid, item_data=v)
ii.mediawiki_api_url = mediawiki_api_url
item_instances.append((qid, ii))
return item_instances
@staticmethod
@wdi_backoff()
def execute_sparql_query(query, prefix=None, endpoint=None,
user_agent=None, as_dataframe=False, max_retries=1000, retry_after=60):
"""
Static method which can be used to execute any SPARQL query
:param prefix: The URI prefixes required for an endpoint, default is the Wikidata specific prefixes
:param query: The actual SPARQL query string
:param endpoint: The URL string for the SPARQL endpoint. Default is the URL for the Wikidata SPARQL endpoint
:param user_agent: Set a user agent string for the HTTP header to let the WDQS know who you are.
:param as_dataframe: Return result as pandas dataframe
:type user_agent: str
:param max_retries: The number time this function should retry in case of header reports.
:param retry_after: the number of seconds should wait upon receiving either an error code or the WDQS is not reachable.
:return: The results of the query are returned in JSON format
"""
sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if endpoint is None else endpoint
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
if prefix:
query = prefix + '\n' + query
params = {
'query': '#Tool: wdi_core fastrun\n' + query,
'format': 'json'
}
headers = {
'Accept': 'application/sparql-results+json',
'User-Agent': user_agent
}
response = None
for n in range(max_retries):
try:
response = requests.post(sparql_endpoint_url, params=params, headers=headers)
except requests.exceptions.ConnectionError as e:
print("Connection error: {}. Sleeping for {} seconds.".format(e, retry_after))
time.sleep(retry_after)
continue
if response.status_code == 503:
print("service unavailable. sleeping for {} seconds".format(retry_after))
time.sleep(retry_after)
continue
if response.status_code == 429:
if "retry-after" in response.headers.keys():
retry_after = response.headers["retry-after"]
print("service unavailable. sleeping for {} seconds".format(retry_after))
time.sleep(retry_after)
continue
response.raise_for_status()
results = response.json()
if as_dataframe:
return WDItemEngine._sparql_query_result_to_df(results)
else:
return results
@staticmethod
def _sparql_query_result_to_df(results):
def parse_value(item):
if item.get("type") == "http://www.w3.org/2001/XMLSchema#decimal":
return float(item['value'])
if item.get("type") == "http://www.w3.org/2001/XMLSchema#integer":
return int(item['value'])
if item.get("type") == "http://www.w3.org/2001/XMLSchema#dateTime":
return datetime.datetime.strptime(item['value'], '%Y-%m-%dT%H:%M:%SZ')
return item['value']
results = results['results']['bindings']
results = [{k: parse_value(v) for k, v in item.items()} for item in results]
df = pd.DataFrame(results)
return df
@staticmethod
def get_linked_by(qid, mediawiki_api_url=None):
"""
:param qid: Wikidata identifier to which other wikidata items link
:param mediawiki_api_url: default to wikidata's api, but can be changed to any wikibase
:return:
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
linkedby = []
whatlinkshere = json.loads(requests.get(
mediawiki_api_url + "?action=query&list=backlinks&format=json&bllimit=500&bltitle=" + qid).text)
for link in whatlinkshere["query"]["backlinks"]:
if link["title"].startswith("Q"):
linkedby.append(link["title"])
while 'continue' in whatlinkshere.keys():
whatlinkshere = json.loads(requests.get(
mediawiki_api_url + "?action=query&list=backlinks&blcontinue=" +
whatlinkshere['continue']['blcontinue'] + "&format=json&bllimit=50&bltitle=" + "Q42").text)
for link in whatlinkshere["query"]["backlinks"]:
if link["title"].startswith("Q"):
linkedby.append(link["title"])
return (linkedby)
@staticmethod
def get_rdf(qid, format="turtle", mediawiki_api_url=None):
"""
:param qid: Wikidata identifier to extract the RDF of
:format RDF from to return takes (turtle, ntriples, rdfxml, see https://rdflib.readthedocs.io/en/stable/apidocs/rdflib.html)
:param mediawiki_api_url: default to wikidata's api, but can be changed to any wikibase
:return:
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
localcopy = Graph()
localcopy.parse(config["CONCEPT_BASE_URI"] + qid + ".ttl")
return (localcopy.serialize(format=format))
@staticmethod
def merge_items(from_id, to_id, login_obj, mediawiki_api_url=None,
ignore_conflicts='', user_agent=None):
"""
A static method to merge two Wikidata items
:param from_id: The QID which should be merged into another item
:type from_id: string with 'Q' prefix
:param to_id: The QID into which another item should be merged
:type to_id: string with 'Q' prefix
:param login_obj: The object containing the login credentials and cookies
:type login_obj: instance of PBB_login.WDLogin
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param ignore_conflicts: A string with the values 'description', 'statement' or 'sitelink', separated
by a pipe ('|') if using more than one of those.
:type ignore_conflicts: str
"""
url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
headers = {
'content-type': 'application/x-www-form-urlencoded',
'charset': 'utf-8',
'User-Agent': user_agent
}
params = {
'action': 'wbmergeitems',
'fromid': from_id,
'toid': to_id,
'token': login_obj.get_edit_token(),
'format': 'json',
'bot': '',
'ignoreconflicts': ignore_conflicts
}
try:
# TODO: should we retry this?
merge_reply = requests.post(url=url, data=params, headers=headers, cookies=login_obj.get_edit_cookie())
merge_reply.raise_for_status()
if 'error' in merge_reply.json():
raise MergeError(merge_reply.json())
except requests.HTTPError as e:
print(e)
# TODO: should we return this?
return {'error': 'HTTPError'}
return merge_reply.json()
# TODO: adapt this function for wikibase (if possible)
@classmethod
def _init_ref_system(cls, sparql_endpoint_url=None):
db_query = '''
SELECT DISTINCT ?db ?wd_prop WHERE {
{?db wdt:P31 wd:Q2881060 . } UNION
{?db wdt:P31 wd:Q4117139 . } UNION
{?db wdt:P31 wd:Q8513 . } UNION
{?db wdt:P31 wd:Q324254 .}
OPTIONAL {
?db wdt:P1687 ?wd_prop .
}
}'''
for x in cls.execute_sparql_query(db_query, endpoint=sparql_endpoint_url)['results']['bindings']:
db_qid = x['db']['value'].split('/')[-1]
if db_qid not in cls.databases:
cls.databases.update({db_qid: []})
if 'wd_prop' in x:
cls.databases[db_qid].append(x['wd_prop']['value'].split('/')[-1])
@staticmethod
def delete_item(item, reason, login, mediawiki_api_url=None, user_agent=None):
"""
Takes a list of items and posts them for deletion by Wikidata moderators, appends at the end of the deletion
request page.
:param item: a QID which should be deleted
:type item: string
:param reason: short text about the reason for the deletion request
:type reason: str
:param login: A WDI login object which contains username and password the edit should be performed with.
:type login: wdi_login.WDLogin
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent
params = {
'action': 'delete',
'title': 'Item:' + item,
'reason': reason,
'token': login.get_edit_token(),
'format': 'json'
}
headers = {
'User-Agent': user_agent
}
r = requests.post(url=mediawiki_api_url, data=params, cookies=login.get_edit_cookie(), headers=headers)
print(r.json())
@staticmethod
def delete_statement(statement_id, revision, login, mediawiki_api_url='https://www.wikidata.org/w/api.php',
user_agent=config['USER_AGENT_DEFAULT']):
params = {
'action': 'wbremoveclaims',
'claim': statement_id,
'token': login.get_edit_token(),
'baserevid': revision,
'bot': True,
'format': 'json'
}
headers = {
'User-Agent': user_agent
}
r = requests.post(url=mediawiki_api_url, data=params, cookies=login.get_edit_cookie(), headers=headers)
print(r.json())
## References
@classmethod
def count_references(self, prop_id, user_agent=config['USER_AGENT_DEFAULT']):
counts = dict()
for claim in self.get_wd_json_representation()["statements"][prop_id]:
counts[claim["id"]] = len(claim["references"])
return counts
@classmethod
def get_reference_properties(self, prop_id, user_agent=config['USER_AGENT_DEFAULT']):
references = []
for statements in self.get_wd_json_representation()["statements"][prop_id]:
for reference in statements["references"]:
references.append(reference["snaks"].keys())
return references
@classmethod
def get_qualifier_properties(self, prop_id, user_agent=config['USER_AGENT_DEFAULT']):
qualifiers = []
for statements in self.get_wd_json_representation()["statements"][prop_id]:
for reference in statements["qualifiers"]:
qualifiers.append(reference["snaks"].keys())
return qualifiers
@classmethod
def wikibase_item_engine_factory(cls, mediawiki_api_url=config['MEDIAWIKI_API_URL'],
sparql_endpoint_url=config['SPARQL_ENDPOINT_URL'], name='LocalItemEngine'):
"""
Helper function for creating a WDItemEngine class with arguments set for a different Wikibase instance than
Wikidata.
:param mediawiki_api_url: Mediawiki api url. For wikidata, this is: 'https://www.wikidata.org/w/api.php'
:param sparql_endpoint_url: sparql endpoint url. For wikidata, this is: 'https://query.wikidata.org/sparql'
:param name: name of the resulting class
:return: a subclass of WDItemEngine with the mediawiki_api_url and sparql_endpoint_url arguments set
"""
mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url
sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if sparql_endpoint_url is None else sparql_endpoint_url
class SubCls(cls):
def __init__(self, *args, **kwargs):
kwargs['mediawiki_api_url'] = mediawiki_api_url
kwargs['sparql_endpoint_url'] = sparql_endpoint_url
super(SubCls, self).__init__(*args, **kwargs)
SubCls.__name__ = name
return SubCls
"""A mixin implementing a simple __repr__."""
def __repr__(self):
return "<{klass} @{id:x} {attrs}>".format(
klass=self.__class__.__name__,
id=id(self) & 0xFFFFFF,
attrs="\r\n\t ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
)
class JsonParser(object):
references = []
qualifiers = []
final = False
current_type = None
def __init__(self, f):
self.f = f
def __call__(self, *args):
self.json_representation = args[1]
if self.final:
self.final = False
return self.f(cls=self.current_type, jsn=self.json_representation)
if 'mainsnak' in self.json_representation:
self.mainsnak = None
self.references = []
self.qualifiers = []
json_representation = self.json_representation
if 'references' in json_representation:
self.references.extend([[] for x in json_representation['references']])
for count, ref_block in enumerate(json_representation['references']):
ref_hash = ''
if 'hash' in ref_block:
ref_hash = ref_block['hash']
for prop in ref_block['snaks-order']:
jsn = ref_block['snaks'][prop]
for prop_ref in jsn:
ref_class = self.get_class_representation(prop_ref)
ref_class.is_reference = True
ref_class.snak_type = prop_ref['snaktype']
ref_class.set_hash(ref_hash)
self.references[count].append(copy.deepcopy(ref_class))
# print(self.references)
if 'qualifiers' in json_representation:
for prop in json_representation['qualifiers-order']:
for qual in json_representation['qualifiers'][prop]:
qual_hash = ''
if 'hash' in qual:
qual_hash = qual['hash']
qual_class = self.get_class_representation(qual)
qual_class.is_qualifier = True
qual_class.snak_type = qual['snaktype']
qual_class.set_hash(qual_hash)
self.qualifiers.append(qual_class)
# print(self.qualifiers)
mainsnak = self.get_class_representation(json_representation['mainsnak'])
mainsnak.set_references(self.references)
mainsnak.set_qualifiers(self.qualifiers)
if 'id' in json_representation:
mainsnak.set_id(json_representation['id'])
if 'rank' in json_representation:
mainsnak.set_rank(json_representation['rank'])
mainsnak.snak_type = json_representation['mainsnak']['snaktype']
return mainsnak
elif 'property' in self.json_representation:
return self.get_class_representation(jsn=self.json_representation)
def get_class_representation(self, jsn):
data_type = [x for x in WDBaseDataType.__subclasses__() if x.DTYPE == jsn["datavalue"]['type']][0]
self.final = True
self.current_type = data_type
return data_type.from_json(jsn)
class WDBaseDataType(object):
"""
The base class for all Wikidata data types, they inherit from it
"""
# example sparql query
"""
SELECT * WHERE {
?item_id p:P492 ?s .
?s ps:P492 '614212' .
OPTIONAL {?s pq:P4390 ?mrt}
}"""
sparql_query = '''
PREFIX wd: <{wb_url}/entity/>
PREFIX wdt: <{wb_url}/prop/direct/>
PREFIX p: <{wb_url}/prop/>
PREFIX ps: <{wb_url}/prop/statement/>
PREFIX pq: <{wb_url}/prop/qualifier/>
SELECT * WHERE {{
?item_id p:{pid} ?s .
?s ps:{pid} '{value}' .
OPTIONAL {{?s pq:{mrt_pid} ?mrt}}
}}
'''
def __init__(self, value, snak_type, data_type, is_reference, is_qualifier, references, qualifiers, rank, prop_nr,
check_qualifier_equality):
"""
Constructor, will be called by all data types.
:param value: Data value of the WD data snak
:type value: str or int or tuple
:param snak_type: The snak type of the WD data snak, three values possible, depending if the value is a
known (value), not existent (novalue) or unknown (somevalue). See WD documentation.
:type snak_type: a str of either 'value', 'novalue' or 'somevalue'
:param data_type: The WD data type declaration of this snak
:type data_type: str
:param is_reference: States if the snak is a reference, mutually exclusive with qualifier
:type is_reference: boolean
:param is_qualifier: States if the snak is a qualifier, mutually exlcusive with reference
:type is_qualifier: boolean
:param references: A one level nested list with reference WD snaks of base type WDBaseDataType, e.g.
references=[[<WDBaseDataType>, <WDBaseDataType>], [<WDBaseDataType>]]
This will create two references, the first one with two statements, the second with one
:type references: A one level nested list with instances of WDBaseDataType or children of it.
:param qualifiers: A list of qualifiers for the WD mainsnak
:type qualifiers: A list with instances of WDBaseDataType or children of it.
:param rank: The rank of a WD mainsnak, should determine the status of a value
:type rank: A string of one of three allowed values: 'normal', 'deprecated', 'preferred'
:param prop_nr: The WD property number a WD snak belongs to
:type prop_nr: A string with a prefixed 'P' and several digits e.g. 'P715' (Drugbank ID)
:return:
"""
self.value = value
self.snak_type = snak_type
self.data_type = data_type
if not references:
self.references = []
else:
self.references = references
self.qualifiers = qualifiers
self.is_reference = is_reference
self.is_qualifier = is_qualifier
self.rank = rank
self.check_qualifier_equality = check_qualifier_equality
self._statement_ref_mode = 'KEEP_GOOD'
if not references:
self.references = list()
if not self.qualifiers:
self.qualifiers = list()
if type(prop_nr) is int:
self.prop_nr = 'P' + str(prop_nr)
elif prop_nr.startswith('P'):
self.prop_nr = prop_nr
else:
self.prop_nr = 'P' + prop_nr
# Flag to allow complete overwrite of existing references for a value
self._overwrite_references = False
# WD internal ID and hash are issued by the WD servers
self.id = ''
self.hash = ''
self.json_representation = {
"snaktype": self.snak_type,
"property": self.prop_nr,
"datavalue": {
"type": self.data_type
},
#"type": self.data_type
}
self.snak_types = ['value', 'novalue', 'somevalue']
if snak_type not in self.snak_types:
raise ValueError('{} is not a valid snak type'.format(snak_type))
if self.is_qualifier and self.is_reference:
raise ValueError('A claim cannot be a reference and a qualifer at the same time')
if (len(self.references) > 0 or len(self.qualifiers) > 0) and (self.is_qualifier or self.is_reference):
raise ValueError('Qualifiers or references cannot have references')
def has_equal_qualifiers(self, other):
# check if the qualifiers are equal with the 'other' object
equal_qualifiers = True
self_qualifiers = copy.deepcopy(self.get_qualifiers())
other_qualifiers = copy.deepcopy(other.get_qualifiers())
if len(self_qualifiers) != len(other_qualifiers):
equal_qualifiers = False
else:
flg = [False for x in range(len(self_qualifiers))]
for count, i in enumerate(self_qualifiers):
for q in other_qualifiers:
if i == q:
flg[count] = True
if not all(flg):
equal_qualifiers = False
return equal_qualifiers
def __eq__(self, other):
equal_qualifiers = self.has_equal_qualifiers(other)
equal_values = self.get_value() == other.get_value() and self.get_prop_nr() == other.get_prop_nr()
if not (self.check_qualifier_equality and other.check_qualifier_equality) and equal_values:
return True
elif equal_values and equal_qualifiers:
return True
else:
return False
def __ne__(self, other):
equal_qualifiers = self.has_equal_qualifiers(other)
nonequal_values = self.get_value() != other.get_value() or self.get_prop_nr() != other.get_prop_nr()
if not (self.check_qualifier_equality and other.check_qualifier_equality) and nonequal_values:
return True
if nonequal_values or not equal_qualifiers:
return True
else:
return False
# DEPRECATED: the property overwrite_references will be deprecated ASAP and should not be used
@property
def overwrite_references(self):
return self._overwrite_references
@overwrite_references.setter
def overwrite_references(self, value):
assert (value is True or value is False)
print('DEPRECATED!!! Calls to overwrite_references should not be used')
self._overwrite_references = value
@property
def statement_ref_mode(self):
return self._statement_ref_mode
@statement_ref_mode.setter
def statement_ref_mode(self, value):
"""Set the reference mode for a statement, always overrides the global reference state."""
valid_values = ['STRICT_KEEP', 'STRICT_KEEP_APPEND', 'STRICT_OVERWRITE', 'KEEP_GOOD', 'CUSTOM']
if value not in valid_values:
raise ValueError('Not an allowed reference mode, allowed values {}'.format(' '.join(valid_values)))
self._statement_ref_mode = value
def get_value(self):
return self.value
def set_value(self, value):
if value is None and self.snak_type not in {'novalue', 'somevalue'}:
raise ValueError("If 'value' is None, snak_type must be novalue or somevalue")
if self.snak_type in {'novalue', 'somevalue'}:
del self.json_representation['datavalue']
elif 'datavalue' not in self.json_representation:
self.json_representation['datavalue'] = {}
def get_references(self):
return self.references
def set_references(self, references):
if len(references) > 0 and (self.is_qualifier or self.is_reference):
raise ValueError('Qualifiers or references cannot have references')
self.references = references
def get_qualifiers(self):
return self.qualifiers
def set_qualifiers(self, qualifiers):
# TODO: introduce a check to prevent duplicate qualifiers, those are not allowed in WD
if len(qualifiers) > 0 and (self.is_qualifier or self.is_reference):
raise ValueError('Qualifiers or references cannot have references')
self.qualifiers = qualifiers
def get_rank(self):
if self.is_qualifier or self.is_reference:
return ''
else:
return self.rank
def set_rank(self, rank):
if self.is_qualifier or self.is_reference:
raise ValueError('References or qualifiers do not have ranks')
valid_ranks = ['normal', 'deprecated', 'preferred']
if rank not in valid_ranks:
raise ValueError('{} not a valid rank'.format(rank))
self.rank = rank
def get_id(self):
return self.id
def set_id(self, claim_id):
self.id = claim_id
def set_hash(self, wd_hash):
self.hash = wd_hash
def get_hash(self):
return self.hash
def get_prop_nr(self):
return self.prop_nr
def set_prop_nr(self, prop_nr):
if prop_nr[0] != 'P':
raise ValueError('Invalid property number')
self.prop_nr = prop_nr
def is_reference(self):
return self.is_reference
def is_qualifier(self):
return self.is_qualifier
def get_json_representation(self):
if self.is_qualifier or self.is_reference:
tmp_json = {
self.prop_nr: [self.json_representation]
}
if self.hash != '' and self.is_qualifier:
self.json_representation.update({'hash': self.hash})
return tmp_json
else:
ref_json = []
for count, ref in enumerate(self.references):
snaks_order = []
snaks = {}
ref_json.append({
'snaks': snaks,
'snaks-order': snaks_order
})
for sub_ref in ref:
prop_nr = sub_ref.get_prop_nr()
# set the hash for the reference block
if sub_ref.get_hash() != '':
ref_json[count].update({'hash': sub_ref.get_hash()})
tmp_json = sub_ref.get_json_representation()
# if more reference values with the same property number, append to its specific property list.
if prop_nr in snaks:
snaks[prop_nr].append(tmp_json[prop_nr][0])
else:
snaks.update(tmp_json)
snaks_order.append(prop_nr)
qual_json = {}
qualifiers_order = []
for qual in self.qualifiers:
prop_nr = qual.get_prop_nr()
if prop_nr in qual_json:
qual_json[prop_nr].append(qual.get_json_representation()[prop_nr][0])
else:
qual_json.update(qual.get_json_representation())
qualifiers_order.append(qual.get_prop_nr())
statement = {
'mainsnak': self.json_representation,
'type': 'statement',
'rank': self.rank,
'qualifiers': qual_json,
'qualifiers-order': qualifiers_order,
'references': ref_json
}
if self.id != '':
statement.update({'id': self.id})
if hasattr(self, 'remove'):
statement.update({'remove': ''})
return statement
@classmethod
@JsonParser
def from_json(cls, json_representation):
pass
@classmethod
def delete_statement(cls, prop_nr):
"""
This serves as an alternative constructor for WDBaseDataType with the only purpose of holding a WD property
number and an empty string value in order to indicate that the whole statement with this property number of a
WD item should be deleted.
:param prop_nr: A WD property number as string
:return: An instance of WDBaseDataType
"""
return cls(value='', snak_type='value', data_type='', is_reference=False, is_qualifier=False, references=[],
qualifiers=[], rank='', prop_nr=prop_nr, check_qualifier_equality=True)
def equals(self, that, include_ref=False, fref=None):
"""
Tests for equality of two statements.
If comparing references, the order of the arguments matters!!!
self is the current statement, the next argument is the new statement.
Allows passing in a function to use to compare the references 'fref'. Default is equality.
fref accepts two arguments 'oldrefs' and 'newrefs', each of which are a list of references,
where each reference is a list of statements
"""
if not include_ref:
# return the result of WDBaseDataType.__eq__, which is testing for equality of value and qualifiers
return self == that
if include_ref and self != that:
return False
if include_ref and fref is None:
fref = WDBaseDataType.refs_equal
return fref(self, that)
@staticmethod
def refs_equal(olditem, newitem):
"""
tests for exactly identical references
"""
oldrefs = olditem.references
newrefs = newitem.references
ref_equal = lambda oldref, newref: True if (len(oldref) == len(newref)) and all(
x in oldref for x in newref) else False
if len(oldrefs) == len(newrefs) and all(
any(ref_equal(oldref, newref) for oldref in oldrefs) for newref in newrefs):
return True
else:
return False
"""A mixin implementing a simple __repr__."""
def __repr__(self):
return "<{klass} @{id:x} {attrs}>".format(
klass=self.__class__.__name__,
id=id(self) & 0xFFFFFF,
attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
)
class WDString(WDBaseDataType):
"""
Implements the Wikidata data type 'string'
"""
DTYPE = 'string'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The string to be used as the value
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDString, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDString, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDMath(WDBaseDataType):
"""
Implements the Wikidata data type 'math' for mathematical formula in TEX format
"""
DTYPE = 'math'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The string to be used as the value
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDMath, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE, is_reference=is_reference,
is_qualifier=is_qualifier, references=references, qualifiers=qualifiers,
rank=rank, prop_nr=prop_nr, check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDMath, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDEDTF(WDBaseDataType):
"""
Implements the data type for Extended Date/Time Format (EDTF) extension.
More info: https://github.com/ProfessionalWiki/WikibaseEdtf
"""
DTYPE = 'edtf'
sparql_query = '''
SELECT * WHERE {{
?item_id <{wb_url}/prop/{pid}> ?s .
?s <{wb_url}/prop/statement/{pid}> '{value}'^^xsd:edtf .
}}
'''
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass BaseDataType
:param value: Value using the Extended Date/Time Format (EDTF)
:type value: str with a 'P' prefix, followed by several digits or only the digits without the 'P' prefix
:param prop_nr: The property number for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A data type with subclass of BaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A data type with subclass of BaseDataType
:param rank: rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDEDTF, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDEDTF, self).set_value(value=self.value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDExternalID(WDBaseDataType):
"""
Implements the Wikidata data type 'external-id'
"""
DTYPE = 'external-id'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The string to be used as the value
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDExternalID, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDExternalID, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDItemID(WDBaseDataType):
"""
Implements the Wikidata data type with a value being another WD item ID
"""
DTYPE = 'wikibase-entityid'
sparql_query = '''
PREFIX wd: <{wb_url}/entity/>
PREFIX wdt: <{wb_url}/prop/direct/>
PREFIX p: <{wb_url}/prop/>
PREFIX ps: <{wb_url}/prop/statement/>
PREFIX pq: <{wb_url}/prop/qualifier/>
SELECT * WHERE {{
?item_id p:{pid} ?s .
?s ps:{pid} wd:Q{value} .
OPTIONAL {{?s pq:{mrt_pid} ?mrt}}
}}
'''
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The WD item ID to serve as the value
:type value: str with a 'Q' prefix, followed by several digits or only the digits without the 'Q' prefix
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDItemID, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, (str, int)) or value is None, \
"Expected str or int, found {} ({})".format(type(value), value)
if value is None:
self.value = value
elif isinstance(value, int):
self.value = value
elif value.startswith("Q"):
pattern = re.compile('[0-9]+')
matches = pattern.match(value[1:])
if len(value[1:]) == len(matches.group(0)):
self.value = int(value[1:])
else:
raise ValueError('Invalid WD item ID, format must be "Q[0-9]*"')
else:
raise ValueError('Invalid WD item ID, format must be "Q[0-9]*"')
self.json_representation['datavalue'] = {
'value': {
'entity-type': 'item',
'numeric-id': self.value,
'id': 'Q{}'.format(self.value)
},
'type': 'wikibase-entityid'
}
super(WDItemID, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value']['numeric-id'], prop_nr=jsn['property'])
class WDProperty(WDBaseDataType):
"""
Implements the Wikidata data type with value 'property'
"""
DTYPE = 'wikibase-property'
sparql_query = '''
PREFIX wd: <{wb_url}/entity/>
PREFIX wdt: <{wb_url}/prop/direct/>
PREFIX p: <{wb_url}/prop/>
PREFIX ps: <{wb_url}/prop/statement/>
PREFIX pq: <{wb_url}/prop/qualifier/>
SELECT * WHERE {{
?item_id p:{pid} ?s .
?s ps:{pid} wd:P{value} .
OPTIONAL {{?s pq:{mrt_pid} ?mrt}}
}}
'''
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The WD property number to serve as a value
:type value: str with a 'P' prefix, followed by several digits or only the digits without the 'P' prefix
:param prop_nr: The WD property number for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDProperty, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, (str, int)) or value is None, \
"Expected str or int, found {} ({})".format(type(value), value)
if value is None:
self.value = value
elif isinstance(value, int):
self.value = value
elif value.startswith("P"):
pattern = re.compile('[0-9]+')
matches = pattern.match(value[1:])
if len(value[1:]) == len(matches.group(0)):
self.value = int(value[1:])
else:
raise ValueError('Invalid WD property ID, format must be "P[0-9]*"')
else:
raise ValueError('Invalid WD property ID, format must be "P[0-9]*"')
self.json_representation['datavalue'] = {
'value': {
'entity-type': 'property',
'numeric-id': self.value,
'id': 'P{}'.format(self.value)
},
'type': 'wikibase-entityid'
}
super(WDProperty, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value']['numeric-id'], prop_nr=jsn['property'])
class WDTime(WDBaseDataType):
"""
Implements the Wikidata data type with date and time values
"""
DTYPE = 'time'
def __init__(self, time, prop_nr, precision=11, timezone=0, calendarmodel=None,
concept_base_uri=None, is_reference=False, is_qualifier=False, snak_type='value',
references=None, qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param time: A time representation string in the following format: '+%Y-%m-%dT%H:%M:%SZ'
:type time: str in the format '+%Y-%m-%dT%H:%M:%SZ', e.g. '+2001-12-31T12:01:13Z'
:param prop_nr: The WD property number for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param precision: Precision value for dates and time as specified in the WD data model (https://www.mediawiki.org/wiki/Wikibase/DataModel#Dates_and_times)
:type precision: int
:param timezone: The timezone which applies to the date and time as specified in the WD data model
:type timezone: int
:param calendarmodel: The calendar model used for the date. URL to the WD calendar model item or the QID.
:type calendarmodel: str
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
calendarmodel = config['CALENDAR_MODEL_QID'] if calendarmodel is None else calendarmodel
concept_base_uri = config['CONCEPT_BASE_URI'] if concept_base_uri is None else concept_base_uri
if calendarmodel.startswith('Q'):
calendarmodel = concept_base_uri + calendarmodel
# the value is composed of what is requried to define the WD time object
value = (time, timezone, precision, calendarmodel)
super(WDTime, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE, is_reference=is_reference,
is_qualifier=is_qualifier, references=references, qualifiers=qualifiers, rank=rank,
prop_nr=prop_nr, check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
self.time, self.timezone, self.precision, self.calendarmodel = value
self.json_representation['datavalue'] = {
'value': {
'time': self.time,
'timezone': self.timezone,
'before': 0,
'after': 0,
'precision': self.precision,
'calendarmodel': self.calendarmodel
},
'type': 'time'
}
super(WDTime, self).set_value(value=self.time)
if self.time is not None:
assert isinstance(self.time, str), \
"WDTime time must be a string in the following format: '+%Y-%m-%dT%H:%M:%SZ'"
if self.precision < 0 or self.precision > 14:
raise ValueError('Invalid value for time precision, '
'see https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON#time')
if not (self.time.startswith("+") or self.time.startswith("-")):
self.time = "+" + self.time
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(time=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
value = jsn['datavalue']['value']
return cls(time=value['time'], prop_nr=jsn['property'], precision=value['precision'],
timezone=value['timezone'], calendarmodel=value['calendarmodel'])
class WDUrl(WDBaseDataType):
"""
Implements the Wikidata data type for URL strings
"""
DTYPE = 'url'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The URL to be used as the value
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDUrl, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE, is_reference=is_reference,
is_qualifier=is_qualifier, references=references, qualifiers=qualifiers, rank=rank,
prop_nr=prop_nr, check_qualifier_equality=check_qualifier_equality)
self.set_value(value)
def set_value(self, value):
if value is None:
self.value = None
else:
protocols = ['http://', 'https://', 'ftp://', 'irc://', 'mailto:']
if True not in [True for x in protocols if value.startswith(x)]:
raise ValueError('Invalid URL')
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDUrl, self).set_value(value=self.value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDMonolingualText(WDBaseDataType):
"""
Implements the Wikidata data type for Monolingual Text strings
"""
DTYPE = 'monolingualtext'
def __init__(self, value, prop_nr, language='en', is_reference=False, is_qualifier=False, snak_type='value',
references=None, qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The language specific string to be used as the value
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param language: Specifies the WD language the value belongs to
:type language: str
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
self.language = language
value = (value, language)
super(WDMonolingualText, self) \
.__init__(value=value, snak_type=snak_type, data_type=self.DTYPE, is_reference=is_reference,
is_qualifier=is_qualifier, references=references, qualifiers=qualifiers, rank=rank,
prop_nr=prop_nr, check_qualifier_equality=check_qualifier_equality)
self.set_value(value)
def set_value(self, value):
value = value[0]
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.json_representation['datavalue'] = {
'value': {
'text': value,
'language': self.language
},
'type': 'monolingualtext'
}
super(WDMonolingualText, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
value = jsn['datavalue']['value']
return cls(value=value['text'], prop_nr=jsn['property'], language=value['language'])
class WDQuantity(WDBaseDataType):
"""
Implements the Wikidata data type for quantities
"""
DTYPE = 'quantity'
def __init__(self, value, prop_nr, upper_bound=None, lower_bound=None, unit='1', is_reference=False,
is_qualifier=False, snak_type='value', references=None, qualifiers=None, rank='normal',
check_qualifier_equality=True, concept_base_uri=None):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The quantity value
:type value: float, str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param upper_bound: Upper bound of the value if it exists, e.g. for standard deviations
:type upper_bound: float, str
:param lower_bound: Lower bound of the value if it exists, e.g. for standard deviations
:type lower_bound: float, str
:param unit: The WD unit item URL or the QID a certain quantity has been measured
in (https://www.wikidata.org/wiki/Wikidata:Units). The default is dimensionless, represented by
a '1'
:type unit: str
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
concept_base_uri = config['CONCEPT_BASE_URI'] if concept_base_uri is None else concept_base_uri
if unit.startswith('Q'):
unit = concept_base_uri + unit
v = (value, unit, upper_bound, lower_bound)
super(WDQuantity, self).__init__(value=v, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(v)
def set_value(self, v):
value, unit, upper_bound, lower_bound = v
if value is not None:
value = self.format_amount(value)
unit = str(unit)
if upper_bound:
upper_bound = self.format_amount(upper_bound)
if lower_bound:
lower_bound = self.format_amount(lower_bound)
# Integrity checks for value and bounds
try:
for i in [value, upper_bound, lower_bound]:
if i:
float(i)
except ValueError as e:
raise ValueError('Value, bounds and units must parse as integers or float')
if (lower_bound and upper_bound) and (float(lower_bound) > float(upper_bound)
or float(lower_bound) > float(value)):
raise ValueError('Lower bound too large')
if upper_bound and float(upper_bound) < float(value):
raise ValueError('Upper bound too small')
self.json_representation['datavalue'] = {
'value': {
'amount': value,
'unit': unit,
'upperBound': upper_bound,
'lowerBound': lower_bound
},
'type': 'quantity'
}
# remove bounds from json if they are undefined
if not upper_bound:
del self.json_representation['datavalue']['value']['upperBound']
if not lower_bound:
del self.json_representation['datavalue']['value']['lowerBound']
self.value = (value, unit, upper_bound, lower_bound)
super(WDQuantity, self).set_value(value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, upper_bound=None, lower_bound=None, prop_nr=jsn['property'],
snak_type=jsn['snaktype'])
value = jsn['datavalue']['value']
upper_bound = value['upperBound'] if 'upperBound' in value else None
lower_bound = value['lowerBound'] if 'lowerBound' in value else None
return cls(value=value['amount'], prop_nr=jsn['property'], upper_bound=upper_bound,
lower_bound=lower_bound, unit=value['unit'])
def format_amount(self, amount):
# Remove .0 by casting to int
if float(amount) % 1 == 0:
amount = int(float(amount))
# Adding prefix + for positive number and 0
if not str(amount).startswith('+') and float(amount) >= 0:
amount = str('+{}'.format(amount))
# return as string
return str(amount)
class WDCommonsMedia(WDBaseDataType):
"""
Implements the Wikidata data type for Wikimedia commons media files
"""
DTYPE = 'commonsMedia'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The media file name from Wikimedia commons to be used as the value
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDCommonsMedia, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier,
references=references, qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.json_representation['datavalue'] = {
'value': value,
'type': 'string'
}
super(WDCommonsMedia, self).set_value(value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDLocalMedia(WDBaseDataType):
"""
Implements the data type for Wikibase local media files.
The new data type is introduced via the LocalMedia extension
https://github.com/ProfessionalWiki/WikibaseLocalMedia
"""
DTYPE = 'localMedia'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The media file name from the local Mediawiki to be used as the value
:type value: str
:param prop_nr: The property id for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDLocalMedia, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier,
references=references, qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.json_representation['datavalue'] = {
'value': value,
'type': 'string'
}
super(WDLocalMedia, self).set_value(value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDGlobeCoordinate(WDBaseDataType):
"""
Implements the Wikidata data type for globe coordinates
"""
DTYPE = 'globecoordinate'
def __init__(self, latitude, longitude, precision, prop_nr, globe=None,
concept_base_uri=None, is_reference=False, is_qualifier=False,
snak_type='value', references=None, qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param latitude: Latitute in decimal format
:type latitude: float
:param longitude: Longitude in decimal format
:type longitude: float
:param precision: Precision of the position measurement
:type precision: float
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
globe = config['COORDINATE_GLOBE_QID'] if globe is None else globe
concept_base_uri = config['CONCEPT_BASE_URI'] if concept_base_uri is None else concept_base_uri
if globe.startswith('Q'):
globe = concept_base_uri + globe
value = (latitude, longitude, precision, globe)
super(WDGlobeCoordinate, self) \
.__init__(value=value, snak_type=snak_type, data_type=self.DTYPE, is_reference=is_reference,
is_qualifier=is_qualifier, references=references, qualifiers=qualifiers, rank=rank,
prop_nr=prop_nr, check_qualifier_equality=check_qualifier_equality)
self.set_value(value)
def set_value(self, value):
# TODO: Introduce validity checks for coordinates
self.latitude, self.longitude, self.precision, self.globe = value
self.json_representation['datavalue'] = {
'value': {
'latitude': self.latitude,
'longitude': self.longitude,
'precision': self.precision,
'globe': self.globe
},
'type': 'globecoordinate'
}
super(WDGlobeCoordinate, self).set_value(self.latitude)
self.value = value
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(latitude=None, longitude=None, precision=None, prop_nr=jsn['property'],
snak_type=jsn['snaktype'])
value = jsn['datavalue']['value']
return cls(latitude=value['latitude'], longitude=value['longitude'], precision=value['precision'],
prop_nr=jsn['property'])
class WDGeoShape(WDBaseDataType):
"""
Implements the Wikidata data type 'geo-shape'
"""
DTYPE = 'geo-shape'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The GeoShape map file name in Wikimedia Commons to be linked
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDGeoShape, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
pattern = re.compile('Data:((?![:|#]).)+\.map')
matches = pattern.match(value)
if not matches:
raise ValueError(
'Value must start with Data: and end with .map. In addition title should not contain characters like colon, hash or pipe.')
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDGeoShape, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDMusicalNotation(WDBaseDataType):
"""
Implements the Wikidata data type 'string'
"""
DTYPE = 'musical-notation'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: Values for that data type are strings describing music following LilyPond syntax.
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDMusicalNotation, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier,
references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDMusicalNotation, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDTabularData(WDBaseDataType):
"""
Implements the Wikidata data type 'tabular-data'
"""
DTYPE = 'tabular-data'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: Reference to tabular data file on Wikimedia Commons.
:type value: str
:param prop_nr: The WD item ID for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDTabularData, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
pattern = re.compile('Data:((?![:|#]).)+\.tab')
matches = pattern.match(value)
if not matches:
raise ValueError(
'Value must start with Data: and end with .tab. In addition title should not contain characters like colon, hash or pipe.')
self.value = value
self.json_representation['datavalue'] = {
'value': self.value,
'type': 'string'
}
super(WDTabularData, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value'], prop_nr=jsn['property'])
class WDLexeme(WDBaseDataType):
"""
Implements the Wikidata data type with value 'wikibase-lexeme'
"""
DTYPE = 'wikibase-lexeme'
sparql_query = '''
PREFIX wd: <{wb_url}/entity/>
PREFIX wdt: <{wb_url}/prop/direct/>
PREFIX p: <{wb_url}/prop/>
PREFIX ps: <{wb_url}/prop/statement/>
PREFIX pq: <{wb_url}/prop/qualifier/>
SELECT * WHERE {{
?item_id p:{pid} ?s .
?s ps:{pid} wd:L{value} .
OPTIONAL {{?s pq:{mrt_pid} ?mrt}}
}}
'''
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The WD lexeme number to serve as a value
:type value: str with a 'P' prefix, followed by several digits or only the digits without the 'P' prefix
:param prop_nr: The WD property number for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDLexeme, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, (str, int)) or value is None, \
"Expected str or int, found {} ({})".format(type(value), value)
if value is None:
self.value = value
elif isinstance(value, int):
self.value = value
elif value.startswith("L"):
pattern = re.compile('[0-9]+')
matches = pattern.match(value[1:])
if len(value[1:]) == len(matches.group(0)):
self.value = int(value[1:])
else:
raise ValueError('Invalid WD lexeme ID, format must be "L[0-9]*"')
else:
raise ValueError('Invalid WD lexeme ID, format must be "L[0-9]*"')
self.json_representation['datavalue'] = {
'value': {
'entity-type': 'lexeme',
'numeric-id': self.value,
'id': 'L{}'.format(self.value)
},
'type': 'wikibase-entityid'
}
super(WDLexeme, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value']['numeric-id'], prop_nr=jsn['property'])
class WDForm(WDBaseDataType):
"""
Implements the Wikidata data type with value 'wikibase-form'
"""
DTYPE = 'wikibase-form'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The WD form number to serve as a value using the format "L<Lexeme ID>-F<Form ID>" (example: L252248-F2)
:type value: str with a 'P' prefix, followed by several digits or only the digits without the 'P' prefix
:param prop_nr: The WD property number for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDForm, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
if value is None:
self.value = value
elif value.startswith("L"):
pattern = re.compile('^L[0-9]+-F[0-9]+$')
matches = pattern.match(value)
if not matches:
raise ValueError('Invalid WD form ID, format must be "L[0-9]+-F[0-9]+"')
else:
raise ValueError('Invalid WD form ID, format must be "L[0-9]+-F[0-9]+"')
self.json_representation['datavalue'] = {
'value': {
'entity-type': 'form',
'id': self.value
},
'type': 'wikibase-entityid'
}
super(WDForm, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value']['id'], prop_nr=jsn['property'])
class WDSense(WDBaseDataType):
"""
Implements the Wikidata data type with value 'wikibase-sense'
"""
DTYPE = 'wikibase-sense'
def __init__(self, value, prop_nr, is_reference=False, is_qualifier=False, snak_type='value', references=None,
qualifiers=None, rank='normal', check_qualifier_equality=True):
"""
Constructor, calls the superclass WDBaseDataType
:param value: The WD form number to serve as a value using the format "L<Lexeme ID>-F<Form ID>" (example: L252248-F2)
:type value: str with a 'P' prefix, followed by several digits or only the digits without the 'P' prefix
:param prop_nr: The WD property number for this claim
:type prop_nr: str with a 'P' prefix followed by digits
:param is_reference: Whether this snak is a reference
:type is_reference: boolean
:param is_qualifier: Whether this snak is a qualifier
:type is_qualifier: boolean
:param snak_type: The snak type, either 'value', 'somevalue' or 'novalue'
:type snak_type: str
:param references: List with reference objects
:type references: A WD data type with subclass of WDBaseDataType
:param qualifiers: List with qualifier objects
:type qualifiers: A WD data type with subclass of WDBaseDataType
:param rank: WD rank of a snak with value 'preferred', 'normal' or 'deprecated'
:type rank: str
"""
super(WDSense, self).__init__(value=value, snak_type=snak_type, data_type=self.DTYPE,
is_reference=is_reference, is_qualifier=is_qualifier, references=references,
qualifiers=qualifiers, rank=rank, prop_nr=prop_nr,
check_qualifier_equality=check_qualifier_equality)
self.set_value(value=value)
def set_value(self, value):
assert isinstance(value, str) or value is None, "Expected str, found {} ({})".format(type(value), value)
if value is None:
self.value = value
elif value.startswith("L"):
pattern = re.compile('^L[0-9]+-S[0-9]+$')
matches = pattern.match(value)
if not matches:
raise ValueError('Invalid WD sense ID, format must be "L[0-9]+-S[0-9]+"')
else:
raise ValueError('Invalid WD sense ID, format must be "L[0-9]+-S[0-9]+"')
self.json_representation['datavalue'] = {
'value': {
'entity-type': 'sense',
'id': self.value
},
'type': 'wikibase-entityid'
}
super(WDSense, self).set_value(value=value)
@classmethod
@JsonParser
def from_json(cls, jsn):
if jsn['snaktype'] == 'novalue' or jsn['snaktype'] == 'somevalue':
return cls(value=None, prop_nr=jsn['property'], snak_type=jsn['snaktype'])
return cls(value=jsn['datavalue']['value']['id'], prop_nr=jsn['property'])
class WDApiError(Exception):
def __init__(self, wd_error_message):
"""
Base class for Wikidata error handling
:param wd_error_message: The error message returned by the WD API
:type wd_error_message: A Python json representation dictionary of the error message
:return:
"""
self.wd_error_msg = wd_error_message
def __str__(self):
return repr(self.wd_error_msg)
class NonUniqueLabelDescriptionPairError(WDApiError):
def __init__(self, wd_error_message):
"""
This class handles errors returned from the WD API due to an attempt to create an item which has the same \
label and description as an existing item in a certain language.\
:param wd_error_message: An WD API error mesage containing 'wikibase-validator-label-with-description-conflict'\
as the message name.\
:type wd_error_message: A Python json representation dictionary of the error message\
:return:"""
self.wd_error_msg = wd_error_message
def get_language(self):
"""
:return: Returns a 2 letter Wikidata language string, indicating the language which triggered the error
"""
return self.wd_error_msg['error']['messages'][0]['parameters'][1]
def get_conflicting_item_qid(self):
"""
TODO: Needs better explanation
:return: Returns the QID string of the item which has the same label and description as the one which should
be set.
"""
qid_string = self.wd_error_msg['error']['messages'][0]['parameters'][2]
return qid_string.split('|')[0][2:]
def __str__(self):
return repr(self.wd_error_msg)
class IDMissingError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class WDSearchError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ManualInterventionReqException(Exception):
def __init__(self, value, property_string, item_list):
self.value = value + ' Property: {}, items affected: {}'.format(property_string, item_list)
def __str__(self):
return repr(self.value)
class CorePropIntegrityException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MergeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class FormatterWithHeader(logging.Formatter):
def __init__(self, header, **kwargs):
super(FormatterWithHeader, self).__init__(**kwargs)
self.header = header
self.format = self.first_line_format
def first_line_format(self, record):
# First time in, switch back to the normal format function
self.format = super(FormatterWithHeader, self).format
return self.header + "\n" + self.format(record)
| {
"content_hash": "e5732d879d92630656dc13b138c51ea0",
"timestamp": "",
"source": "github",
"line_count": 3682,
"max_line_length": 164,
"avg_line_length": 43.96822379141771,
"alnum_prop": 0.5911631900476246,
"repo_name": "SuLab/WikidataIntegrator",
"id": "51419fccfeaf7f7eade21c2c1e91c363f82c4c04",
"size": "161891",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "wikidataintegrator/sdc_core.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "545146"
},
{
"name": "Python",
"bytes": "761370"
}
],
"symlink_target": ""
} |
r"""Classes for ADMM algorithms for Total Variation (TV) optimisation
with an :math:`\ell_1` data fidelity term"""
from __future__ import division, absolute_import
import copy
import numpy as np
from sporco.admm import admm
from sporco.array import zpad, atleast_nd, zdivide
from sporco.fft import real_dtype, fftn_func, ifftn_func
from sporco.signal import gradient_filters, grad, gradT
from sporco.linalg import rrs
from sporco.prox import prox_l1, prox_l2
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
class TVL1Denoise(admm.ADMM):
r"""ADMM algorithm for :math:`\ell_1`-TV denoising problem
:cite:`alliney-1992-digital` :cite:`esser-2010-primal` (Sec. 2.4.4).
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{x} \;
\| W_{\mathrm{df}} (\mathbf{x} - \mathbf{s}) \|_1 +
\lambda \left\| W_{\mathrm{tv}} \sqrt{(G_r \mathbf{x})^2 +
(G_c \mathbf{x})^2} \right\|_1
via the ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{x},\mathbf{y}_d,\mathbf{y}_r,\mathbf{y}_c} \;
(1/2) \| W_{\mathrm{df}} \mathbf{y}_d \|_1 +
\lambda \left\| W_{\mathrm{tv}} \sqrt{(\mathbf{y}_r)^2 +
(\mathbf{y}_c)^2} \right\|_1 \;\text{such that}\;
\left( \begin{array}{c} G_r \\ G_c \\ I \end{array} \right)
\mathbf{x} - \left( \begin{array}{c} \mathbf{y}_r \\
\mathbf{y}_c \\ \mathbf{y}_d \end{array}
\right) = \left( \begin{array}{c} \mathbf{0} \\ \mathbf{0} \\
\mathbf{s} \end{array} \right) \;\;,
where :math:`G_r` and :math:`G_c` are gradient operators along array
rows and columns respectively, and :math:`W_{\mathrm{df}}` and
:math:`W_{\mathrm{tv}}` are diagonal weighting matrices.
While these equations describe the default behaviour of regularisation
in two dimensions, this class supports an arbitrary number of
dimensions. For example, for 3D TV regularisation in a 3D array,
the object should be initialised with parameter `axes` set to
`(0, 1, 2)`.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`\|
W_{\mathrm{df}} (\mathbf{x} - \mathbf{s}) \|_1`
``RegTV`` : Value of regularisation term :math:`\|
W_{\mathrm{tv}} \sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2}
\|_1`
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``GSIter`` : Number of Gauss-Seidel iterations
``GSRelRes`` : Relative residual of Gauss-Seidel solution
``Time`` : Cumulative run time
"""
class Options(admm.ADMM.Options):
"""TVL1Denoise algorithm options
Options include all of those defined in
:class:`sporco.admm.admm.ADMM.Options`, together with
additional options:
``gEvalY`` : Flag indicating whether the :math:`g` component
of the objective function should be evaluated using variable
Y (``True``) or X (``False``) as its argument.
``MaxGSIter`` : Maximum Gauss-Seidel iterations.
``GSTol`` : Gauss-Seidel stopping tolerance.
``DFidWeight`` : Data fidelity weight matrix.
``TVWeight`` : TV term weight matrix.
"""
defaults = copy.deepcopy(admm.ADMM.Options.defaults)
defaults.update({'gEvalY': True, 'RelaxParam': 1.8,
'DFidWeight': 1.0, 'TVWeight': 1.0,
'GSTol': 0.0, 'MaxGSIter': 2
})
defaults['AutoRho'].update({'Enabled': False, 'Period': 1,
'AutoScaling': True, 'Scaling': 1000.0,
'RsdlRatio': 1.2})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
TVL1Denoise algorithm options
"""
if opt is None:
opt = {}
admm.ADMM.Options.__init__(self, opt)
if self['AutoRho', 'RsdlTarget'] is None:
self['AutoRho', 'RsdlTarget'] = 1.0
itstat_fields_objfn = ('ObjFun', 'DFid', 'RegTV')
itstat_fields_extra = ('GSIter', 'GSRelRes')
hdrtxt_objfn = ('Fnc', 'DFid', 'RegTV')
hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid', 'RegTV': 'RegTV'}
def __init__(self, S, lmbda, opt=None, axes=(0, 1), caxis=None):
"""
|
**Call graph**
.. image:: ../_static/jonga/tvl1den_init.svg
:width: 20%
:target: ../_static/jonga/tvl1den_init.svg
|
Parameters
----------
S : array_like
Signal vector or matrix
lmbda : float
Regularisation parameter
opt : TVL1Denoise.Options object
Algorithm options
axes : tuple, optional (default (0, 1))
Axes on which TV regularisation is to be applied
caxis : int or None, optional (default None)
Axis on which channels of a multi-channel image are stacked.
If None, TV regularisation is applied indepdendently to each
channel, otherwise Vector TV :cite:`blomgren-1998-color`
regularisation is applied jointly to all channels.
"""
if opt is None:
opt = TVL1Denoise.Options()
# Set flag indicating whether problem involves real or complex
# values
self.real_dtype = np.isrealobj(S)
# Set dtype attribute based on S.dtype and opt['DataType']
self.set_dtype(opt, S.dtype)
self.S = np.asarray(S, dtype=self.dtype)
self.axes = axes
if caxis is None:
self.saxes = (-1,)
else:
self.saxes = (caxis, -1)
self.lmbda = real_dtype(self.dtype).type(lmbda)
# Set penalty parameter
self.set_attr('rho', opt['rho'], dval=(2.0*self.lmbda + 0.1),
dtype=real_dtype(self.dtype))
yshape = S.shape + (len(axes)+1,)
super(TVL1Denoise, self).__init__(S.size, yshape, yshape, S.dtype, opt)
self.Wdf = np.asarray(self.opt['DFidWeight'],
dtype=real_dtype(self.dtype))
self.lcw = self.LaplaceCentreWeight()
self.Wtv = np.asarray(self.opt['TVWeight'],
dtype=real_dtype(self.dtype))
if hasattr(self.Wtv, 'ndim') and self.Wtv.ndim == S.ndim:
self.Wtvna = self.Wtv[..., np.newaxis]
else:
self.Wtvna = self.Wtv
# Need to initialise X because of Gauss-Seidel in xstep
self.X = self.S
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Yss = np.sqrt(np.sum(self.Y[..., 0:-1]**2, axis=self.S.ndim,
keepdims=True))
U0 = (self.lmbda/self.rho)*zdivide(self.Y[..., 0:-1], Yss)
U1 = (1.0 / self.rho)*np.sign(self.Y[..., -1:])
return np.concatenate((U0, U1), axis=self.S.ndim)
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
ngsit = 0
gsrrs = np.inf
YU = self.Y - self.U
SYU = self.S + YU[..., -1]
YU[..., -1] = 0.0
ATYU = self.cnst_AT(YU)
while gsrrs > self.opt['GSTol'] and ngsit < self.opt['MaxGSIter']:
self.X = self.GaussSeidelStep(
SYU, self.X, ATYU, 1.0, self.lcw, 1.0)
gsrrs = rrs(
self.cnst_AT(self.cnst_A(self.X)),
self.cnst_AT(self.cnst_c() - self.cnst_B(self.Y) - self.U)
)
ngsit += 1
self.xs = (ngsit, gsrrs)
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y[..., 0:-1] = prox_l2(
self.AX[..., 0:-1] + self.U[..., 0:-1],
(self.lmbda/self.rho)*self.Wtvna, axis=self.saxes)
self.Y[..., -1] = prox_l1(
self.AX[..., -1] + self.U[..., -1] - self.S,
(1.0/self.rho)*self.Wdf)
def obfn_gvar(self):
"""Variable to be evaluated in computing regularisation term,
depending on 'gEvalY' option value.
"""
if self.opt['gEvalY']:
return self.Y
else:
return self.cnst_A(self.X) - self.cnst_c()
def eval_objfn(self):
r"""Compute components of objective function as well as total
contribution to objective function. Data fidelity term is
:math:`(1/2) \| \mathbf{x} - \mathbf{s} \|_2^2` and
regularisation term is :math:`\| W_{\mathrm{tv}}
\sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2}\|_1`.
"""
if self.real_dtype:
gvr = self.obfn_gvar()
else:
gvr = np.abs(self.obfn_gvar())
dfd = np.sum(np.abs(self.Wdf * gvr[..., -1]))
reg = np.sum(self.Wtv * np.sqrt(np.sum(gvr[..., 0:-1]**2,
axis=self.saxes)))
obj = dfd + self.lmbda*reg
return (obj, dfd, reg)
def itstat_extra(self):
"""Non-standard entries for the iteration stats record tuple."""
return (self.xs[0], self.xs[1])
def cnst_A(self, X):
r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A \mathbf{x} = (G_r^T \;\; G_c^T
\;\; I)^T \mathbf{x}`.
"""
return np.concatenate(
[grad(X, ax)[..., np.newaxis] for ax in self.axes] +
[X[..., np.newaxis],], axis=X.ndim)
def cnst_AT(self, X):
r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
a component of ADMM problem constraint. In this case
:math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T \;\; I) \mathbf{x}`.
"""
return np.sum(np.concatenate(
[gradT(X[..., ax], ax)[..., np.newaxis] for ax in self.axes] +
[X[..., -1:],], axis=X.ndim-1), axis=X.ndim-1)
def cnst_B(self, Y):
r"""Compute :math:`B \mathbf{y}` component of ADMM problem
constraint. In this case :math:`B \mathbf{y} = -\mathbf{y}`.
"""
return -Y
def cnst_c(self):
r"""Compute constant component :math:`\mathbf{c}` of ADMM problem
constraint. In this case :math:`\mathbf{c} = (\mathbf{0} \;\;
\mathbf{0} \;\; \mathbf{s})`.
"""
c = np.zeros(self.S.shape + (len(self.axes)+1,), self.dtype)
c[..., -1] = self.S
return c
def rsdl_s(self, Yprev, Y):
"""Compute dual residual vector."""
return self.rho*np.linalg.norm(self.cnst_AT(self.U))
def rsdl_sn(self, U):
"""Compute dual residual normalisation term."""
return self.rho*np.linalg.norm(U)
def LaplaceCentreWeight(self):
"""Centre weighting matrix for TV Laplacian."""
sz = [1,] * self.S.ndim
for ax in self.axes:
sz[ax] = self.S.shape[ax]
lcw = 2*len(self.axes)*np.ones(sz, dtype=self.dtype)
for ax in self.axes:
lcw[(slice(None),)*ax + ([0, -1],)] -= 1.0
return lcw
def GaussSeidelStep(self, S, X, ATYU, rho, lcw, W2):
"""Gauss-Seidel step for linear system in TV problem."""
Xss = np.zeros_like(S, dtype=self.dtype)
for ax in self.axes:
Xss += zpad(X[(slice(None),)*ax + (slice(0, -1),)], (1, 0), ax)
Xss += zpad(X[(slice(None),)*ax + (slice(1, None),)],
(0, 1), ax)
return (rho*(Xss + ATYU) + W2*S) / (W2 + rho*lcw)
class TVL1Deconv(admm.ADMM):
r"""ADMM algorithm for :math:`\ell_1`-TV deconvolution problem.
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{x} \;
\| W_{\mathrm{df}} (H \mathbf{x} - \mathbf{s}) \|_1 +
\lambda \left\| W_{\mathrm{tv}} \sqrt{(G_r \mathbf{x})^2 +
(G_c \mathbf{x})^2} \right\|_1 \;\;,
where :math:`H` denotes the linear operator corresponding to a
convolution, :math:`G_r` and :math:`G_c` are gradient operators
along array rows and columns respectively, and
:math:`W_{\mathrm{df}}` and :math:`W_{\mathrm{tv}}` are diagonal
weighting matrices, via the ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{x},\mathbf{y}_d,\mathbf{y}_r,\mathbf{y}_c} \;
(1/2) \| W_{\mathrm{df}} \mathbf{y}_d \|_1 +
\lambda \left\| W_{\mathrm{tv}} \sqrt{(\mathbf{y}_r)^2 +
(\mathbf{y}_c)^2} \right\|_1 \;\text{such that}\;
\left( \begin{array}{c} G_r \\ G_c \\ H \end{array} \right)
\mathbf{x} - \left( \begin{array}{c} \mathbf{y}_r \\
\mathbf{y}_c \\ \mathbf{y}_d \end{array}
\right) = \left( \begin{array}{c} \mathbf{0} \\ \mathbf{0} \\
\mathbf{s} \end{array} \right) \;\;.
While these equations describe the default behaviour of regularisation
in two dimensions, this class supports an arbitrary number of
dimensions. For example, for 3D TV regularisation in a 3D array,
the object should be initialised with parameter `axes` set to
`(0, 1, 2)`.
Note that the convolution is implemented in the frequency domain,
having the same phase offset as :func:`.fftconv`, which differs from
that of :func:`scipy.ndimage.convolve` with the default ``origin``
parameter.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`\|
W_{\mathrm{df}} (H \mathbf{x} - \mathbf{s}) \|_1`
``RegTV`` : Value of regularisation term :math:`\|
W_{\mathrm{tv}} \sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2}
\|_1`
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``XSlvRelRes`` : Relative residual of X step solver
``Time`` : Cumulative run time
"""
class Options(admm.ADMM.Options):
"""TVL1Deconv algorithm options
Options include all of those defined in
:class:`sporco.admm.admm.ADMM.Options`, together with
additional options:
``gEvalY`` : Flag indicating whether the :math:`g` component
of the objective function should be evaluated using variable
Y (``True``) or X (``False``) as its argument.
``LinSolveCheck`` : If ``True``, compute relative residual of
X step solver.
``DFidWeight`` : Data fidelity weight matrix.
``TVWeight`` : TV term weight matrix.
"""
defaults = copy.deepcopy(admm.ADMM.Options.defaults)
defaults.update(
{'gEvalY': True, 'RelaxParam': 1.8, 'LinSolveCheck': False,
'DFidWeight': 1.0, 'TVWeight': 1.0})
defaults['AutoRho'].update(
{'Enabled': False, 'Period': 1, 'AutoScaling': True,
'Scaling': 1000.0, 'RsdlRatio': 1.2})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
TVL1Deconv algorithm options
"""
if opt is None:
opt = {}
admm.ADMM.Options.__init__(self, opt)
if self['AutoRho', 'RsdlTarget'] is None:
self['AutoRho', 'RsdlTarget'] = 1.0
itstat_fields_objfn = ('ObjFun', 'DFid', 'RegTV')
itstat_fields_extra = ('XSlvRelRes',)
hdrtxt_objfn = ('Fnc', 'DFid', 'RegTV')
hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid', 'RegTV': 'RegTV'}
def __init__(self, A, S, lmbda, opt=None, axes=(0, 1), caxis=None):
"""
|
**Call graph**
.. image:: ../_static/jonga/tvl1dcn_init.svg
:width: 20%
:target: ../_static/jonga/tvl1dcn_init.svg
|
Parameters
----------
A : array_like
Filter kernel corresponding to operator :math:`H` above
S : array_like
Signal vector or matrix
lmbda : float
Regularisation parameter
opt : TVL1Deconv.Options object
Algorithm options
axes : tuple, optional (default (0, 1))
Axes on which TV regularisation is to be applied
caxis : int or None, optional (default None)
Axis on which channels of a multi-channel image are stacked.
If None, TV regularisation is applied indepdendently to each
channel, otherwise Vector TV :cite:`blomgren-1998-color`
regularisation is applied jointly to all channels.
"""
if opt is None:
opt = TVL1Deconv.Options()
# Set flag indicating whether problem involves real or complex
# values, and get appropriate versions of functions from fft
# module
self.real_dtype = np.isrealobj(S)
self.fftn = fftn_func(self.real_dtype)
self.ifftn = ifftn_func(self.real_dtype)
# Set dtype attribute based on S.dtype and opt['DataType']
self.set_dtype(opt, S.dtype)
self.axes = axes
self.axsz = tuple([S.shape[i] for i in axes])
if caxis is None:
self.saxes = (-1,)
else:
self.saxes = (caxis, -1)
self.lmbda = real_dtype(self.dtype).type(lmbda)
# Set penalty parameter
self.set_attr('rho', opt['rho'], dval=(2.0*self.lmbda + 0.1),
dtype=real_dtype(self.dtype))
yshape = S.shape + (len(axes)+1,)
self.S = np.asarray(S, dtype=self.dtype)
super(TVL1Deconv, self).__init__(S.size, yshape, yshape, S.dtype, opt)
self.axshp = tuple([S.shape[k] for k in axes])
self.A = atleast_nd(S.ndim, A.astype(self.dtype))
self.Af = self.fftn(self.A, self.axshp, axes=axes)
self.Sf = self.fftn(self.S, axes=axes)
self.AHAf = np.conj(self.Af)*self.Af
self.AHSf = np.conj(self.Af)*self.Sf
self.Wdf = np.asarray(self.opt['DFidWeight'],
dtype=real_dtype(self.dtype))
self.Wtv = np.asarray(self.opt['TVWeight'],
dtype=real_dtype(self.dtype))
if hasattr(self.Wtv, 'ndim') and self.Wtv.ndim == S.ndim:
self.Wtvna = self.Wtv[..., np.newaxis]
else:
self.Wtvna = self.Wtv
self.Gf, self.GHGf = gradient_filters(S.ndim, axes, self.axshp,
dtype=self.dtype)
self.GAf = np.concatenate((self.Gf, self.Af[..., np.newaxis]),
axis=self.Gf.ndim-1)
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Yss = np.sqrt(np.sum(self.Y[..., 0:-1]**2, axis=self.S.ndim,
keepdims=True))
U0 = (self.lmbda/self.rho)*zdivide(self.Y[..., 0:-1], Yss)
U1 = (1.0 / self.rho)*np.sign(self.Y[..., -1:])
return np.concatenate((U0, U1), axis=self.S.ndim)
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
b = self.AHSf + np.sum(
np.conj(self.GAf) * self.fftn(self.Y-self.U, axes=self.axes),
axis=self.Y.ndim-1)
self.Xf = b / (self.AHAf + self.GHGf)
self.X = self.ifftn(self.Xf, self.axsz, axes=self.axes)
if self.opt['LinSolveCheck']:
ax = (self.AHAf + self.GHGf)*self.Xf
self.xrrs = rrs(ax, b)
else:
self.xrrs = None
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y[..., 0:-1] = prox_l2(
self.AX[..., 0:-1] + self.U[..., 0:-1],
(self.lmbda/self.rho)*self.Wtvna, axis=self.saxes)
self.Y[..., -1] = prox_l1(
self.AX[..., -1] + self.U[..., -1] - self.S,
(1.0/self.rho)*self.Wdf)
def obfn_gvar(self):
"""Variable to be evaluated in computing regularisation term,
depending on 'gEvalY' option value.
"""
if self.opt['gEvalY']:
return self.Y
else:
return self.cnst_A(None, self.Xf) - self.cnst_c()
def eval_objfn(self):
r"""Compute components of objective function as well as total
contribution to objective function. Data fidelity term is
:math:`\| W_{\mathrm{df}} (H \mathbf{x} - \mathbf{s}) \|_1` and
regularisation term is :math:`\| W_{\mathrm{tv}}
\sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2}\|_1`.
"""
if self.real_dtype:
gvr = self.obfn_gvar()
else:
gvr = np.abs(self.obfn_gvar())
dfd = np.sum(self.Wdf * np.abs(gvr[..., -1]))
reg = np.sum(self.Wtv * np.sqrt(np.sum(gvr[..., 0:-1]**2,
axis=self.saxes)))
obj = dfd + self.lmbda*reg
return (obj, dfd, reg)
def itstat_extra(self):
"""Non-standard entries for the iteration stats record tuple."""
return (self.xrrs,)
def cnst_A(self, X, Xf=None):
r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A \mathbf{x} = (G_r^T \;\;
G_c^T \;\; H)^T \mathbf{x}`.
"""
if Xf is None:
Xf = self.fftn(X, axes=self.axes)
return self.ifftn(self.GAf*Xf[..., np.newaxis], self.axsz,
axes=self.axes)
def cnst_AT(self, X):
r"""Compute :math:`A^T \mathbf{x}` where :math:`A \mathbf{x}` is
a component of ADMM problem constraint. In this case
:math:`A^T \mathbf{x} = (G_r^T \;\; G_c^T \;\; H^T) \mathbf{x}`.
"""
Xf = self.fftn(X, axes=self.axes)
return np.sum(self.ifftn(np.conj(self.GAf)*Xf, self.axsz,
axes=self.axes), axis=self.Y.ndim-1)
def cnst_B(self, Y):
r"""Compute :math:`B \mathbf{y}` component of ADMM problem
constraint. In this case :math:`B \mathbf{y} = -\mathbf{y}`.
"""
return -Y
def cnst_c(self):
r"""Compute constant component :math:`\mathbf{c}` of ADMM problem
constraint. In this case :math:`\mathbf{c} = (\mathbf{0} \;\;
\mathbf{0} \;\; \mathbf{s})`.
"""
c = np.zeros(self.S.shape + (len(self.axes)+1,), self.dtype)
c[..., -1] = self.S
return c
def rsdl_s(self, Yprev, Y):
"""Compute dual residual vector."""
return self.rho*np.linalg.norm(self.cnst_AT(self.U))
def rsdl_sn(self, U):
"""Compute dual residual normalisation term."""
return self.rho*np.linalg.norm(U)
| {
"content_hash": "e728a85c1e3a4a001226e3656f7205b2",
"timestamp": "",
"source": "github",
"line_count": 750,
"max_line_length": 79,
"avg_line_length": 32.288,
"alnum_prop": 0.5392302609844731,
"repo_name": "bwohlberg/sporco",
"id": "5380f68077e804416eceb9264a80345bd753ace8",
"size": "24507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sporco/admm/tvl1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1317433"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
import boto
import glob
import os
import nback.lib.logger as logger
import nback.lib.utils as utils
import nback.settings as settings
class AWSS3(logger.Logger):
FIRST_DAY_OF_MONTH = '01'
MB_IN_BYTES = 1049000
MULTIPART_UPLOAD_CHUNK_SIZE = 250 * MB_IN_BYTES
SPLIT_FILES_CMD = 'split -b %d %s %s'
FILES_ALL_PREFIX = '%s*'
log_file = settings.BACKUP_LOG_FILE
ref_class = 'aws-s3'
conn = None
bucket = None
bucket_name = None
access_key = None
secret_access_key = None
def __init__(self, bucket_name, access_key, secret_access_key):
logger.Logger.__init__(self)
self.bucket_name = bucket_name
self.access_key = access_key
self.secret_access_key = secret_access_key
def connect(self):
self.log.debug('connecting to AWS S3...')
self.conn = boto.connect_s3(self.access_key, self.secret_access_key)
self.bucket = self.conn.get_bucket(self.bucket_name)
self.log.debug('connected, bucket set to <%s>' % self.bucket_name)
def disconnect(self):
self.conn.close()
self.log.debug('connection to AWS S3 closed')
def upload(self, src):
dest = '%s/%s' % (settings.AWS_DIR, src)
src_file_size = os.path.getsize(src)
k = boto.s3.key.Key(self.bucket)
k.key = dest
self.log.debug('created S3 key %r' % k)
self.log.debug('source filesize is <%d> bytes' % src_file_size)
if src_file_size < self.MULTIPART_UPLOAD_CHUNK_SIZE:
self._standard_transfer(k, src, dest)
else:
self._multipart_transfer(k, src, src_file_size, dest)
def sync(self, filename):
# TODO(nandersson):
# * add some logic so we only get backups from this server here,
# e.g. match against server_name, suffix of 'tar.bz2' etc
key_list = self.bucket.list(prefix=settings.AWS_DIR)
for k in key_list:
if self._get_backup_filename(k.key) == filename: continue
remove_file = True
k_date = self._get_backup_date(k.key)
k_day = k_date[6:8] # fmt e.g.: 20130812
remove_file = not (settings.BACKUP_SAVE_EVERY_MONTH and
k_day == self.FIRST_DAY_OF_MONTH)
if remove_file:
for i in range(settings.BACKUP_DAYS_TO_STORE):
if k_date == utils.get_timestamp('%Y%m%d', i):
remove_file = False
break
if remove_file:
k.delete()
self.log.debug('removed outdated backup <%s>' % k.key)
self.log.debug('everything in sync')
def _standard_transfer(self, key, src, dest):
self.log.debug('uploading...')
key.set_contents_from_filename(src)
self.log.debug('upload successful')
def _multipart_transfer(self, key, src, src_file_size, dest):
# TODO(nandersson):
# * should be able to parallelize this, one for each core for example,
# to increase throughput (upload parts at the same time)
# NOTE(nandersson):
# * max filesize for a PUT request to AWS S3 API is 5GB, so need to
# split it up for files larger than that.. uploading in chunks of size
# set in self.MULTIPART_UPLOAD_CHUNK_SIZE
self.log.debug('uploading using multipart...')
chunks = (src_file_size / self.MULTIPART_UPLOAD_CHUNK_SIZE) + 1
self.log.debug('splitting <%d> bytes in <%d> chunks of max <%d> bytes'
% (src_file_size, chunks,
self.MULTIPART_UPLOAD_CHUNK_SIZE))
files = self._split_file(src, src_file_size,
self.MULTIPART_UPLOAD_CHUNK_SIZE)
mp = self.bucket.initiate_multipart_upload(key)
for i, filename in enumerate(files):
f = open(filename, 'r')
self.log.debug('uploading chunk <%d>, part file <%s>..'
% (i+1, filename))
mp.upload_part_from_file(f, i+1)
f.close()
os.remove(filename)
mp.complete_upload()
self.log.debug('upload successful')
@staticmethod
def _split_file(path, chunks, chunk_bytes):
# TODO(nandersson):
# * might want to move this to a more general file class
os.system(AWSS3.SPLIT_FILES_CMD % (chunk_bytes, path, path))
splitted_files = glob.glob(AWSS3.FILES_ALL_PREFIX % path)
splitted_files.remove(path)
return sorted(splitted_files)
@staticmethod
def _get_backup_filename(path):
path_tree = path.split('/')
return path_tree[len(path_tree)-1]
@staticmethod
def _get_backup_date(path):
file_day = AWSS3._get_backup_filename(path).split('-')
if len(file_day) == 0: return None
return file_day[len(file_day)-2]
| {
"content_hash": "7a342479523421baf34437af6211dd25",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 80,
"avg_line_length": 39.104,
"alnum_prop": 0.5822422258592471,
"repo_name": "Niklas9/naktul",
"id": "4741c634b42cd5e66775b9af5a29a837c3c78c14",
"size": "4889",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nback/lib/storage/aws_s3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22619"
}
],
"symlink_target": ""
} |
"""Test Transformer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import unittest
from absl import flags
from absl.testing import flagsaver
import tensorflow as tf
from tensorflow.python.eager import context # pylint: disable=ungrouped-imports
from official.nlp.transformer import misc
from official.nlp.transformer import transformer_main
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
FIXED_TIMESTAMP = 'my_time_stamp'
WEIGHT_PATTERN = re.compile(r'weights-epoch-.+\.hdf5')
def _generate_file(filepath, lines):
with open(filepath, 'w') as f:
for l in lines:
f.write('{}\n'.format(l))
class TransformerTaskTest(tf.test.TestCase):
local_flags = None
def setUp(self):
temp_dir = self.get_temp_dir()
if TransformerTaskTest.local_flags is None:
misc.define_transformer_flags()
# Loads flags, array cannot be blank.
flags.FLAGS(['foo'])
TransformerTaskTest.local_flags = flagsaver.save_flag_values()
else:
flagsaver.restore_flag_values(TransformerTaskTest.local_flags)
FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP)
FLAGS.param_set = 'tiny'
FLAGS.use_synthetic_data = True
FLAGS.steps_between_evals = 1
FLAGS.train_steps = 2
FLAGS.validation_steps = 1
FLAGS.batch_size = 8
FLAGS.max_length = 1
FLAGS.num_gpus = 1
FLAGS.distribution_strategy = 'off'
FLAGS.dtype = 'fp32'
self.model_dir = FLAGS.model_dir
self.temp_dir = temp_dir
self.vocab_file = os.path.join(temp_dir, 'vocab')
self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size']
self.bleu_source = os.path.join(temp_dir, 'bleu_source')
self.bleu_ref = os.path.join(temp_dir, 'bleu_ref')
self.orig_policy = (
tf.compat.v2.keras.mixed_precision.experimental.global_policy())
def tearDown(self):
tf.compat.v2.keras.mixed_precision.experimental.set_policy(self.orig_policy)
def _assert_exists(self, filepath):
self.assertTrue(os.path.exists(filepath))
def test_train_no_dist_strat(self):
if context.num_gpus() >= 2:
self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
t = transformer_main.TransformerTask(FLAGS)
t.train()
def test_train_static_batch(self):
if context.num_gpus() >= 2:
self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
FLAGS.distribution_strategy = 'one_device'
if tf.test.is_built_with_cuda():
FLAGS.num_gpus = 1
else:
FLAGS.num_gpus = 0
FLAGS.static_batch = True
t = transformer_main.TransformerTask(FLAGS)
t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_1_gpu_with_dist_strat(self):
FLAGS.distribution_strategy = 'one_device'
t = transformer_main.TransformerTask(FLAGS)
t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_fp16(self):
FLAGS.distribution_strategy = 'one_device'
FLAGS.dtype = 'fp16'
t = transformer_main.TransformerTask(FLAGS)
t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_2_gpu(self):
if context.num_gpus() < 2:
self.skipTest(
'{} GPUs are not available for this test. {} GPUs are available'
.format(2, context.num_gpus()))
FLAGS.distribution_strategy = 'mirrored'
FLAGS.num_gpus = 2
FLAGS.param_set = 'base'
t = transformer_main.TransformerTask(FLAGS)
t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_2_gpu_fp16(self):
if context.num_gpus() < 2:
self.skipTest(
'{} GPUs are not available for this test. {} GPUs are available'
.format(2, context.num_gpus()))
FLAGS.distribution_strategy = 'mirrored'
FLAGS.num_gpus = 2
FLAGS.param_set = 'base'
FLAGS.dtype = 'fp16'
t = transformer_main.TransformerTask(FLAGS)
t.train()
def _prepare_files_and_flags(self, *extra_flags):
# Make log dir.
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
# Fake vocab, bleu_source and bleu_ref.
tokens = [
"'<pad>'", "'<EOS>'", "'_'", "'a'", "'b'", "'c'", "'d'", "'a_'", "'b_'",
"'c_'", "'d_'"
]
tokens += ["'{}'".format(i) for i in range(self.vocab_size - len(tokens))]
_generate_file(self.vocab_file, tokens)
_generate_file(self.bleu_source, ['a b', 'c d'])
_generate_file(self.bleu_ref, ['a b', 'd c'])
# Update flags.
update_flags = [
'ignored_program_name',
'--vocab_file={}'.format(self.vocab_file),
'--bleu_source={}'.format(self.bleu_source),
'--bleu_ref={}'.format(self.bleu_ref),
]
if extra_flags:
update_flags.extend(extra_flags)
FLAGS(update_flags)
def test_predict(self):
if context.num_gpus() >= 2:
self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
self._prepare_files_and_flags()
t = transformer_main.TransformerTask(FLAGS)
t.predict()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_predict_fp16(self):
if context.num_gpus() >= 2:
self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
self._prepare_files_and_flags('--dtype=fp16')
t = transformer_main.TransformerTask(FLAGS)
t.predict()
def test_eval(self):
if context.num_gpus() >= 2:
self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
if 'test_xla' in sys.argv[0]:
self.skipTest('TODO(xla): Make this test faster under XLA.')
self._prepare_files_and_flags()
t = transformer_main.TransformerTask(FLAGS)
t.eval()
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "a7212b0c89fe515e8c529bc7aced30b3",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 80,
"avg_line_length": 33.09604519774011,
"alnum_prop": 0.6563673608740185,
"repo_name": "tombstone/models",
"id": "a65cc4bcbf3a1c4281a36730a1ab60c496f3c7aa",
"size": "6547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/nlp/transformer/transformer_main_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
} |
from paython.lib.cc import CreditCard
from paython.exceptions import DataValidationError
from nose.tools import assert_equals, assert_false, assert_true, with_setup, raises
def setup():
"""setting up the test"""
global test_cards
test_cards = {
'visa': "4111111111111111",
'amex': "378282246310005",
'mc': "5555555555554444",
'discover': "6011111111111117",
'diners': "30569309025904"
}
def teardown():
"""teardowning the test"""
pass
@with_setup(setup, teardown)
@raises(DataValidationError)
def test_invalid():
"""test if a credit card number is luhn invalid"""
credit_card = CreditCard(
number = "4111111111111113", # invalid credit card
exp_mo = "12",
exp_yr = "2019",
first_name = "John",
last_name = "Doe",
cvv = "123",
strict = False
)
# safe check for luhn valid
assert_false(credit_card.is_valid())
# checking if the exception fires
credit_card.validate()
@with_setup(setup, teardown)
@raises(DataValidationError)
def test_invalid():
"""test if a credit card number is luhn invalid"""
credit_card = CreditCard(
number = "411111111111111a", # invalid credit card
exp_mo = "12",
exp_yr = "2019",
first_name = "John",
last_name = "Doe",
cvv = "123",
strict = False
)
# safe check for luhn valid
assert_false(credit_card.is_valid())
# checking if the exception fires
credit_card.validate()
@with_setup(setup, teardown)
@raises(DataValidationError)
def test_expired_credit_card():
"""test if a credit card number is expired"""
credit_card = CreditCard(
number = "4111111111111111",
exp_mo = "12",
exp_yr = "1990", # old ass credit card
first_name = "John",
last_name = "Doe",
cvv = "123",
strict = False
)
# safe check for luhn valid
assert_false(credit_card.is_valid())
# checking if the exception fires
credit_card.validate()
@with_setup(setup, teardown)
@raises(DataValidationError)
def test_invalid_cvv():
"""test if a credit card number has an invalid cvv"""
credit_card = CreditCard(
number = "4111111111111111",
exp_mo = "12",
exp_yr = "2018",
first_name = "John",
last_name = "Doe",
cvv = "1", # invalid cvv
strict = True
)
# safe check for luhn valid
assert_false(credit_card.is_valid())
# checking if the exception fires
credit_card.validate()
@with_setup(setup, teardown)
def test_valid():
"""test if a credit card number is luhn valid"""
for test_cc_type, test_cc_num in test_cards.items():
# create a credit card object
credit_card = CreditCard(
number = test_cc_num, # valid credit card
exp_mo = "12",
exp_yr = "2019",
first_name = "John",
last_name = "Doe",
cvv = "123",
strict = False
)
# safe check
assert_true(credit_card.is_valid())
# check the type
assert_equals(test_cc_type, credit_card.card_type)
@with_setup(setup, teardown)
def test_to_string():
"""test if a credit card outputs the right to str value"""
credit_card = CreditCard(
number = '4111111111111111',
exp_mo = '02',
exp_yr = '2012',
first_name = 'John',
last_name = 'Doe',
cvv = '911',
strict = False
)
# safe check
assert_true(credit_card.is_valid())
# checking if our str() method (or repr()) is ok
final_str = '<CreditCard -- John Doe, visa, ************1111, expires: 02/2012>'
assert_equals(str(credit_card), final_str)
@with_setup(setup, teardown)
def test_full_name():
"""testing full_name support"""
credit_card = CreditCard(
number = '4111111111111111',
exp_mo = '02',
exp_yr = '2012',
full_name = 'John Doe',
cvv = '911',
strict = False
)
# safe check
assert_true(credit_card.is_valid())
# checking if our str() method (or repr()) is ok
final_str = '<CreditCard -- John Doe, visa, ************1111, expires: 02/2012>'
assert_equals(str(credit_card), final_str)
@with_setup(setup, teardown)
def test_exp_styled():
"""testing support for 2 digits expiracy year"""
credit_card = CreditCard(
number = '4111111111111111',
exp_mo = '02',
exp_yr = '2012',
full_name = 'John Doe',
cvv = '911',
strict = False
)
credit_card._exp_yr_style = True
# safe check
assert_true(credit_card.is_valid())
# checking if our str() method (or repr()) is ok
final_str = '<CreditCard -- John Doe, visa, ************1111, expires: 02/2012 --extra: 12>'
assert_equals(str(credit_card), final_str)
| {
"content_hash": "f3fa6712e32c62b1a91ec9996fff8f12",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 96,
"avg_line_length": 28.359116022099446,
"alnum_prop": 0.5536723163841808,
"repo_name": "jacobpgallagher/Paython",
"id": "bd569189d592c2459a0cf01d0fac67eebbaf5644",
"size": "5133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_credit_card.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94958"
}
],
"symlink_target": ""
} |
from flask import Flask
app = Flask(__name__)
import annotaria.views | {
"content_hash": "09389eeb5c5236282809b87420112690",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 23,
"avg_line_length": 17.25,
"alnum_prop": 0.7536231884057971,
"repo_name": "ciromattia/annotaria",
"id": "e14bee08eeb9ea92fc56bfc0764ec18e0e8f1b5e",
"size": "69",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "annotaria/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17833"
},
{
"name": "JavaScript",
"bytes": "21243"
},
{
"name": "Python",
"bytes": "23899"
}
],
"symlink_target": ""
} |
import re
import json
import base64
from urllib import request
from OpenSSL import crypto
from urllib.error import HTTPError
from pay_with_amazon.payment_response import PaymentResponse
class IpnHandler(object):
"""Instant Payment Notifications (IPN) can be used to monitor the state
transition of payment objects.
Amazon sends you a notification when the state of any of the payment
objects or the Order Reference object changes. These notifications are
always sent without any action required on your part and can be used to
update any internal tracking or fulfillment systems you might be using to
manage the order.
After you receive an IPN, a best practice is to perform a get operation for
the respective object for which you have received the notification. You can
use the response of the get operation to update your systems.
With each notification you receive, you should configure your endpoint to
send Amazon a '200 OK' response immediately after receipt. If you do not
send this response or if your server is down when the SNS message is sent,
Amazon SNS will perform retries every hour for 14 days.
Amazon Simple Notification Service (Amazon SNS) is a fast, flexible, fully
managed push notification service.
"""
def __init__(self, body, headers):
"""
Parameters
----------
body : string
The body of the SNS message.
headers : dictionary
The headers of the SNS message.
Properties
----------
error : string
Holds the latest error, if any.
"""
self.error = None
self._root = None
self._ns = None
self._response_type = None
self._headers = headers
self._payload = json.loads(body.decode('utf-8'))
self._pem = None
self._message_encoded = self._payload['Message']
self._message = json.loads(self._payload['Message'])
self._message_id = self._payload['MessageId']
self._topic_arn = self._payload['TopicArn']
self._notification_data = self._message['NotificationData']
self._signing_cert_url = self._payload['SigningCertURL']
self._signature = self._payload['Signature']
self._timestamp = self._payload['Timestamp']
self._type = self._payload['Type']
self._xml = self._notification_data.replace(
'<?xml version="1.0" encoding="UTF-8"?>\n',
'')
def authenticate(self):
"""Attempt to validate a SNS message received from Amazon
From release version 2.7.9/3.4.3 on, Python by default attempts to
perform certificate validation. Returns True on success.
https://docs.python.org/2/library/httplib.html#httplib.HTTPSConnection
Changed in version 3.4.3: This class now performs all the necessary
certificate and hostname checks by default.
"""
self._validate_header()
self._validate_cert_url()
self._get_cert()
self._validate_signature()
return True
def _validate_header(self):
"""Compare the header topic_arn to the body topic_arn """
if 'X-Amz-Sns-Topic-Arn' in self._headers:
if self._topic_arn != self._headers.get(
'X-Amz-Sns-Topic-Arn'):
self.error = 'Invalid TopicArn.'
raise ValueError('Invalid TopicArn')
else:
self.error = 'Invalid TopicArn'
raise ValueError('Invalid TopicArn')
return True
def _validate_cert_url(self):
"""Checks to see if the certificate URL points to a AWS endpoint and
validates the signature using the .pem from the certificate URL.
"""
if not re.search(
'https\:\/\/sns\.(.*)\.amazonaws\.com(.*)\.pem',
self._signing_cert_url):
self.error = 'Certificate is not hosted at AWS URL'
raise ValueError('Certificate is not hosted at AWS URL')
return True
def _get_cert(self):
try:
cert_req = request.urlopen(
url=request.Request(self._signing_cert_url))
except HTTPError as ex:
self.error = 'Error retrieving certificate.'
raise ValueError(
'Error retrieving certificate. {0}'.format(
ex.reason))
self._pem = str(cert_req.read(), encoding='utf-8')
return True
def _validate_signature(self):
"""Generate signing string and validate signature"""
signing_string = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n{8}\n{9}\n'.format(
'Message',
self._message_encoded,
'MessageId',
self._message_id,
'Timestamp',
self._timestamp,
'TopicArn',
self._topic_arn,
'Type',
self._type)
crt = crypto.load_certificate(crypto.FILETYPE_PEM, self._pem)
signature = base64.b64decode(self._signature)
try:
crypto.verify(
crt,
signature,
signing_string.encode('utf-8'),
'sha1')
except:
self.error = 'Invalid signature.'
raise ValueError('Invalid signature.')
return True
def to_json(self):
"""Retuns notification message as JSON"""
return PaymentResponse(self._xml).to_json()
def to_xml(self):
"""Retuns notification message as XML"""
return PaymentResponse(self._xml).to_xml()
| {
"content_hash": "3d66ddda9ef3bb16cda93a9835af368b",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 85,
"avg_line_length": 34.4478527607362,
"alnum_prop": 0.5991095280498664,
"repo_name": "srmccray/login-and-pay-with-amazon-sdk-python",
"id": "04a2f7211a550697338ef5c011de28810b9c401f",
"size": "5615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pay_with_amazon/ipn_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "103948"
}
],
"symlink_target": ""
} |
"""create notifications table
Revision ID: b2e218bdc6a2
Revises: f65b7be13efa
Create Date: 2017-03-31 20:44:45.212045
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b2e218bdc6a2'
down_revision = 'f65b7be13efa'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'notifications',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('users.id')),
sa.Column('type', sa.Text, nullable=False),
sa.Column('text', sa.Text, nullable=False),
sa.Column('link', sa.Text, nullable=False),
sa.Column('read', sa.Boolean, nullable=False)
)
def downgrade():
op.drop_table('notifications')
| {
"content_hash": "d0bc6b0434f2cf49111e89ab3ffddccf",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 70,
"avg_line_length": 23.08823529411765,
"alnum_prop": 0.6700636942675159,
"repo_name": "CrystalKoan/morpheus-api",
"id": "e1e7b4882e6ad25808e4bf7d6fdd51335cae9831",
"size": "785",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/model/alembic/versions/b2e218bdc6a2_create_notifications_table.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "940"
},
{
"name": "Mako",
"bytes": "493"
},
{
"name": "Python",
"bytes": "141781"
}
],
"symlink_target": ""
} |
__all__ = ["singleton", "classproperty", "cached_property"]
from cached_property import cached_property
from singleton import singleton
class ClassPropertyDescriptor(object):
def __init__(self, func, name=None, doc=None):
self.func = func.__func__
self.__name__ = name or self.func.__name__
self.__module__ = self.func.__module__
self.__doc__ = doc or self.func.__doc__
# 改用self.is_cached替换ClassPropertyDescriptor, 后者在类上使用装饰器时会失效
self.is_cached = False
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
if self.is_cached:
return klass.__dict__.get(self.__name__)
else:
value = self.func(klass)
setattr(klass, self.__name__, value)
self.is_cached = True
return value
def classproperty(func):
"""
Mimic werkzeug.utils's cached_property.
A decorator that converts a function into a lazy class property.
The function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@classproperty
def bar(cls):
# calculate something important here
return 42
Foo.bar # => 42
The class has to have a `__dict__` in order for this property to
work.
"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
| {
"content_hash": "372834c133263818b849a594ead4d6ad",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 72,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.6025641025641025,
"repo_name": "Luiti/etl_utils",
"id": "89a0417ea7047016b9200056f620e3f05d33f8f9",
"size": "1621",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "etl_utils/design_pattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46924"
},
{
"name": "Ruby",
"bytes": "1627"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod
from . import conditions_combinations as combinations
class Condition(ABC):
"""Base interface of conditions"""
@abstractmethod
def __call__(self, context):
"""Execute condition
Args:
context (dict): execution context
Returns:
bool
"""
def __and__(self, other):
"""Combine current condition with another one using the & operator
Returns:
AndCondition
"""
return combinations.AndCondition([self, other])
def __or__(self, other):
"""Combine current condition with another one using the | operator
Returns:
OrCondition
"""
return combinations.OrCondition([self, other])
class StaticCondition(Condition):
def __init__(self, value):
"""
Args:
value (bool): value returned by condition
"""
self._value = value
def __call__(self, context):
"""Execute condition
Args:
context (dict): execution context
Returns:
bool
"""
return self._value
class FalseCondition(StaticCondition):
"""Condition that always return False"""
def __init__(self):
super().__init__(False)
class TrueCondition(StaticCondition):
"""Condition that always return True"""
def __init__(self):
super().__init__(True)
| {
"content_hash": "9f8611b9cab070a4770f47370f1e2be5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 21.53731343283582,
"alnum_prop": 0.5675675675675675,
"repo_name": "Clustaar/clustaar.authorize",
"id": "6997b51f1fbe2bd07dd77458998e6d70da97a518",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/clustaar/authorize/conditions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20994"
}
],
"symlink_target": ""
} |
from flask import Flask, Response,request, jsonify, url_for
import pika
import sys
import os
import socket
app = Flask(__name__)
# Enable debugging if the DEBUG environment variable is set and starts with Y
# app.debug = os.environ.get("DEBUG", "").lower().startswith('y')
#
hostname = socket.gethostname()
# urandom = os.open("/dev/urandom", os.O_RDONLY)
@app.route('/')
def index():
url = ""
with app.test_request_context():
url = url_for('service', servicename="rabbitmq", queuename="images")
return url
@app.route('/service/<servicename>/queue/<queuename>')
def service(servicename, queuename):
try:
msg_count = count_queue_msg(servicename,queuename)
return jsonify(
err=False,
service=servicename,
load=msg_count,
queue=queuename
)
except :
return jsonify(
err=True,
msg="Error in connecting"+servicename
)
def count_queue_msg(service, queue):
url = "amqp://guest:guest@"+service+":5672"
print("connecting to : " +url)
connection = pika.BlockingConnection(pika.URLParameters(url))
# Open the channel
channel = connection.channel()
# Declare the queue
queue = channel.queue_declare(queue=queue,
passive=True,
durable=True,
exclusive=False,
auto_delete=False
)
connection.close()
c = queue.method.message_count
return c
if __name__ == "__main__":
app.run(host="0.0.0.0", port=3002)
| {
"content_hash": "4600a9f92a92432e2afdfcc74b84573b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.581022797288971,
"repo_name": "di-unipi-socc/DockerFinder",
"id": "ea6e8997d07fb2374662e6e703ee843cfce6cc09",
"size": "1623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "management/monitor/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1998"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "JavaScript",
"bytes": "25518"
},
{
"name": "Python",
"bytes": "172892"
},
{
"name": "Shell",
"bytes": "13615"
},
{
"name": "TypeScript",
"bytes": "22724"
}
],
"symlink_target": ""
} |
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
import threading
import time
class ExchGwApiBitflyer(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
super(ExchGwApiBitflyer, self).__init__()
@classmethod
def get_timestamp_offset(cls):
return 1000
@classmethod
def get_order_book_timestamp_field_name(cls):
return 'date'
@classmethod
def get_trades_timestamp_field_name(cls):
return 'exec_date'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'side'
@classmethod
def get_trade_id_field_name(cls):
return 'id'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'size'
@classmethod
def get_order_book_link(cls, instmt):
return 'https://api.bitflyer.jp/v1/getboard?product_code={}'.format(instmt.instmt_code)
@classmethod
def get_trades_link(cls, instmt):
return 'https://api.bitflyer.jp/v1/getexecutions?product_cdoe={}'.format(instmt.instmt_code)
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# No Date time information, has update id only
l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x['price'], reverse=True)
for i in range(0, 5):
l2_depth.bids[i].price = float(bids[i]['price']) if type(bids[i]['price']) != float else bids[i]['price']
l2_depth.bids[i].volume = float(bids[i]['size']) if type(bids[i]['size']) != float else bids[i]['size']
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x['price'])
for i in range(0, 5):
l2_depth.asks[i].price = float(asks[i]['price']) if type(asks[i]['price']) != float else asks[i]['price']
l2_depth.asks[i].volume = float(asks[i]['size']) if type(asks[i]['size']) != float else asks[i]['size']
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = raw[cls.get_trades_timestamp_field_name()]
try:
trade.date_time = datetime.strptime(date_time, '%Y-%m-%dT%H:%M:%S.%f').strftime('%Y%m%d %H:%M:%S.%f')
except Exception as e:
trade.date_time = datetime.strptime(date_time, '%Y-%m-%dT%H:%M:%S').strftime('%Y%m%d %H:%M:%S.%f')
# Trade side
trade.trade_side = Trade.parse_side(raw[cls.get_trade_side_field_name()])
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
# If verify cert, got <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:749)>
res = cls.request(cls.get_order_book_link(instmt), verify_cert=False)
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
link = cls.get_trades_link(instmt)
# If verify cert, got <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:749)>
res = cls.request(link, verify_cert=False)
trades = []
if len(res) > 0:
for t in res:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwBitflyer(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwApiBitflyer(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Bitflyer'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
Get order book worker thread
:param instmt: Instrument name
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
except Exception as e:
Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
time.sleep(1)
continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
if __name__ == '__main__':
exchange_name = 'Bitflyer'
instmt_name = 'BTC_JPY'
instmt_code = 'BTC_JPY'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
Logger.init_log()
db_client = SqlClientTemplate()
exch = ExchGwBitflyer([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
exch.start(instmt)
#exch.get_order_book_worker(instmt)
#exch.get_trades_worker(instmt)
| {
"content_hash": "91ac518ddead2292df6db4818ad15d02",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 125,
"avg_line_length": 34.50729927007299,
"alnum_prop": 0.5569539925965098,
"repo_name": "Aurora-Team/BitcoinExchangeFH",
"id": "57e40f7368e3174b0c333a02b924d1c97d238d6b",
"size": "9455",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "befh/exchanges/bitflyer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "325307"
}
],
"symlink_target": ""
} |
from pkg_resources import require
require("cothread==2.14")
from cothread.catools import *
import cothread
from Generic_BPMDevice import *
from subprocess import Popen, PIPE
import numpy as np
class SparkERXR_EPICS_BPMDevice(Generic_BPMDevice):
"""Libera BPM Device class that uses Epics to communicate with PVs.
All of the methods here will attempt to be generic enough to work for most Libera devices.
Some libera BPM devices will have extra functionality. To implement this make a child class
that will extend this one.
Attributes:
epicsID (str): Channel identifier string that will be used to access PVs.
"""
def _trigger_epics(self):
"""Private method to update the EPICS variables
This will write a value to the .PROC record that will update all of the
process variables on that database.
Args:
Returns:
"""
caput(self.epicsID + ".PROC", 1) # Write to the .PROC data base to update all of the values
def _read_epics_pv(self, pv):
"""Private method to read an Epics process variable.
Args:
pv (str): Name of the Epics process variable to read.
Returns:
variant: Value of requested process variable.
"""
self._trigger_epics() # Update all values before reading
return caget(self.epicsID + pv) # Read selected epics PV
def _write_epics_pv(self, pv, value):
"""Private method to read an Epics process variable.
Args:
pv (str): Name of the Epics process variable to read.
value (variant): The value to be written to the epics variable
Returns:
variant: Value of requested process variable after writing to it
"""
caput(self.epicsID+pv, value) # Write to EPICs PV
return self._read_epics_pv(pv)
def __init__(self, database, daq_type):
"""Initializes the Libera BPM device object and assigns it an ID.
Args:
dev_ID (str/int): The two digit ID number assigned to that specific BPM device.
Returns:
.
"""
if type(database) and type(daq_type) != str:
raise TypeError
self.epicsID = database+":signals:"+daq_type # Different signal types can be used
self._write_epics_pv(".SCAN", 0) # Required so that values can be read from he database
self._trigger_epics() # Triggers the first count
pv = ".X" # Pick a PV that is hosted on the device
node = connect(self.epicsID + pv, cainfo=True).host.split(":")[0] # Get the IP address of the host
host_info = Popen(["arp", "-n", node], stdout=PIPE).communicate()[0] # Get info about the host using arp
host_info = host_info.split("\n")[1] # Split the info sent back
index = host_info.find(":") # Find the first ":", used in the MAC address
host_info = host_info[index - 2:index + 15] # Get the MAC address
self.macaddress = host_info
print "Opened link with" + self.get_device_ID() # Tells the user they have connected to the device
def __del__(self):
print "Closed link with" + self.get_device_ID() # Tells the user they have connected to the device
def get_X_position(self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: X position in mm
"""
self._trigger_epics() # Triggers the acquisition
x = self._read_epics_pv(".X") # Gets the PV value
x = np.mean(x) # Gets the mean PV value
x = x/1000000.0 # Converts from nm to mm
return x
def get_Y_position(self):
"""Override method, gets the calculated X position of the beam.
Args:
Returns:
float: Y position in mm
"""
self._trigger_epics() # Triggers the acquisition
y = self._read_epics_pv(".Y") # Gets the PV value
y = np.mean(y) # Gets the mean PV value
y = y/1000000.0 # Converts from nm to mm
return y
def get_beam_current(self):
"""Override method, gets the beam current read by the BPMs.
Args:
Returns:
float: Current in mA
"""
# This function is not finished it needs to convert from ADC counts to mA
self._trigger_epics() # Triggers the acquisition
daq_sum = self._read_epics_pv(".Sum") # Gets the PV value
daq_sum = np.mean(daq_sum) # Gets the mean PV value
return daq_sum
def get_input_power(self):
"""Override method, gets the input power of the signals input to the device
Args:
Returns:
float: Input power in dBm
"""
# This function is not finished it needs to convert from ADC counts to dBm
self._trigger_epics() # Triggers the acquisition
daq_sum = self._read_epics_pv(".Sum") # Gets the PV value
daq_sum = np.mean(daq_sum) # Gets the mean PV value
return daq_sum
def get_ADC_sum(self):
"""Override method, gets the input power of the signals input to the device
Args:
Returns:
int: Input power in dBm
"""
self._trigger_epics() # Triggers the acquisition
daq_sum = self._read_epics_pv(".Sum")
daq_sum = np.mean(daq_sum) # Gets the PV value
daq_sum = np.round(daq_sum) # Rounds the mean to the nearest integer
return daq_sum
def get_raw_BPM_buttons(self):
"""Override method, gets the raw signal from each BPM.
Args:
Returns:
int: Raw signal from BPM A
int: Raw signal from BPM B
int: Raw signal from BPM C
int: Raw signal from BPM D
"""
self._trigger_epics() # triggers the acquisition
a = self._read_epics_pv(".A") # gets the PV value
b = self._read_epics_pv(".B")
c = self._read_epics_pv(".C")
d = self._read_epics_pv(".D")
a = np.mean(a) # gets the mean PV value
b = np.mean(b)
c = np.mean(c)
d = np.mean(d)
a = np.round(a) # Round the PV to the nearest integer
b = np.round(b)
c = np.round(c)
d = np.round(d)
return a, b, c, d
def get_normalised_BPM_buttons(self):
"""Override method, gets the normalised signal from each BPM.
Args:
Returns:
float: Normalised signal from BPM A
float: Normalised signal from BPM B
float: Normalised signal from BPM C
float: Normalised signal from BPM D
"""
self._trigger_epics() # Triggers the acquisition
a, b, c, d = self.get_raw_BPM_buttons() # Gets the RAW bpm buttons
sum_button = a + b + c + d # Calculates the BPM sum
sum_button = sum_button/4.0 # Gets the average BPM sum
a = a/sum_button # Normalises the A button
b = b/sum_button # Normalises the B button
c = c/sum_button # Normalises the C button
d = d/sum_button # Normalises the D button
return (a,b,c,d)
def get_device_ID(self):
"""Override method, gets the device's epics ID and MAC address
Args:
Returns:
str: Device with epics channel ID and MAC address
"""
return "Libera BPM with the Epics ID " + "\"" + self.epicsID + "\" and the MAC Address \"" + self.macaddress + "\""
def get_input_tolerance(self):
"""Override method, gets the maximum input power the device can take
The devices will break if the input power is too high, as such, each device has their
own tolerances, this function will return this tolerance. It should be used to ensure
that the power put into the device is not too high to break the device.
Args:
Returns:
float: max input power in dBm
"""
return -40 # The max continuous input the spark can withstand in dBm
| {
"content_hash": "b0ad88669609ed04de67d055be24d9cb",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 123,
"avg_line_length": 36.066964285714285,
"alnum_prop": 0.5933902710731526,
"repo_name": "dharryman/BPM_Test_Framework",
"id": "cccc5897c9e9dac672532fd40084f12725a15b05",
"size": "8079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BPMDevice/SparkERXR_EPICS_BPMDevice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "169878"
},
{
"name": "TeX",
"bytes": "856"
}
],
"symlink_target": ""
} |
import socket
from os import mkdir
from os.path import join, exists
from sys import platform
from asyncio import sleep
from math import sqrt
from uuid import uuid4
from enum import Enum
from csv import DictReader
from cyrandom import choice, shuffle, uniform
from time import time
from pickle import dump as pickle_dump, load as pickle_load, HIGHEST_PROTOCOL
from geopy import Point
from geopy.distance import distance
from aiopogo import utilities as pgoapi_utils
from pogeo import get_distance
from . import bounds, sanitized as conf
IPHONES = {'iPhone5,1': 'N41AP',
'iPhone5,2': 'N42AP',
'iPhone5,3': 'N48AP',
'iPhone5,4': 'N49AP',
'iPhone6,1': 'N51AP',
'iPhone6,2': 'N53AP',
'iPhone7,1': 'N56AP',
'iPhone7,2': 'N61AP',
'iPhone8,1': 'N71AP',
'iPhone8,2': 'N66AP',
'iPhone8,4': 'N69AP',
'iPhone9,1': 'D10AP',
'iPhone9,2': 'D11AP',
'iPhone9,3': 'D101AP',
'iPhone9,4': 'D111AP'}
class Units(Enum):
miles = 1
kilometers = 2
meters = 3
def best_factors(n):
return next(((i, n//i) for i in range(int(n**0.5), 0, -1) if n % i == 0))
def percentage_split(seq, percentages):
percentages[-1] += 1.0 - sum(percentages)
prv = 0
size = len(seq)
cum_percentage = 0
for p in percentages:
cum_percentage += p
nxt = int(cum_percentage * size)
yield seq[prv:nxt]
prv = nxt
def get_start_coords(worker_no, grid=conf.GRID, bounds=bounds):
"""Returns center of square for given worker"""
per_column = int((grid[0] * grid[1]) / grid[0])
column = worker_no % per_column
row = int(worker_no / per_column)
part_lat = (bounds.south - bounds.north) / grid[0]
part_lon = (bounds.east - bounds.west) / grid[1]
start_lat = bounds.north + part_lat * row + part_lat / 2
start_lon = bounds.west + part_lon * column + part_lon / 2
return start_lat, start_lon
def float_range(start, end, step):
"""range for floats, also capable of iterating backwards"""
if start > end:
while end <= start:
yield start
start += -step
else:
while start <= end:
yield start
start += step
def get_gains(dist=70):
"""Returns lat and lon gain
Gain is space between circles.
"""
start = Point(*bounds.center)
base = dist * sqrt(3)
height = base * sqrt(3) / 2
dis_a = distance(meters=base)
dis_h = distance(meters=height)
lon_gain = dis_a.destination(point=start, bearing=90).longitude
lat_gain = dis_h.destination(point=start, bearing=0).latitude
return abs(start.latitude - lat_gain), abs(start.longitude - lon_gain)
def round_coords(point, precision, _round=round):
return _round(point[0], precision), _round(point[1], precision)
def get_bootstrap_points(bounds):
coords = []
if bounds.multi:
for b in bounds.polygons:
coords.extend(get_bootstrap_points(b))
return coords
lat_gain, lon_gain = get_gains(conf.BOOTSTRAP_RADIUS)
west, east = bounds.west, bounds.east
bound = bool(bounds)
for map_row, lat in enumerate(
float_range(bounds.south, bounds.north, lat_gain)
):
row_start_lon = west
if map_row % 2 != 0:
row_start_lon -= 0.5 * lon_gain
for lon in float_range(row_start_lon, east, lon_gain):
point = lat, lon
if not bound or point in bounds:
coords.append(point)
shuffle(coords)
return coords
def get_device_info(account):
device_info = {'brand': 'Apple',
'device': 'iPhone',
'manufacturer': 'Apple'}
try:
if account['iOS'].startswith('1'):
device_info['product'] = 'iOS'
else:
device_info['product'] = 'iPhone OS'
device_info['hardware'] = account['model'] + '\x00'
device_info['model'] = IPHONES[account['model']] + '\x00'
except (KeyError, AttributeError):
account = generate_device_info(account)
return get_device_info(account)
device_info['version'] = account['iOS']
device_info['device_id'] = account['id']
return device_info
def generate_device_info(account):
ios8 = ('8.0', '8.0.1', '8.0.2', '8.1', '8.1.1', '8.1.2', '8.1.3', '8.2', '8.3', '8.4', '8.4.1')
ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1', '9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5')
# 10.0 was only for iPhone 7 and 7 Plus, and is rare
ios10 = ('10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2', '10.3.3')
devices = tuple(IPHONES.keys())
account['model'] = choice(devices)
account['id'] = uuid4().hex
if account['model'] in ('iPhone9,1', 'iPhone9,2',
'iPhone9,3', 'iPhone9,4'):
account['iOS'] = choice(ios10)
elif account['model'] in ('iPhone8,1', 'iPhone8,2'):
account['iOS'] = choice(ios9 + ios10)
elif account['model'] == 'iPhone8,4':
# iPhone SE started on 9.3
account['iOS'] = choice(('9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5') + ios10)
else:
account['iOS'] = choice(ios8 + ios9 + ios10)
return account
def create_account_dict(account):
if isinstance(account, (tuple, list)):
length = len(account)
else:
raise TypeError('Account must be a tuple or list.')
if length not in (1, 3, 4, 6):
raise ValueError('Each account should have either 3 (account info only) or 6 values (account and device info).')
if length in (1, 4) and (not conf.PASS or not conf.PROVIDER):
raise ValueError('No default PASS or PROVIDER are set.')
entry = {}
entry['username'] = account[0]
if length == 1 or length == 4:
entry['password'], entry['provider'] = conf.PASS, conf.PROVIDER
else:
entry['password'], entry['provider'] = account[1:3]
if length == 4 or length == 6:
entry['model'], entry['iOS'], entry['id'] = account[-3:]
else:
entry = generate_device_info(entry)
entry['time'] = 0
entry['captcha'] = False
entry['banned'] = False
return entry
def accounts_from_config(pickled_accounts=None):
accounts = {}
for account in conf.ACCOUNTS:
username = account[0]
if pickled_accounts and username in pickled_accounts:
accounts[username] = pickled_accounts[username]
if len(account) == 3 or len(account) == 6:
accounts[username]['password'] = account[1]
accounts[username]['provider'] = account[2]
else:
accounts[username] = create_account_dict(account)
return accounts
def accounts_from_csv(new_accounts, pickled_accounts):
accounts = {}
for username, account in new_accounts.items():
if pickled_accounts:
pickled_account = pickled_accounts.get(username)
if pickled_account:
if pickled_account['password'] != account['password']:
del pickled_account['password']
account.update(pickled_account)
accounts[username] = account
continue
account['provider'] = account.get('provider') or 'ptc'
if not all(account.get(x) for x in ('model', 'iOS', 'id')):
account = generate_device_info(account)
account['time'] = 0
account['captcha'] = False
account['banned'] = False
accounts[username] = account
return accounts
def get_current_hour(now=None, _time=time):
now = now or _time()
return round(now - (now % 3600))
def time_until_time(seconds, seen=None, _time=time):
current_seconds = seen or _time() % 3600
if current_seconds > seconds:
return seconds + 3600 - current_seconds
elif current_seconds + 3600 < seconds:
return seconds - 3600 - current_seconds
else:
return seconds - current_seconds
def get_address():
if conf.MANAGER_ADDRESS:
return conf.MANAGER_ADDRESS
if platform == 'win32':
return r'\\.\pipe\monocle'
if hasattr(socket, 'AF_UNIX'):
return join(conf.DIRECTORY, 'monocle.sock')
return ('127.0.0.1', 5001)
def load_pickle(name, raise_exception=False):
location = join(conf.DIRECTORY, 'pickles', '{}.pickle'.format(name))
try:
with open(location, 'rb') as f:
return pickle_load(f)
except (FileNotFoundError, EOFError):
if raise_exception:
raise FileNotFoundError
else:
return None
def dump_pickle(name, var):
folder = join(conf.DIRECTORY, 'pickles')
try:
mkdir(folder)
except FileExistsError:
pass
except Exception as e:
raise OSError("Failed to create 'pickles' folder, please create it manually") from e
location = join(folder, '{}.pickle'.format(name))
with open(location, 'wb') as f:
pickle_dump(var, f, HIGHEST_PROTOCOL)
def load_accounts():
pickled_accounts = load_pickle('accounts')
if conf.ACCOUNTS_CSV:
accounts = load_accounts_csv()
if pickled_accounts and set(pickled_accounts) == set(accounts):
return pickled_accounts
else:
accounts = accounts_from_csv(accounts, pickled_accounts)
elif conf.ACCOUNTS:
if pickled_accounts and set(pickled_accounts) == set(acc[0] for acc in conf.ACCOUNTS):
return pickled_accounts
else:
accounts = accounts_from_config(pickled_accounts)
else:
raise ValueError('Must provide accounts in a CSV or your config file.')
dump_pickle('accounts', accounts)
return accounts
def load_accounts_csv():
csv_location = join(conf.DIRECTORY, conf.ACCOUNTS_CSV)
with open(csv_location, 'rt') as f:
accounts = {}
reader = DictReader(f)
for row in reader:
accounts[row['username']] = dict(row)
return accounts
def randomize_point(point, amount=0.0003, randomize=uniform):
'''Randomize point, by up to ~47 meters by default.'''
lat, lon = point
return (
randomize(lat - amount, lat + amount),
randomize(lon - amount, lon + amount)
)
def calc_pokemon_level(cp_multiplier):
if cp_multiplier < 0.734:
pokemon_level = (58.35178527 * cp_multiplier * cp_multiplier - 2.838007664 * cp_multiplier + 0.8539209906)
else:
pokemon_level = 171.0112688 * cp_multiplier - 95.20425243
pokemon_level = int((round(pokemon_level) * 2) / 2)
return pokemon_level
| {
"content_hash": "5f9edf177cdfa2e9b0989cb8e6adb2ad",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 120,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.5942669172932331,
"repo_name": "tallypokemap/Monocle",
"id": "b22ad895c372fd003234925d28b38498d8283d26",
"size": "10640",
"binary": false,
"copies": "1",
"ref": "refs/heads/local",
"path": "monocle/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3415"
},
{
"name": "HTML",
"bytes": "24630"
},
{
"name": "JavaScript",
"bytes": "22039"
},
{
"name": "PLpgSQL",
"bytes": "1059"
},
{
"name": "Python",
"bytes": "345490"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
with open("README.md", "r") as readme:
long_description = readme.read()
setup(
name='mrnag',
version='1.0.0.dev0',
description='Utilities for aggregating information about open merge requests.',
long_description=long_description,
packages=find_packages(exclude=['tests*']),
install_requires=[
'pendulum==2.1.0',
'PyYaml==5.3.1',
'requests==2.23.0'
],
test_suite='tests'
)
| {
"content_hash": "29a5438e38075ef5d495c0c01abde286",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 83,
"avg_line_length": 24,
"alnum_prop": 0.6354166666666666,
"repo_name": "hg-jt/mrnag",
"id": "672df81d75ad81e39b0da5ed3f153dda2ba250e7",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "435"
},
{
"name": "Python",
"bytes": "15944"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from six import PY2
import sys
import time
if PY2:
import exceptions
else:
import builtins as exceptions
from robot import utils
from objecttoreturn import ObjectToReturn
class ExampleLibrary:
def print_(self, msg, stream='stdout'):
"""Print given message to selected stream (stdout or stderr)"""
out_stream = getattr(sys, stream)
out_stream.write(utils.unic(msg))
def print_n_times(self, msg, count, delay=0):
"""Print given message n times"""
for i in range(int(count)):
print(msg)
self._sleep(delay)
def print_many(self, *msgs):
"""Print given messages"""
for msg in msgs:
print(msg, end=' ')
print
def print_to_stdout_and_stderr(self, msg):
sys.stdout.write('stdout: ' + msg)
sys.stderr.write('stderr: ' + msg)
def print_to_python_and_java_streams(self):
import ExampleJavaLibrary
print('*INFO* First message to Python')
getattr(ExampleJavaLibrary(), 'print')('*INFO* Second message to Java')
print('*INFO* Last message to Python')
def single_line_doc(self):
"""One line keyword documentation."""
pass
def multi_line_doc(self):
"""Only the first line of a multi line keyword doc should be logged.
Thus for example this text here should not be there
and neither should this.
"""
pass
def exception(self, name, msg="", class_only=False):
"""Raise exception with given name and message"""
exception = getattr(exceptions, name)
if class_only:
raise exception
raise exception(msg)
def external_exception(self, name, msg):
ObjectToReturn('failure').exception(name, msg)
def return_string_from_library(self,string='This is a string from Library'):
return string
def return_list_from_library(self, *args):
return list(args)
def return_three_strings_from_library(self, one='one', two='two', three='three'):
return one, two, three
def return_object(self, name='<noname>'):
return ObjectToReturn(name)
def check_object_name(self, object, name):
assert object.name == name, '%s != %s' % (object.name, name)
def set_object_name(self, object, name):
object.name = name
def set_attribute(self, name, value):
setattr(self, utils.normalize(name), utils.normalize(value))
def get_attribute(self, name):
return getattr(self, utils.normalize(name))
def check_attribute(self, name, expected):
try:
actual = getattr(self, utils.normalize(name))
except AttributeError:
raise AssertionError("Attribute '%s' not set" % name)
if not utils.eq(actual, expected):
raise AssertionError("Attribute '%s' was '%s', expected '%s'"
% (name, actual, expected))
def check_attribute_not_set(self, name):
if hasattr(self, utils.normalize(name)):
raise AssertionError("Attribute '%s' should not be set" % name)
def backslashes(self, count=1):
return '\\' * int(count)
def read_and_log_file(self, path, binary=False):
mode = binary and 'rb' or 'r'
_file = open(path, mode)
print(_file.read())
_file.close()
def print_control_chars(self):
print('\033[31mRED\033[m\033[32mGREEN\033[m')
def long_message(self, line_length, line_count, chars='a'):
line_length = int(line_length)
line_count = int(line_count)
msg = chars*line_length + '\n'
print(msg*line_count)
def loop_forever(self, no_print=False):
i = 0
while True:
i += 1
self._sleep(1)
if not no_print:
print('Looping forever: %d' % i)
def write_to_file_after_sleeping(self, path, sec, msg=None):
f = open(path, 'w')
try:
self._sleep(sec)
f.write(msg or 'Slept %s seconds' % sec)
finally: # may be killed by timeouts
f.close()
def sleep_without_logging(self, timestr):
seconds = utils.timestr_to_secs(timestr)
self._sleep(seconds)
def _sleep(self, seconds):
endtime = time.time() + float(seconds)
while True:
remaining = endtime - time.time()
if remaining <= 0:
break
time.sleep(min(remaining, 0.1))
def return_consumable_iterable(self, *values):
return iter(values)
def return_list_subclass(self, *values):
return _MyList(values)
def return_unrepresentable_objects(self, identifier=None, just_one=False):
class FailiningStr(object):
def __init__(self, identifier=identifier):
self.identifier = identifier
def __str__(self):
raise RuntimeError
def __unicode__(self):
raise UnicodeError
class FailiningUnicode(object):
def __init__(self, identifier=identifier):
self.identifier = identifier
def __str__(self):
raise ValueError
def __unicode__(self):
raise ValueError
if just_one:
return FailiningStr()
return FailiningStr(), FailiningUnicode()
def fail_with_suppressed_exception_name(self, msg):
raise MyException(msg)
class _MyList(list):
pass
class MyException(AssertionError):
ROBOT_SUPPRESS_NAME = True
| {
"content_hash": "f29973fbb647ceb95c4df01f4e4827e8",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 85,
"avg_line_length": 30.28108108108108,
"alnum_prop": 0.5885398072117101,
"repo_name": "userzimmermann/robotframework",
"id": "c31afc9605587874a7efb7505269376325402ea5",
"size": "5602",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "atest/testresources/testlibs/ExampleLibrary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140949"
},
{
"name": "Java",
"bytes": "59815"
},
{
"name": "JavaScript",
"bytes": "160761"
},
{
"name": "Python",
"bytes": "2179296"
},
{
"name": "RobotFramework",
"bytes": "2033202"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
"""Dig through jobs/FOO.env, and execute a janitor pass for each of the project"""
import argparse
import json
import os
import re
import subprocess
import sys
ORIG_CWD = os.getcwd() # Checkout changes cwd
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def parse_project(path):
"""Parse target env file and return GCP project name."""
with open(path, 'r') as fp:
env = fp.read()
match = re.search(r'PROJECT=([^\n"]+)', env)
if match:
project = match.group(1)
return project
return None
def clean_project(project, hours=24, dryrun=False, ratelimit=None):
"""Execute janitor for target GCP project """
# Multiple jobs can share the same project, woooo
if project in CHECKED:
return
CHECKED.add(project)
cmd = ['python', test_infra('boskos/janitor/janitor.py'), '--project=%s' % project]
cmd.append('--hour=%d' % hours)
if dryrun:
cmd.append('--dryrun')
if ratelimit:
cmd.append('--ratelimit=%d' % ratelimit)
try:
check(*cmd)
except subprocess.CalledProcessError:
FAILED.append(project)
BLACKLIST = [
'kubernetes-scale', # Let it's up/down job handle the resources
'k8s-scale-testing', # As it can be running some manual experiments
'k8s-jkns-e2e-gce-f8n-1-7', # federation projects should use fedtidy to clean up
'k8s-jkns-e2e-gce-f8n-1-8', # federation projects should use fedtidy to clean up
]
PR_PROJECTS = {
# k8s-jkns-pr-bldr-e2e-gce-fdrtn
# k8s-jkns-pr-cnry-e2e-gce-fdrtn
# cleans up resources older than 3h
# which is more than enough for presubmit jobs to finish.
'k8s-jkns-pr-gce': 3,
'k8s-jkns-pr-gce-bazel': 3,
'k8s-jkns-pr-gce-etcd3': 3,
'k8s-jkns-pr-gci-gce': 3,
'k8s-jkns-pr-gci-gke': 3,
'k8s-jkns-pr-gci-kubemark': 3,
'k8s-jkns-pr-gke': 3,
'k8s-jkns-pr-kubeadm': 3,
'k8s-jkns-pr-kubemark': 3,
'k8s-jkns-pr-node-e2e': 3,
'k8s-jkns-pr-gce-gpus': 3,
'k8s-gke-gpu-pr': 3,
}
SCALE_PROJECT = {
'k8s-presubmit-scale': 3,
}
def check_predefine_jobs(jobs, ratelimit):
"""Handle predefined jobs"""
for project, expire in jobs.iteritems():
clean_project(project, hours=expire, ratelimit=ratelimit)
def check_ci_jobs():
"""Handle CI jobs"""
with open(test_infra('jobs/config.json')) as fp:
config = json.load(fp)
match_re = re.compile(r'--gcp-project=(.+)')
for value in config.values():
clean_hours = 24
found = None
for arg in value.get('args', []):
# lifetime for soak cluster should be 7 days
# clean up everything older than 10 days to prevent leak
if '--soak' in arg:
clean_hours = 24 * 10
mat = match_re.match(arg)
if not mat:
continue
project = mat.group(1)
if any(b in project for b in BLACKLIST):
print >>sys.stderr, 'Project %r is blacklisted in ci-janitor' % project
continue
if project in PR_PROJECTS or project in SCALE_PROJECT:
continue # CI janitor skips all PR jobs
found = project
if found:
clean_project(found, clean_hours)
# Hard code node-ci project here
clean_project('k8s-jkns-ci-node-e2e')
def main(mode, ratelimit, projects, age):
"""Run janitor for each project."""
if mode == 'pr':
check_predefine_jobs(PR_PROJECTS, ratelimit)
elif mode == 'scale':
check_predefine_jobs(SCALE_PROJECT, ratelimit)
elif mode == 'custom':
projs = str.split(projects, ',')
for proj in projs:
clean_project(proj.strip(), hours=age, ratelimit=ratelimit)
else:
check_ci_jobs()
# Summary
print 'Janitor checked %d project, %d failed to clean up.' % (len(CHECKED), len(FAILED))
if FAILED:
print >>sys.stderr, 'Failed projects: %r' % FAILED
exit(1)
if __name__ == '__main__':
# keep some metric
CHECKED = set()
FAILED = []
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--mode', default='ci', choices=['ci', 'pr', 'scale', 'custom'],
help='Which type of projects to clear')
PARSER.add_argument(
'--ratelimit', type=int,
help='Max number of resources to clear in one gcloud delete call (passed into janitor.py)')
PARSER.add_argument(
'--projects', type=str,
help='Comma separated list of projects to clean up. Only applicable in custom mode.')
PARSER.add_argument(
'--age', type=int,
help='Expiry age for projects, in hours. Only applicable in custom mode.')
ARGS = PARSER.parse_args()
main(ARGS.mode, ARGS.ratelimit, ARGS.projects, ARGS.age)
| {
"content_hash": "34d6dd3486e345dd4f5d0a123eff0b15",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 99,
"avg_line_length": 31.765822784810126,
"alnum_prop": 0.6096832038254633,
"repo_name": "mindprince/test-infra",
"id": "a86e5b38d7ef71de65d329cc898342c77bce0a39",
"size": "5717",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scenarios/kubernetes_janitor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "27310"
},
{
"name": "Dockerfile",
"bytes": "55171"
},
{
"name": "Go",
"bytes": "4139171"
},
{
"name": "HTML",
"bytes": "75973"
},
{
"name": "JavaScript",
"bytes": "215316"
},
{
"name": "Makefile",
"bytes": "63432"
},
{
"name": "Python",
"bytes": "1014707"
},
{
"name": "Roff",
"bytes": "5462"
},
{
"name": "Shell",
"bytes": "109573"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
} |
import unittest
import checksieve
class TestSetFlag(unittest.TestCase):
def test_setflag(self):
sieve = '''
require ["imap4flags"];
setflag "\\Seen";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_setflag_with_variable(self):
sieve = '''
require ["imap4flags", "fileinto"];
if header :contains "from" "boss@frobnitzm.example.edu" {
setflag "flagvar" "\\Flagged";
fileinto :flags "${flagvar}" "INBOX.From Boss";
}
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_setflag_no_args(self):
sieve = '''
require ["imap4flags"];
setflag;
'''
self.assertTrue(checksieve.parse_string(sieve, True))
class TestAddFlag(unittest.TestCase):
def test_addflag(self):
sieve = '''
require ["imap4flags"];
addflag "\\Seen";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_addflag_with_var(self):
sieve = '''
require ["imap4flags"];
addflag "flagvar" ["\\Deleted", "\\Answered"];
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_addflag_no_args(self):
sieve = '''
require ["imap4flags"];
addflag;
'''
self.assertTrue(checksieve.parse_string(sieve, True))
class TestRemoveFlag(unittest.TestCase):
def test_removeflag(self):
sieve = '''
require ["imap4flags"];
removeflag "\\Seen";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_removeflag_with_var(self):
sieve = '''
require ["imap4flags"];
removeflag "flagvar" ["\\Deleted", "\\Answered"];
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_removeflag_no_args(self):
sieve = '''
require ["imap4flags"];
removeflag;
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main() | {
"content_hash": "099e45544cae508a7ce83ff20d5b1ed5",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 65,
"avg_line_length": 26.860759493670887,
"alnum_prop": 0.5645617342130066,
"repo_name": "dburkart/mail-sieve-verifier",
"id": "6add88d66ad690ed420573156a485386a68d931e",
"size": "2122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/5232/actions_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "230157"
},
{
"name": "Lex",
"bytes": "4054"
},
{
"name": "Makefile",
"bytes": "1745"
},
{
"name": "Python",
"bytes": "46955"
}
],
"symlink_target": ""
} |
from .cmd_base import DoitCmdBase, check_tasks_exist
from .cmd_base import subtasks_iter
import os
class ResetDep(DoitCmdBase):
name = "reset-dep"
doc_purpose = ("recompute and save the state of file dependencies without "
"executing actions")
doc_usage = "[TASK ...]"
cmd_options = ()
doc_description = """
This command allows to recompute the information on file dependencies
(timestamp, md5sum, ... depending on the ``check_file_uptodate`` setting), and
save this in the database, without executing the actions.
The command run on all tasks by default, but it is possible to specify a list
of tasks to work on.
This is useful when the targets of your tasks already exist, and you want doit
to consider your tasks as up-to-date. One use-case for this command is when you
change the ``check_file_uptodate`` setting, which cause doit to consider all
your tasks as not up-to-date. It is also useful if you start using doit while
some of your data as already been computed, or when you add a file dependency
to a task that has already run.
"""
def _execute(self, pos_args=None):
filter_tasks = pos_args
# dict of all tasks
tasks = dict([(t.name, t) for t in self.task_list])
# select tasks that command will be applied to
if filter_tasks:
# list only tasks passed on command line
check_tasks_exist(tasks, filter_tasks)
# get task by name
task_list = []
for name in filter_tasks:
task = tasks[name]
task_list.append(task)
task_list.extend(subtasks_iter(tasks, task))
else:
task_list = self.task_list
write = self.outstream.write
for task in task_list:
# Get these now because dep_manager.get_status will remove the task
# from the db if the checker changed.
values = self.dep_manager.get_values(task.name)
result = self.dep_manager.get_result(task.name)
missing_deps = [dep for dep in task.file_dep
if not os.path.exists(dep)]
if len(missing_deps) > 0:
write("failed {} (Dependent file '{}' does not "
"exist.)\n".format(task.name, "', '".join(missing_deps)))
continue
res = self.dep_manager.get_status(task, tasks)
# An 'up-to-date' status means that it is useless to recompute the
# state: file deps and targets exists, the state has not changed,
# there is nothing more to do.
if res.status == 'up-to-date':
write("skip {}\n".format(task.name))
continue
task.values = values
self.dep_manager.save_success(task, result_hash=result)
write("processed {}\n".format(task.name))
self.dep_manager.close()
| {
"content_hash": "26c11d7da1c2a22ef1c966cf7e3b5d97",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 38.973333333333336,
"alnum_prop": 0.6117003079028396,
"repo_name": "JohannesBuchner/doit",
"id": "fffa16b90e1b820e90e55ef125dc9f0a572377c5",
"size": "2923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doit/cmd_resetdep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "442"
},
{
"name": "Python",
"bytes": "541824"
}
],
"symlink_target": ""
} |
from collections import Counter
class Solution(object):
@staticmethod
def majorityElement(nums):
"""
majority_element == PEP8 (forced mixedCase by LeetCode)
:type nums: List[int]
:rtype: int
"""
return Counter(nums).most_common(1)[0][0]
| {
"content_hash": "766275de6a945a45a00caecb38cb7162",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.6033898305084746,
"repo_name": "the-zebulan/LeetCode",
"id": "7b22500ae9f819dbd843ef81d9d15197134d6e1c",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Easy/majority_element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16299"
}
],
"symlink_target": ""
} |
"""
Django settings for django_event_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's5yqa0o(=dvl36fi*pp%r!6=!2(m^51r)i%gu1o#w9^tb=nbf3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_event',
'example',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_event_example.urls'
WSGI_APPLICATION = 'django_event_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CACHES = {
'default': '',
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
CELERY_TIMEZONE = 'Europe/Moscow'
BROKER_URL = (
'redis://localhost:6379/0'
)
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_DEFAULT_QUEUE = 'default'
# BROKER_URL = (
# 'amqp://admim:admin@localhost:5672/default'
# )
# CELERY_RESULT_BACKEND = 'amqp'
# CELERY_DEFAULT_QUEUE = 'default'
from .celery import *
DJANGO_EVENT = {
# 'BACKEND': 'rabbitmq',
# 'BACKEND_OPTIONS': {
# 'HOST': 'localhost',
# 'PORT': 5672,
# 'VIRTUAL_HOST': 'default',
# 'USERNAME': 'admin',
# 'PASSWORD': 'admin',
# 'QUEUE_NAME': 'default',
# },
'BACKEND': 'redis',
'BACKEND_OPTIONS': {
'HOST': 'localhost',
'PORT': 6379,
'PASSWORD': '',
'DB': 0,
},
'TORNADO_OPTIONS': {
'HOST': '',
'PORT': 8989
},
'LISTENERS': {
'example_event_type':
'django_event.subscriber.listeners.SendMessageListener',
},
'STORE_DAYS': 7,
}
| {
"content_hash": "a220ff9964b33973efcbe9d53846cad6",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 71,
"avg_line_length": 22.594202898550726,
"alnum_prop": 0.6545862732520846,
"repo_name": "ailove-dev/django-event",
"id": "8ddc7be3169ae125f39b43673b7d86ab22190dda",
"size": "3118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/django_event_example/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "93832"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
urlpatterns = patterns("",
url(r"^$", "changesets.views.changesets", name="changeset_index"),
)
| {
"content_hash": "964fc849c4025e656a91f25ed0b8c1e5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 70,
"avg_line_length": 28.2,
"alnum_prop": 0.6950354609929078,
"repo_name": "hbussell/pinax-tracker",
"id": "748cc1bf5a42708b032a2eb2c3ef34d4cfb9f9c2",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/changesets/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "266946"
},
{
"name": "Python",
"bytes": "270982"
},
{
"name": "Shell",
"bytes": "8773"
}
],
"symlink_target": ""
} |
"""
sphinxcontrib.gravatar.nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: tell-k <ffk2005@gmail.com>
:copyright: tell-k. All Rights Reserved.
"""
from __future__ import (
division,
print_function,
absolute_import,
unicode_literals,
)
import os
import posixpath
import hashlib
from docutils import nodes
from sphinx.util.osutil import ensuredir
from sphinx.errors import SphinxError
from sphinxcontrib.gravatar.compat import urlopen, urlencode
class gravatar_image(nodes.General, nodes.Inline, nodes.Element):
pass
class GravatarError(SphinxError):
category = 'sphinxcontrib.gravatar error'
def _hash(email):
""" Creating the hash for gravatar.
:refs: https://en.gravatar.com/site/implement/hash/
"""
return hashlib.md5(email.encode('utf-8')).hexdigest()
def build_gravatar_image_url(email, options):
""" Build gravatar image url.
:refs: https://en.gravatar.com/site/implement/images/
"""
query_params = {}
url = "http://www.gravatar.com/avatar/{0}".format(_hash(email))
if options.get("size"):
query_params.update({'s': options.get("size")})
if options.get("default"):
query_params.update({'d': options.get("default")})
if options.get("force_default"):
query_params.update({'f': 'y'})
if options.get("rating"):
query_params.update({'r': options.get("rating")})
query = "?" + urlencode(query_params) if query_params else ''
return url + query
def build_gravatar_profile_url(email):
""" Build gravatar profile url.
:refs: https://en.gravatar.com/site/implement/profile/
"""
return "http://www.gravatar.com/{0}".format(_hash(email))
def get_image_filename(self, node, prefix='gravatar'):
""" Get path of output file. """
opt = "-".join([str(v) for v in node["options"].values()])
if opt:
opt = "-" + opt
fname = '{0}-{1}{2}.png'
fname = fname.format(prefix, _hash(node['email']), opt)
if hasattr(self.builder, 'imgpath'):
# HTML
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = os.path.join(self.builder.outdir, '_images', fname)
else:
# LaTeX
relfn = fname
outfn = os.path.join(self.builder.outdir, fname)
if os.path.isfile(outfn):
return relfn, outfn
ensuredir(os.path.dirname(outfn))
return relfn, outfn
def save_gravatar_image(outfn, node):
""" Save gravatar image file to local. """
if not node['force_refresh'] and os.path.exists(outfn):
return
fd = urlopen(
build_gravatar_image_url(node['email'], node['options'])
)
if not hasattr(fd, 'getcode') or fd.getcode() == 200:
with open(outfn, 'wb') as fp:
fp.write(fd.read())
else:
msg = "Can't fecth gravatar image for '{0}'"
msg = msg.format(node['username'])
raise GravatarError(msg)
def html_visit_gravatar_image(self, node):
fname, outfn = get_image_filename(self, node)
save_gravatar_image(outfn, node)
linktag_format = '<a href="{0}" class="gravatar-link">{1}</a>'
imgtag_format = '<img src="{0}" alt="{1}" class="{2}" />'
imgtag = imgtag_format.format(fname, node['alt'], node['css_class'])
if node['unlink']:
self.body.append(imgtag)
else:
self.body.append(linktag_format.format(
node['target'] or build_gravatar_profile_url(node['email']),
imgtag,
))
raise nodes.SkipNode
def latex_visit_gravatar_image(self, node):
fname, outfn = get_image_filename(self, node)
save_gravatar_image(outfn, node)
if fname is not None:
self.body.append('\\includegraphics{%s}' % fname)
raise nodes.SkipNode
| {
"content_hash": "f1625117acb2b769be2fab92a83b98b3",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 72,
"avg_line_length": 27.364963503649633,
"alnum_prop": 0.618031475060016,
"repo_name": "tell-k/sphinxcontrib-gravatar",
"id": "6140533439f8388d19dce37be3a43d41390c34bd",
"size": "3773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinxcontrib/gravatar/nodes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "6826"
},
{
"name": "Python",
"bytes": "50818"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from mapi.importers import *
class Command(BaseCommand):
help = 'Imports all stations and stores them to database'
def handle(self, *args, **options):
import_stations()
| {
"content_hash": "eefc45058e5e833a836ad634595e95e3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 65,
"avg_line_length": 26.3,
"alnum_prop": 0.7072243346007605,
"repo_name": "timokoola/mjuna",
"id": "2c8a1b66ad219c25dd8bdfc115ed7a9c8aea6d56",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mjuna/mapi/management/commands/import_stations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18192"
}
],
"symlink_target": ""
} |
import os
import math
import argparse
from PIL import Image, ImageSequence, ImageColor, ImageOps
import numpy as np
#
# First pass to read mouse light data & export into CATMAID tiles.
# WARNING: Python tuples are (x,y,z) but numpy arrays are (z,y,x)
#
class MouseStack:
def __init__(self, path, globalsize, tilesize, channels):
self.path = path.rstrip('/')
self.globalsize = globalsize
self.tilesize = tilesize
self.channels = channels
# Maximum directory depth (number of levels in the scale pyramid)
self.max_depth = int(math.ceil(math.log(max(
self.globalsize[0]/self.tilesize[0],
self.globalsize[1]/self.tilesize[1],
self.globalsize[2]/self.tilesize[2]
),2)))
def toBitString(self, i, depth=None):
"""Generate a bitstring for 'i' for up to self.max_depth bytes."""
if depth == None:
depth = self.max_depth
s = []
for b in reversed(range(depth)):
s.append((i & 1<<b)>>b)
return s
def getTilePath(self, x, y, z, channel, scale=0):
"""Get the tile path associated with a given X,Y,Z."""
# Total number of directories to traverse
# Reduce by scale requested
steps = self.max_depth - scale
tile_x = x // (self.tilesize[0] * 2**scale)
tile_y = y // (self.tilesize[1] * 2**scale)
tile_z = z // (self.tilesize[2] * 2**scale)
levels = []
for (xb,yb,zb) in zip(self.toBitString(tile_x, steps),
self.toBitString(tile_y, steps),
self.toBitString(tile_z, steps)):
total = zb*4+yb*2+xb
levels.append(total+1)
path = self.path + '/' + '/'.join(str(x) for x in levels) + "/default.%d.tif" % channel
return path
#
# Data description:
# Colums: 40768/637=64
# Rows: 25792/403=64
# Layers: 14272/233=61.25321 (62)
# Channels: 2 [0, 1]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data', default='/nobackup/mousebrainmicro/2014-11-24-Stitch1/', help='Data directory')
parser.add_argument('--globalsize', nargs=3, type=int, metavar=('X', 'Y', 'Z'), help='Total size of mosaic', default=[43520, 38656, 16384])
parser.add_argument('--tilesize', nargs=3, type=int, metavar=('X', 'Y', 'Z'), help='Size of tif tiles', default=[680, 604, 256])
parser.add_argument('--channels', nargs='+', type=int, help="Channels to processs", default=[0, 1])
parser.add_argument('--slices', nargs='+', type=int, default=None, help="Specific Z-slab to process [Default: (all)]")
parser.add_argument('--scales', nargs='+', type=int, default=[0], help="Scales to generate [Default: (all)]")
parser.add_argument('--output', default="/tier2/bock/nobackup/tiles/cluster/mouse/test1/", help="Path to output tiles to.")
args = parser.parse_args()
stack = MouseStack(args.data, args.globalsize, args.tilesize, args.channels)
# TODO: Encode per-channel levels into the parser arguments?
# Mike: "0 is green, 1 is red (or even better - magenta)"
# levels[channel] = (black,white)
levels = {0 : (13200, 32000),
1 : (11200, 30000)}
# Color for each channel
color = {0 : (0,255,0),
1 : (255,0,255)}
# Threshold for data (don't emit if no data above this)
data_output_threshold = 2**5
if args.slices is not None:
slices = sorted(args.slices)
else:
# Generate full list of slices
slices = list(range(int(math.ceil(stack.globalsize[2]/stack.tilesize[2]))))
# Note: Concept of scales is incompatible with CATMAID -- CATMAID uses layers & does not scale in Z yet
if args.scales is not None:
scales = sorted(args.scales, reverse=True)
# Number of smaller tiles into a larger tile
tiles_per_col = 2
tiles_per_row = 2
print(scales)
for scale in scales:
for slicen in slices:
for bigrow in range(stack.globalsize[1] // (stack.tilesize[1]*tiles_per_row*2**scale)):
for bigcol in range(stack.globalsize[0] // (stack.tilesize[0]*tiles_per_col*2**scale)):
for channel in args.channels:
slabdata = np.zeros([256, 604*tiles_per_row, 680*tiles_per_row], dtype=np.uint16)
for smallrow in range(tiles_per_row):
for smallcol in range(tiles_per_col):
#filename = '/nobackup/mousebrainmicro/2014-11-24-Stitch1/5/8/3/1/8/5/default.%d.tif' % channel
col = (bigcol*tiles_per_col + smallcol) * stack.tilesize[0]
row = (bigrow*tiles_per_row + smallrow) * stack.tilesize[1]
z = slicen * stack.tilesize[2]
filename = stack.getTilePath(col,row,z,channel,scale)
if os.path.exists(filename):
print("Reading %s ..." % filename)
im = Image.open(filename)
# Read image data
# TODO: Consider using skimage.io.* instead?
#tiledata = np.empty([223,403,637], dtype=np.uint16)
frameNumber = 0
for frame in ImageSequence.Iterator(im):
imarray = np.array(frame)
slabdata[frameNumber,
smallrow*stack.tilesize[1]:(smallrow+1)*stack.tilesize[1],
smallcol*stack.tilesize[0]:(smallcol+1)*stack.tilesize[0]] = imarray
#print(frameNumber,
# smallrow*stack.tilesize[1],(smallrow+1)*stack.tilesize[1],
# smallcol*stack.tilesize[0],(smallcol+1)*stack.tilesize[0])
#print(smallcol, smallrow)
#print (imarray.shape)
#print (slabdata.shape)
#print("Non zero: %d" % np.count_nonzero(slabdata))
frameNumber += 1
else:
print("File does not exists: %s" % filename)
# Export tiles to disk
# Is there any data? Skip if not..
if not slabdata.any():
print("No data in slab! Skipping output.")
continue
# Convert the entire array down to 8bit w/ fixed contrast values
# TODO: Figure out how to do this with fewer operations?
if channel in levels:
(black, white) = levels[channel]
ratio = 255 / (white - black)
slabdata[slabdata < black] = black
slabdata[slabdata > white] = white
slab8bit = ((slabdata - black) * ratio).astype(np.uint8)
else:
print("No level information found. Doing naive conversion.")
slab8bit = (slabdata/8).astype(np.uint8)
for frame in range(stack.tilesize[2]):
if np.nanmax(slab8bit[frame]) < data_output_threshold:
print("No data in frame %d! Skipping." % frame)
continue
z = slicen*stack.tilesize[2] + frame
col = bigcol
row = bigrow
im = Image.fromarray(slab8bit[frame])
# Inefficient way to colorize...
im = ImageOps.colorize(im, black=(0,0,0), white=color[channel]).convert('P', palette=Image.ADAPTIVE, colors=256)
#print("Node ", im.mode)
outpath = "%s/c%d/%d/%d/%d" % (args.output, channel, scale, z, row)
if not os.path.exists(outpath):
os.makedirs(outpath)
outfile = "%s/%s.png" % (outpath,col)
#im.save("/tmp/out/%d.%d.%d.%d.png" % (z, col, row, channel))
print(outfile)
im.save(outfile, optimize=1)
if __name__ == "__main__":
main()
| {
"content_hash": "5e5f7645fbd2cb8a99c3ff48cf77ab34",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 141,
"avg_line_length": 43.068627450980394,
"alnum_prop": 0.4988618256316868,
"repo_name": "openconnectome/open-connectome",
"id": "eee39e1ca8325786800f8237e05946d029e8cf6d",
"size": "8786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/ingest/nelson/nelson.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43100"
},
{
"name": "C++",
"bytes": "23724"
},
{
"name": "CSS",
"bytes": "53255"
},
{
"name": "HTML",
"bytes": "142332"
},
{
"name": "JavaScript",
"bytes": "303249"
},
{
"name": "Makefile",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "1409968"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
} |
"""
Nello.io lock platform.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/lock.nello/
"""
from itertools import filterfalse
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.lock import (LockDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_PASSWORD, CONF_USERNAME)
REQUIREMENTS = ['pynello==1.5.1']
_LOGGER = logging.getLogger(__name__)
ATTR_ADDRESS = 'address'
ATTR_LOCATION_ID = 'location_id'
EVENT_DOOR_BELL = 'nello_bell_ring'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Nello lock platform."""
from pynello import Nello
nello = Nello(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))
add_devices([NelloLock(lock) for lock in nello.locations], True)
class NelloLock(LockDevice):
"""Representation of a Nello lock."""
def __init__(self, nello_lock):
"""Initialize the lock."""
self._nello_lock = nello_lock
self._device_attrs = None
self._activity = None
self._name = None
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def is_locked(self):
"""Return true if lock is locked."""
return True
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return self._device_attrs
def update(self):
"""Update the nello lock properties."""
self._nello_lock.update()
# Location identifiers
location_id = self._nello_lock.location_id
short_id = self._nello_lock.short_id
address = self._nello_lock.address
self._name = 'Nello {}'.format(short_id)
self._device_attrs = {
ATTR_ADDRESS: address,
ATTR_LOCATION_ID: location_id
}
# Process recent activity
activity = self._nello_lock.activity
if self._activity:
# Filter out old events
new_activity = list(
filterfalse(lambda x: x in self._activity, activity))
if new_activity:
for act in new_activity:
activity_type = act.get('type')
if activity_type == 'bell.ring.denied':
event_data = {
'address': address,
'date': act.get('date'),
'description': act.get('description'),
'location_id': location_id,
'short_id': short_id
}
self.hass.bus.fire(EVENT_DOOR_BELL, event_data)
# Save the activity history so that we don't trigger an event twice
self._activity = activity
def unlock(self, **kwargs):
"""Unlock the device."""
if not self._nello_lock.open_door():
_LOGGER.error("Failed to unlock")
| {
"content_hash": "c9fd241d6a70a17fdceb327d51fb9246",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 75,
"avg_line_length": 32.45454545454545,
"alnum_prop": 0.5938375350140056,
"repo_name": "stefan-jonasson/home-assistant",
"id": "04030c92425774cf68c89886bb040955fe2fb94a",
"size": "3213",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lock/nello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
} |
"""doc."""
from flask import Blueprint, flash, redirect, render_template, request, url_for
from app import db
from helpers import object_list
from models import Port, Tag
from Port.forms import PortForm
ports = Blueprint('ports', __name__, template_folder='templates')
@ports.route('/')
def index():
"""doc."""
ports = Port.query.order_by(Port.c_created_timestamp.desc())
return port_list('ports/index.html', ports)
@ports.route('/<slug>/')
def detail(slug):
"""doc."""
port = Port.query.filter(Port.slug == slug).first_or_404()
return render_template('ports/detail.html', port=port)
@ports.route('/tags/<slug>/')
def tag_detail(slug):
"""doc."""
tag = Tag.query.filter(Tag.slug == slug).first_or_404()
ports = tag.ports.order_by(Port.c_created_timestamp.desc())
return port_list('ports/tag_detail.html', ports, tag=tag)
@ports.route('/tags/')
def tag_index():
"""doc."""
tags = Tag.query.order_by(Tag.name)
return object_list('ports/tag_index.html', tags)
def port_list(template, query, **context):
"""doc."""
valid_statuses = (Port.STATUS_PUBLIC, Port.STATUS_DRAFT)
query = query.filter(Port.status.in_(valid_statuses))
if request.args.get('q'):
search = request.args['q']
query = query.filter(
(Port.body.contains(search)) |
(Port.title.contains(search)))
return object_list(template, query, **context)
def get_port_or_404(slug):
"""doc."""
valid_statuses = (Port.STATUS_PUBLIC, Port.STATUS_DRAFT)(Port.query
.filter(
(Port.slug == slug) &
(Port.status.in_(valid_statuses)))
.first_or_404())
@ports.route('/create/', methods=['GET', 'POST'])
def create():
"""doc."""
if request.method == 'POST':
form = PortForm(request.form)
if form.validate():
port = form.save_port(Port())
db.session.add(port)
db.session.commit()
flash('port "%s" created successfully.' % port.title, 'success')
return redirect(url_for('ports.detail', slug=port.slug))
else:
form = PortForm()
return render_template('ports/create.html', form=form)
@ports.route('/<slug>/edit/', methods=['GET', 'POST'])
def edit(slug):
"""doc."""
port = Port.query.filter(Port.slug == slug).first_or_404()
if request.method == 'POST':
form = PortForm(request.form, obj=port)
if form.validate():
port = form.save_port(port)
db.session.add(port)
db.session.commit()
flash('port "%s" created successfully.' % port.title, 'success')
return redirect(url_for('ports.detail', slug=port.slug))
else:
form = PortForm(obj=port)
return render_template('ports/edit.html', port=port, form=form)
@ports.route('/<slug>/delete/', methods=['GET', 'POST'])
def delete(slug):
"""doc."""
port = Port.query.filter(Port.slug == slug).first_or_404()
if request.method == 'POST':
port.status = port.STATUS_DELETED
db.session.add(port)
db.session.commit()
flash('port "%s" created successfully.' % port.title, 'success')
return redirect(url_for('ports.index'))
return render_template('ports/delete.html', port=port)
| {
"content_hash": "4c08cab26c0e5102fd50d221c0154ac0",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 99,
"avg_line_length": 32.137614678899084,
"alnum_prop": 0.5695118469882957,
"repo_name": "kentaro0919/billing",
"id": "b0e61c1dbda9aef52250c61f608cc81f24cc18d4",
"size": "3503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/customers/port_blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23546"
},
{
"name": "Python",
"bytes": "31495"
}
],
"symlink_target": ""
} |
import os
import sys
import errno
import optparse
import logging
import subprocess
import stat
import datetime
import time
import re
import fuse
from tempfile import mkstemp
from threading import Lock
fuse.fuse_python_api = (0, 2)
log = logging.getLogger('fuse_rsync')
class RsyncModule():
"""
This class implements access to an Rsync module.
"""
def __init__(self, host, module, user = None, password = None):
self._environment = os.environ.copy()
self._remote_url = "rsync://"
if not user is None:
self._remote_url += user + "@"
self._remote_url += host + "/" + module
if not password is None:
self._environment['RSYNC_PASSWORD'] = password
self._attr_cache = {}
def _parse_attrs(self, attrs):
"""
Parse the textual representation of file attributes to binary representation.
"""
result = 0
if attrs[0] == 'd':
result |= stat.S_IFDIR
elif attrs[0] == 'l':
result |= stat.S_IFLNK
elif attrs[0] == '-':
result |= stat.S_IFREG
else:
assert(False)
for i in range(0, 3):
val = 0
if 'r' in attrs[1 + 3 * i: 4 + 3 * i]:
val |= 4
if 'w' in attrs[1 + 3 * i: 4 + 3 * i]:
val |= 2
if 'x' in attrs[1 + 3 * i: 4 + 3 * i]:
val |= 1
result |= val << ((2 - i) * 3)
return result
def list(self, path = '/'):
"""
List files contained in directory __path__.
Returns a list of dictionaries with keys *attrs* (numerical attribute
representation), *size* (file size), *timestamp* (File's atime timestamp
in a datetime object) and *filename* (The file's name).
"""
# See http://stackoverflow.com/questions/10323060/printing-file-permissions-like-ls-l-using-stat2-in-c for modes
RE_LINE = re.compile("^([ldcbps-]([r-][w-][x-]){3})\s+([0-9]+)\s+([0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}) (.*)$")
remote_url = self._remote_url + path
try:
cmdline = ["rsync", "--list-only", remote_url]
log.debug("executing %s", " ".join(cmdline))
output = subprocess.check_output(["rsync", "--list-only", remote_url], env = self._environment)
listing = []
for line in output.decode(encoding = 'iso-8859-1').split("\n"):
match = RE_LINE.match(line)
if not match:
continue
listing.append({
"attrs": self._parse_attrs(match.group(1)),
"size": int(match.group(3)),
"timestamp": datetime.datetime.strptime(match.group(4), "%Y/%m/%d %H:%M:%S"),
"filename": match.group(5)
})
return listing
except subprocess.CalledProcessError as err:
if err.returncode == 23:
return []
raise err
def copy(self, remotepath = '/', localpath = None):
"""
Copy a file from the remote rsync module to the local filesystem.
If no local destination is specified in __localpath__, a temporary
file is created and its filename returned. The temporary file has
to be deleted by the caller.
"""
remote_url = self._remote_url + remotepath
if localpath is None:
(file, localpath) = mkstemp()
os.close(file)
cmdline = ["rsync", "--copy-links", remote_url, localpath]
log.debug("executing %s", " ".join(cmdline))
subprocess.check_call(cmdline, env = self._environment)
return localpath
class FuseRsyncFileInfo(fuse.FuseFileInfo):
"""
Encapsulates the file handle for an opened file.
"""
def __init__(self, handle, **kw):
super(FuseRsyncFileInfo, self).__init__(**kw)
self.keep = True
self.handle = handle
class FuseRsync(fuse.Fuse):
"""
The implementation of the FUSE filesystem.
"""
def __init__(self, *args, **kw):
self.host = None
self.module = None
self.user = None
self.password = None
self.path = "/"
self._attr_cache = {}
self._file_cache = {}
self._file_cache_lock = Lock()
fuse.Fuse.__init__(self, *args, **kw)
self.parser.add_option(mountopt = 'user', default = None, help = "Rsync user on the remote host")
self.parser.add_option(mountopt = 'password', type = str, default = None, help = "Rsync password on the remote host")
self.parser.add_option(mountopt = 'host', type = str, help = "Rsync remote host")
self.parser.add_option(mountopt = 'module', type = str, help = "Rsync module on remote host")
self.parser.add_option(mountopt = 'path', type = str, default = "/", help = "Rsync path in module on remote host that is supposed to be the root point")
# Helpers
# =======
def _full_path(self, partial):
if partial.startswith("/"):
partial = partial[1:]
path = os.path.join(self.path, partial)
return path
def init(self):
options = self.cmdline[0]
log.debug("Invoked fsinit() with host=%s, module=%s, user=%s, password=%s", options.host, options.module, options.user, options.password)
self._rsync = RsyncModule(options.host, options.module, options.user, options.password)
# Filesystem methods
# ==================
#def access(self, path, mode):
#full_path = self._full_path(path)
#if not os.access(full_path, mode):
#raise FuseOSError(errno.EACCES)
#def chmod(self, path, mode):
#full_path = self._full_path(path)
#return os.chmod(full_path, mode)
#def chown(self, path, uid, gid):
#full_path = self._full_path(path)
#return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
try:
log.debug("Invoked getattr('%s')", path)
path = self._full_path(path)
st = fuse.Stat()
if path == "/":
st.st_atime = int(time.time())
st.st_ctime = int(time.time())
st.st_mode = stat.S_IFDIR | 0555
st.st_mtime = int(time.time())
st.st_nlink = 2
st.st_uid = os.geteuid()
st.st_gid = os.getegid()
return st
if path in self._attr_cache:
info = self._attr_cache[path]
else:
listing = self._rsync.list(path)
if len(listing) != 1:
log.warn("Found none or several files for path")
return -errno.ENOENT
info = listing[0]
self._attr_cache[path] = info
timestamp = (info["timestamp"] - datetime.datetime(1970,1,1)).total_seconds()
st.st_atime = timestamp
st.st_ctime = timestamp
st.st_uid = os.geteuid()
st.st_gid = os.getegid()
if info["attrs"] & stat.S_IFDIR:
st.st_mode = stat.S_IFDIR | 0555
else:
st.st_mode = stat.S_IFREG | 0444
st.st_mtime = timestamp
st.st_nlink = 1
st.st_size = info["size"]
return st
except Exception as ex:
log.exception("while doing getattr")
return -errno.ENOENT
def readdir(self, path, offset):
try:
if not path.endswith("/"):
path += "/"
log.debug("Invoked readdir('%s')", path)
full_path = self._full_path(path)
yield fuse.Direntry('.')
yield fuse.Direntry('..')
for dirent in self._rsync.list(full_path):
if dirent["filename"] == ".":
continue
self._attr_cache[path + dirent["filename"]] = dirent
yield fuse.Direntry(str(dirent["filename"]))
except Exception as ex:
log.exception("While doing readdir")
#def readlink(self, path):
#pathname = os.readlink(self._full_path(path))
#if pathname.startswith("/"):
## Path name is absolute, sanitize it.
#return os.path.relpath(pathname, self.root)
#else:
#return pathname
#def mknod(self, path, mode, dev):
#return os.mknod(self._full_path(path), mode, dev)
#def rmdir(self, path):
#full_path = self._full_path(path)
#return os.rmdir(full_path)
#def mkdir(self, path, mode):
#return os.mkdir(self._full_path(path), mode)
#def statfs(self, path):
#full_path = self._full_path(path)
#stv = os.statvfs(full_path)
#return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
#'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
#'f_frsize', 'f_namemax'))
#def unlink(self, path):
#return os.unlink(self._full_path(path))
#def symlink(self, target, name):
#return os.symlink(self._full_path(target), self._full_path(name))
#def rename(self, old, new):
#return os.rename(self._full_path(old), self._full_path(new))
#def link(self, target, name):
#return os.link(self._full_path(target), self._full_path(name))
#def utimens(self, path, times=None):
#return os.utime(self._full_path(path), times)
## File methods
## ============
def open(self, path, flags):
log.debug("invoking open(%s, %d)", path, flags)
full_path = self._full_path(path)
if flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) != os.O_RDONLY:
return -errno.EACCES
with self._file_cache_lock:
if not path in self._file_cache:
localfile = self._rsync.copy(full_path)
self._file_cache[path] = {"refcount": 1, "localpath": localfile}
else:
self._file_cache[path]["refcount"] += 1
localfile = self._file_cache[path]["localpath"]
handle = os.open(localfile, os.O_RDONLY)
log.debug("Created file handle %d", handle)
return FuseRsyncFileInfo(handle)
#def create(self, path, mode, fi=None):
#full_path = self._full_path(path)
#return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)
def read(self, path, length, offset, fh):
log.debug("invoking read(%s, %d, %d, %d)", path, length, offset, fh.handle)
os.lseek(fh.handle, offset, os.SEEK_SET)
return os.read(fh.handle, length)
#def write(self, path, buf, offset, fh):
#os.lseek(fh, offset, os.SEEK_SET)
#return os.write(fh, buf)
#def truncate(self, path, length, fh=None):
#full_path = self._full_path(path)
#with open(full_path, 'r+') as f:
#f.truncate(length)
#def flush(self, path, fh):
#return os.fsync(fh)
def release(self, path, dummy, fh):
log.debug("invoking release(%s, %d, %d)", path, dummy, fh.handle)
os.close(fh.handle)
with self._file_cache_lock:
self._file_cache[path]["refcount"] -= 1
if self._file_cache[path]["refcount"] <= 0:
localfile = self._file_cache[path]["localpath"]
del self._file_cache[path]
os.unlink(localfile)
#def fsync(self, path, fdatasync, fh):
#return self.flush(path, fh)
if __name__ == '__main__':
fs = FuseRsync()
fs.parse(errex=1)
#TODO: Below is hacky, find properly parsed debug attribute
if '-d' in sys.argv:
logging.basicConfig(level = logging.DEBUG)
else:
logging.basicConfig(level = logging.ERROR)
fs.init()
fs.main()
| {
"content_hash": "3d739e132acaf82ab0ac7466307685f9",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 160,
"avg_line_length": 35.21511627906977,
"alnum_prop": 0.5282317979197623,
"repo_name": "zaddach/fuse-rsync",
"id": "1df95feb779599e069444e57a2cc3e57a168fb07",
"size": "12264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuse_rsync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12137"
}
],
"symlink_target": ""
} |
import sys
import csv
import ctypes
import time
from arango import ArangoClient
monthDecode = {
"JAN":"01", "FEB":"02", "MAR":"03", "APR":"04",
"MAY":"05", "JUN":"06", "JUL":"07", "AUG":"08",
"SEP":"09", "OCT":"10", "NOV":"11", "DEC":"12"
}
def decodeDate(d):
if len(d) == 24:
month = d[3:6]
day = d[0:2]
year = d[7:11]
time = d[12:24]
year += "-";
year += monthDecode.get(month, "01")
year += "-"
year += day
year += "T"
year += time
return year
return d
def main():
if len(sys.argv) < 6:
print("Usage: host database collection data_file count [offset] Example: python WikiLoader.py 'http://localhost:8529' _system wikipedia benchmark.data 10000000")
return
# Override csv default 128k field size
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
# Initialize the client for ArangoDB.
client = ArangoClient(hosts=sys.argv[1])
# Upload settings
filename = sys.argv[4] # data file
collection = sys.argv[3] # target collection
database = sys.argv[2] # target database
line_limit = int(sys.argv[5]) # how many documents to upload
batch_size = 10000 # batch size for inserting into Arango
offset = 0
if len(sys.argv) > 6:
offset = int(sys.argv[6])
db = client.db(database)
if db.has_collection(collection):
wikipedia = db.collection(collection)
else:
wikipedia = db.create_collection(collection)
f = open(filename, mode ='r', encoding='utf-8', errors='replace')
reader = csv.reader(f, delimiter='\t')
data = []
total = 0
totaltimeNs = 0
count = offset
for row in reader:
if offset > 0:
offset = offset - 1
continue
data.append({'title': row[0].replace("\n", "\\n").replace("\"", "'").replace("\\","/"),
'body': row[2].replace("\n", "\\n").replace("\"", "'").replace("\\","/"),
'count': count, 'created':decodeDate(row[1])})
if len(data) > batch_size:
# start time
start = time.perf_counter_ns()
wikipedia.insert_many(data)
# stop time
took = (time.perf_counter_ns() - start)
totaltimeNs += took
data.clear()
print('Loaded ' + str(total) + ' ' + str( round((total/line_limit) * 100, 2)) +
'% in total ' + str(totaltimeNs / 1000000) + 'ms Batch:' +
str(took/1000000) + 'ms Avg:' + str( (totaltimeNs/ (total/batch_size))/1000000) + 'ms \n')
total = total + 1
if total >= line_limit:
break
count = count + 1
if len(data) > 0:
# start time
start = time.perf_counter_ns()
wikipedia.insert_many(data)
# stop time
took = (time.perf_counter_ns() - start)
totaltimeNs += took
print('Loaded ' + str(total) + ' ' + str( round((total/line_limit) * 100, 2)) +
'% in total ' + str(totaltimeNs / 1000000) + 'ms Batch:' +
str(took/1000000) + 'ms Avg:' + str( (totaltimeNs/ (total/batch_size))/1000000) + 'ms \n')
f.close()
if __name__== "__main__":
main()
| {
"content_hash": "8c6e3ca3755f2c42ea2d0752deac1a14",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 165,
"avg_line_length": 30.367346938775512,
"alnum_prop": 0.5789650537634409,
"repo_name": "graetzer/arangodb",
"id": "79e37f69b6c17829f66949189aecd0c8c626f86b",
"size": "4167",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "3rdParty/iresearch/scripts/ArangoDBLoader/WikiLoader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "391227"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "63025"
},
{
"name": "C",
"bytes": "7952921"
},
{
"name": "C#",
"bytes": "96431"
},
{
"name": "C++",
"bytes": "274543069"
},
{
"name": "CMake",
"bytes": "646773"
},
{
"name": "CSS",
"bytes": "1054160"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "259402"
},
{
"name": "Emacs Lisp",
"bytes": "14637"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groovy",
"bytes": "131"
},
{
"name": "HTML",
"bytes": "2215528"
},
{
"name": "Java",
"bytes": "922156"
},
{
"name": "JavaScript",
"bytes": "53300241"
},
{
"name": "LLVM",
"bytes": "24129"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "17899"
},
{
"name": "M4",
"bytes": "575204"
},
{
"name": "Makefile",
"bytes": "492694"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "28404"
},
{
"name": "Objective-C",
"bytes": "18435"
},
{
"name": "Objective-C++",
"bytes": "2503"
},
{
"name": "PHP",
"bytes": "107274"
},
{
"name": "Pascal",
"bytes": "150599"
},
{
"name": "Perl",
"bytes": "564374"
},
{
"name": "Perl6",
"bytes": "9918"
},
{
"name": "Python",
"bytes": "4527647"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "5123"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "1007604"
},
{
"name": "Ruby",
"bytes": "929950"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "424800"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "11568"
},
{
"name": "XSLT",
"bytes": "551977"
},
{
"name": "Yacc",
"bytes": "53072"
}
],
"symlink_target": ""
} |
import time
from concurrent import futures
# Pin Definitons:
_ledPin = 21
# blink ON duration in sec
_blink_duration = 0.15
# https://stackoverflow.com/questions/19033818/how-to-call-a-function-on-a-running-python-thread
# https://docs.python.org/3.4/library/concurrent.futures.html
# generating the executor takes time (200-300 ms on the Raspi Zero) so do it once and reuse it thoughout the whole program execution
_executor = futures.ThreadPoolExecutor(max_workers=5)
########################################################################
## The blink function: pass the call to a worker thread to allow server to continue do serve pages
def ledz_blink():
#Multithreading
_b = _blinker()
_executor.submit(_b.blink)
########################################################################
# Multithreading needs a class, there it is
# https://stackoverflow.com/questions/19033818/how-to-call-a-function-on-a-running-python-thread
class _blinker:
def blink(self):
# GPIO.output(_ledPin, True)
time.sleep(_blink_duration)
# GPIO.output(_ledPin, False)
########################################################################
## Init function, call it on startup
def ledz_init(c):
print ("INFO: called ledz_init()")
#read pin from config
global _ledPin
_ledPin = c.getint("Ledz", "pin")
# Pin Setup:
# GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
# GPIO.setup(_ledPin, GPIO.OUT) # LED pin set as output
# Initial state for LEDs:
# GPIO.output(_ledPin, GPIO.LOW)
########################################################################
## Finalization, call on close
def ledz_finalize():
print ("INFO: called ledz_finalize()")
# GPIO.cleanup() # cleanup all GPIO
| {
"content_hash": "e291f400e1ca454640d2ea7c8993898b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 133,
"avg_line_length": 37.083333333333336,
"alnum_prop": 0.5803370786516854,
"repo_name": "AlanFromJapan/alanWebSites",
"id": "e1846b92845f5c4cb0bbf9eeae61a87b50326367",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ledz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "12241"
},
{
"name": "Batchfile",
"bytes": "43"
},
{
"name": "C",
"bytes": "20055"
},
{
"name": "CSS",
"bytes": "85155"
},
{
"name": "HTML",
"bytes": "1468157"
},
{
"name": "Processing",
"bytes": "26697"
},
{
"name": "Python",
"bytes": "17526"
},
{
"name": "Shell",
"bytes": "2439"
}
],
"symlink_target": ""
} |
"""
reader.py
Matthew Brooks, 2018
Assembler for the MBVM.
"""
def load_from_file(file_path):
"""
TODO
:param file_path:
:return:
"""
with open(file_path, 'r') as f:
data = f.readlines()
processed_lines = []
for line in data:
line = line.strip()
# Remove commented lines
if line and not line.startswith("#"):
# Convert to lower case
processed_lines.append(line.lower())
return processed_lines
| {
"content_hash": "7ac597a692c9d797bc94a196eb826d70",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 48,
"avg_line_length": 16.466666666666665,
"alnum_prop": 0.5607287449392713,
"repo_name": "mattixtech/mbas",
"id": "2ae993663ed63cf96bc45aa8024f1dba4f758f25",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4822"
}
],
"symlink_target": ""
} |
from django.template import Template, Context
from .base import BaseSelectableTestCase
__all__ = (
'JqueryTagTestCase',
'ThemeTagTestCase',
)
class JqueryTagTestCase(BaseSelectableTestCase):
def assertJQueryVersion(self, result, version):
expected = "//ajax.googleapis.com/ajax/libs/jquery/%s/jquery.min.js" % version
self.assertTrue(expected in result)
def assertUIVersion(self, result, version):
expected = "//ajax.googleapis.com/ajax/libs/jqueryui/%s/jquery-ui.js" % version
self.assertTrue(expected in result)
def test_render(self):
"Render template tag with default versions."
template = Template("{% load selectable_tags %}{% include_jquery_libs %}")
context = Context({})
result = template.render(context)
self.assertJQueryVersion(result, '1.7.2')
self.assertUIVersion(result, '1.8.23')
def test_render_jquery_version(self):
"Render template tag with specified jQuery version."
template = Template("{% load selectable_tags %}{% include_jquery_libs '1.4.3' %}")
context = Context({})
result = template.render(context)
self.assertJQueryVersion(result, '1.4.3')
def test_render_variable_jquery_version(self):
"Render using jQuery version from the template context."
version = '1.4.3'
template = Template("{% load selectable_tags %}{% include_jquery_libs version %}")
context = Context({'version': version})
result = template.render(context)
self.assertJQueryVersion(result, '1.4.3')
def test_render_jquery_ui_version(self):
"Render template tag with specified jQuery UI version."
template = Template("{% load selectable_tags %}{% include_jquery_libs '1.4.3' '1.8.13' %}")
context = Context({})
result = template.render(context)
self.assertUIVersion(result, '1.8.13')
def test_render_variable_jquery_ui_version(self):
"Render using jQuery UI version from the template context."
version = '1.8.13'
template = Template("{% load selectable_tags %}{% include_jquery_libs '1.4.3' version %}")
context = Context({'version': version})
result = template.render(context)
self.assertUIVersion(result, '1.8.13')
def test_render_no_jquery(self):
"Render template tag without jQuery."
template = Template("{% load selectable_tags %}{% include_jquery_libs '' %}")
context = Context({})
result = template.render(context)
self.assertTrue('jquery.min.js' not in result)
def test_render_no_jquery_ui(self):
"Render template tag without jQuery UI."
template = Template("{% load selectable_tags %}{% include_jquery_libs '1.7.2' '' %}")
context = Context({})
result = template.render(context)
self.assertTrue('jquery-ui.js' not in result)
class ThemeTagTestCase(BaseSelectableTestCase):
def assertUICSS(self, result, theme, version):
expected = "//ajax.googleapis.com/ajax/libs/jqueryui/%s/themes/%s/jquery-ui.css" % (version, theme)
self.assertTrue(expected in result)
def test_render(self):
"Render template tag with default settings."
template = Template("{% load selectable_tags %}{% include_ui_theme %}")
context = Context({})
result = template.render(context)
self.assertUICSS(result, 'base', '1.8.23')
def test_render_version(self):
"Render template tag with alternate version."
template = Template("{% load selectable_tags %}{% include_ui_theme 'base' '1.8.13' %}")
context = Context({})
result = template.render(context)
self.assertUICSS(result, 'base', '1.8.13')
def test_variable_version(self):
"Render using version from content variable."
version = '1.8.13'
template = Template("{% load selectable_tags %}{% include_ui_theme 'base' version %}")
context = Context({'version': version})
result = template.render(context)
self.assertUICSS(result, 'base', version)
def test_render_theme(self):
"Render template tag with alternate theme."
template = Template("{% load selectable_tags %}{% include_ui_theme 'ui-lightness' %}")
context = Context({})
result = template.render(context)
self.assertUICSS(result, 'ui-lightness', '1.8.23')
def test_variable_theme(self):
"Render using theme from content variable."
theme = 'ui-lightness'
template = Template("{% load selectable_tags %}{% include_ui_theme theme %}")
context = Context({'theme': theme})
result = template.render(context)
self.assertUICSS(result, theme, '1.8.23')
| {
"content_hash": "de96c8e239ed4f42d56b20c981af52c9",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 107,
"avg_line_length": 41.6,
"alnum_prop": 0.6339882943143813,
"repo_name": "affan2/django-selectable",
"id": "c0042fb0282ca60aa3777f2849b1a40d2701ebed",
"size": "4784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selectable/tests/test_templatetags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1420"
},
{
"name": "HTML",
"bytes": "6028"
},
{
"name": "JavaScript",
"bytes": "179513"
},
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Python",
"bytes": "109482"
}
],
"symlink_target": ""
} |
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import caches, DEFAULT_CACHE_ALIAS
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers, get_max_age
class UpdateCacheMiddleware(object):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
"""
def __init__(self):
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _session_accessed(self, request):
try:
return request.session.accessed
except AttributeError:
return False
def _should_update_cache(self, request, response):
return hasattr(request, '_cache_update_cache') and request._cache_update_cache
def process_response(self, request, response):
"""Sets the cache, if needed."""
if not self._should_update_cache(request, response):
# We don't need to update the cache, just return.
return response
if response.streaming or response.status_code != 200:
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
if timeout:
cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(object):
"""
Request-phase cache middleware that fetches a page from the cache.
Must be used as part of the two-part update/fetch cache middleware.
FetchFromCacheMiddleware must be the last piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
"""
def __init__(self):
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def process_request(self, request):
"""
Checks whether the page is already cached and returns the cached
version if available.
"""
if not request.method in ('GET', 'HEAD'):
request._cache_update_cache = False
return None # Don't bother checking the cache.
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key, None)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == 'HEAD':
cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
response = self.cache.get(cache_key, None)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
# hit, return cached response
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
"""
Cache middleware that provides basic behavior for many simple sites.
Also used as the hook point for the cache decorator, which is generated
using the decorator-from-middleware utility.
"""
def __init__(self, cache_timeout=None, **kwargs):
# We need to differentiate between "provided, but using default value",
# and "not provided". If the value is provided using a default, then
# we fall back to system defaults. If it is not provided at all,
# we need to use middleware defaults.
try:
key_prefix = kwargs['key_prefix']
if key_prefix is None:
key_prefix = ''
except KeyError:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.key_prefix = key_prefix
try:
cache_alias = kwargs['cache_alias']
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
except KeyError:
cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache_alias = cache_alias
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.cache_timeout = cache_timeout
self.cache = caches[self.cache_alias]
| {
"content_hash": "923b7430c84a574658a1672e5f3d6647",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 102,
"avg_line_length": 40.53107344632768,
"alnum_prop": 0.6732645664901031,
"repo_name": "errx/django",
"id": "6faebcd3f3950d8b8f619a747c7edd5a9cebd398",
"size": "7174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/middleware/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9469402"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from .views import home, home_files
urlpatterns = [
# Examples:
# url(r'^$', 'taskbuster.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^(?P<filename>(robots.txt)|(humans.txt))$',
home_files, name='home-files'),
]
urlpatterns += i18n_patterns(
url(r'^$', home, name='home'),
url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "8219770dfda4576946ca786bf1ea07b6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 26.57894736842105,
"alnum_prop": 0.6415841584158416,
"repo_name": "RDV1984/taskbuster-boilerplate",
"id": "2b35be0c20571c96a590a32d8538318baedcef55",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskbuster/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "256"
},
{
"name": "HTML",
"bytes": "7282"
},
{
"name": "JavaScript",
"bytes": "74"
},
{
"name": "Python",
"bytes": "8400"
}
],
"symlink_target": ""
} |
"""
Implements a cache for plot data at the project level. Cached plot data has a "path" (e.g. 'optimization/generations_data')
and dependencies (a list of files that are used to produce that data) as well as the parameters used in that data.
The cache object is passed to the `calc_graph` method and the plot is responsible for retrieving data from the cache.
"""
import functools
import hashlib
import json
import os
import time
import pandas as pd
class PlotCache(object):
"""A cache for plot data. Use the ``lookup`` method to retrieve data from the cache."""
def __init__(self, project):
"""Initialize the cache from disk"""
self.parameter_guard = {} # data_path => set(parameters.keys()) - just a check for programming errors
self.project = project
def _parameter_hash(self, parameters):
return hashlib.md5(repr(sorted(parameters.items())).encode("utf-8")).hexdigest()
def _cached_data_file(self, data_path, parameters):
return os.path.join(self.project, '.cache', data_path, self._parameter_hash(parameters))
def _cached_div_file(self, plot):
data_path = os.path.join(plot.category_name, plot.id())
return self._cached_data_file(data_path, plot.parameters) + '.div'
def _cached_table_file(self, plot):
data_path = os.path.join(plot.category_name, plot.id())
return self._cached_data_file(data_path, plot.parameters) + '.table.div'
def lookup(self, data_path, plot, producer):
cache_timestamp = self.cache_timestamp(self._cached_data_file(data_path, plot.parameters))
if cache_timestamp < self.newest_dependency(plot.input_files):
return self.store_cached_value(data_path, plot.parameters, producer)
return self.load_cached_value(data_path, plot.parameters)
def lookup_plot_div(self, plot, producer):
"""Lookup the cache of a plot created with plot.plot_div()"""
div_file = self._cached_div_file(plot)
cache_timestamp = self.cache_timestamp(div_file)
if cache_timestamp < self.newest_dependency(plot.input_files):
plot_div = producer()
folder = os.path.dirname(div_file)
if not os.path.exists(folder):
os.makedirs(folder)
with open(div_file, 'w') as div_fp:
div_fp.write(plot_div)
else:
print('Loading plot_div from cache: {div_file}'.format(div_file=div_file))
with open(div_file, 'r') as div_fp:
plot_div = div_fp.read()
return plot_div
def lookup_table_div(self, plot, producer):
"""Lookup the cache of a table created with plot.table_div()"""
div_file = self._cached_table_file(plot)
cache_timestamp = self.cache_timestamp(div_file)
if cache_timestamp < self.newest_dependency(plot.input_files):
table_div = producer()
folder = os.path.dirname(div_file)
if not os.path.exists(folder):
os.makedirs(folder)
with open(div_file, 'w') as div_fp:
div_fp.write(table_div)
else:
# print('Loading table_div from cache: {div_file}'.format(div_file=div_file))
with open(div_file, 'r') as div_fp:
table_div = div_fp.read()
return table_div
def lookup_plot_data(self, plot, producer):
"""Lookup the cache of a plotly graph data created with plot.calc_graph"""
from plotly.utils import PlotlyJSONEncoder
data_path = os.path.join(plot.category_name, plot.id())
data_file = self._cached_data_file(data_path, plot.parameters) + '.graphdata'
cache_timestamp = self.cache_timestamp(data_file)
if cache_timestamp < self.newest_dependency(plot.input_files):
plot_data = producer()
folder = os.path.dirname(data_file)
if not os.path.exists(folder):
os.makedirs(folder)
with open(data_file, 'w') as data_json_path:
data_json = json.dumps(plot_data, cls=PlotlyJSONEncoder)
data_json_path.write(data_json)
else:
with open(data_file, 'r') as data_json_path:
plot_data = json.loads(data_json_path.read())
return plot_data
def cache_timestamp(self, path):
"""Return a timestamp (like ``os.path.getmtime``) to compare to. Returns 0 if there is no data in the cache"""
if not os.path.exists(path):
return 0
else:
return os.path.getmtime(path)
def newest_dependency(self, input_files):
"""Returns the newest timestamp (``os.path.getmtime`` and ``time.time()``) of the input_files - the idea being,
that if the cache is newer than this, then the cache is valid.
:param input_files: A list of tuples (locator method, args) that, when applied, produce a path"""
try:
return max(os.path.getmtime(locator_method(*args)) for locator_method, args in input_files)
except:
print('Could not read input files for cache!')
return time.time()
def store_cached_value(self, data_path, parameters, producer):
"""Store the Dataframe returned from producer and return it."""
data = producer()
data_folder = os.path.join(self.project, '.cache', data_path)
if not os.path.exists(data_folder):
os.makedirs(data_folder)
data.to_pickle(self._cached_data_file(data_path, parameters))
return data
def load_cached_value(self, data_path, parameters):
"""Load a Dataframe from disk"""
return pd.read_pickle(self._cached_data_file(data_path, parameters))
class MemoryPlotCache(PlotCache):
"""Extend the PlotCache to also keep a copy of the cache in memory"""
def __init__(self, project):
super(MemoryPlotCache, self).__init__(project)
self._cache = {} # _cached_data_file -> df
def load_cached_value(self, data_path, parameters):
"""Check memory cache before loading from disk"""
key = self._cached_data_file(data_path, parameters)
if not key in self._cache:
self._cache[key] = super(MemoryPlotCache, self).load_cached_value(data_path, parameters)
return self._cache[key]
def store_cached_value(self, data_path, parameters, producer):
"""Update memory cache when storing to disk"""
data = super(MemoryPlotCache, self).store_cached_value(data_path, parameters, producer)
key = self._cached_data_file(data_path, parameters)
self._cache[key] = data
return data
class NullPlotCache(PlotCache):
"""A dummy cache that doesn't cache anything - for comparing performance of PlotCache"""
def __init__(self):
super(NullPlotCache, self).__init__(None)
def lookup(self, data_path, plot, producer):
return producer()
def lookup_plot_div(self, plot, producer):
return producer()
def lookup_table_div(self, plot, producer):
return producer()
def lookup_plot_data(self, plot, producer):
return producer()
def cached(producer):
"""Calls to a function wrapped with this decorator are cached using ``self.cache.lookup``"""
@functools.wraps(producer)
def wrapper(self):
return self.cache.lookup(data_path=os.path.join(self.category_name, producer.__name__),
plot=self, producer=lambda: producer(self))
return wrapper
| {
"content_hash": "96bc9c48d2edaaf7a0f3339c4ed71d75",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 123,
"avg_line_length": 41.27472527472528,
"alnum_prop": 0.6308572949946751,
"repo_name": "architecture-building-systems/CEAforArcGIS",
"id": "d569779d7d7d9593555cdbb0d260d2993a5bbb51",
"size": "7512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cea/plots/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2776"
},
{
"name": "Jupyter Notebook",
"bytes": "135743"
},
{
"name": "Makefile",
"bytes": "675"
},
{
"name": "NSIS",
"bytes": "3948"
},
{
"name": "Python",
"bytes": "1217645"
},
{
"name": "Shell",
"bytes": "7194"
}
],
"symlink_target": ""
} |
from sn_agent_web import SettingsBase, Required
class WebSettings(SettingsBase):
def __init__(self, **custom_settings):
self.STATIC_ROOT_URL = '/static'
self.ETH_CLIENT = 'http://geth:8545'
self._ENV_PREFIX = 'SN_WEB_'
self.COOKIE_SECRET = Required(str)
super().__init__(**custom_settings)
| {
"content_hash": "0229a1c3ea7ae8d2adefced6c15b795d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 47,
"avg_line_length": 33.6,
"alnum_prop": 0.6220238095238095,
"repo_name": "jensenbox/singnet",
"id": "b56d68f9527e0ee7c2c95c446bfac218f99d2bdc",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent-web/sn_agent_web/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69753"
},
{
"name": "HCL",
"bytes": "113"
},
{
"name": "HTML",
"bytes": "13920"
},
{
"name": "JavaScript",
"bytes": "1397870"
},
{
"name": "Makefile",
"bytes": "635"
},
{
"name": "Python",
"bytes": "195690"
},
{
"name": "Shell",
"bytes": "5572"
}
],
"symlink_target": ""
} |
'''OpenGL extension VERSION.GL_1_5
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_VERSION_GL_1_5'
_DEPRECATED = False
GL_BUFFER_SIZE = constant.Constant( 'GL_BUFFER_SIZE', 0x8764 )
GL_BUFFER_USAGE = constant.Constant( 'GL_BUFFER_USAGE', 0x8765 )
GL_QUERY_COUNTER_BITS = constant.Constant( 'GL_QUERY_COUNTER_BITS', 0x8864 )
GL_CURRENT_QUERY = constant.Constant( 'GL_CURRENT_QUERY', 0x8865 )
GL_QUERY_RESULT = constant.Constant( 'GL_QUERY_RESULT', 0x8866 )
GL_QUERY_RESULT_AVAILABLE = constant.Constant( 'GL_QUERY_RESULT_AVAILABLE', 0x8867 )
GL_ARRAY_BUFFER = constant.Constant( 'GL_ARRAY_BUFFER', 0x8892 )
GL_ELEMENT_ARRAY_BUFFER = constant.Constant( 'GL_ELEMENT_ARRAY_BUFFER', 0x8893 )
GL_ARRAY_BUFFER_BINDING = constant.Constant( 'GL_ARRAY_BUFFER_BINDING', 0x8894 )
GL_ELEMENT_ARRAY_BUFFER_BINDING = constant.Constant( 'GL_ELEMENT_ARRAY_BUFFER_BINDING', 0x8895 )
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING = constant.Constant( 'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING', 0x889F )
GL_READ_ONLY = constant.Constant( 'GL_READ_ONLY', 0x88B8 )
GL_WRITE_ONLY = constant.Constant( 'GL_WRITE_ONLY', 0x88B9 )
GL_READ_WRITE = constant.Constant( 'GL_READ_WRITE', 0x88BA )
GL_BUFFER_ACCESS = constant.Constant( 'GL_BUFFER_ACCESS', 0x88BB )
GL_BUFFER_MAPPED = constant.Constant( 'GL_BUFFER_MAPPED', 0x88BC )
GL_BUFFER_MAP_POINTER = constant.Constant( 'GL_BUFFER_MAP_POINTER', 0x88BD )
GL_STREAM_DRAW = constant.Constant( 'GL_STREAM_DRAW', 0x88E0 )
GL_STREAM_READ = constant.Constant( 'GL_STREAM_READ', 0x88E1 )
GL_STREAM_COPY = constant.Constant( 'GL_STREAM_COPY', 0x88E2 )
GL_STATIC_DRAW = constant.Constant( 'GL_STATIC_DRAW', 0x88E4 )
GL_STATIC_READ = constant.Constant( 'GL_STATIC_READ', 0x88E5 )
GL_STATIC_COPY = constant.Constant( 'GL_STATIC_COPY', 0x88E6 )
GL_DYNAMIC_DRAW = constant.Constant( 'GL_DYNAMIC_DRAW', 0x88E8 )
GL_DYNAMIC_READ = constant.Constant( 'GL_DYNAMIC_READ', 0x88E9 )
GL_DYNAMIC_COPY = constant.Constant( 'GL_DYNAMIC_COPY', 0x88EA )
GL_SAMPLES_PASSED = constant.Constant( 'GL_SAMPLES_PASSED', 0x8914 )
glGenQueries = platform.createExtensionFunction(
'glGenQueries',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei,arrays.GLuintArray,),
doc='glGenQueries(GLsizei(n), GLuintArray(ids)) -> None',
argNames=('n','ids',),
deprecated=_DEPRECATED,
)
glDeleteQueries = platform.createExtensionFunction(
'glDeleteQueries',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei,arrays.GLuintArray,),
doc='glDeleteQueries(GLsizei(n), GLuintArray(ids)) -> None',
argNames=('n','ids',),
deprecated=_DEPRECATED,
)
glIsQuery = platform.createExtensionFunction(
'glIsQuery',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=constants.GLboolean,
argTypes=(constants.GLuint,),
doc='glIsQuery(GLuint(id)) -> constants.GLboolean',
argNames=('id',),
deprecated=_DEPRECATED,
)
glBeginQuery = platform.createExtensionFunction(
'glBeginQuery',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLuint,),
doc='glBeginQuery(GLenum(target), GLuint(id)) -> None',
argNames=('target','id',),
deprecated=_DEPRECATED,
)
glEndQuery = platform.createExtensionFunction(
'glEndQuery',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,),
doc='glEndQuery(GLenum(target)) -> None',
argNames=('target',),
deprecated=_DEPRECATED,
)
glGetQueryiv = platform.createExtensionFunction(
'glGetQueryiv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLenum,arrays.GLintArray,),
doc='glGetQueryiv(GLenum(target), GLenum(pname), GLintArray(params)) -> None',
argNames=('target','pname','params',),
deprecated=_DEPRECATED,
)
glGetQueryObjectiv = platform.createExtensionFunction(
'glGetQueryObjectiv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLenum,arrays.GLintArray,),
doc='glGetQueryObjectiv(GLuint(id), GLenum(pname), GLintArray(params)) -> None',
argNames=('id','pname','params',),
deprecated=_DEPRECATED,
)
glGetQueryObjectuiv = platform.createExtensionFunction(
'glGetQueryObjectuiv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLenum,arrays.GLuintArray,),
doc='glGetQueryObjectuiv(GLuint(id), GLenum(pname), GLuintArray(params)) -> None',
argNames=('id','pname','params',),
deprecated=_DEPRECATED,
)
glBindBuffer = platform.createExtensionFunction(
'glBindBuffer',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLuint,),
doc='glBindBuffer(GLenum(target), GLuint(buffer)) -> None',
argNames=('target','buffer',),
deprecated=_DEPRECATED,
)
glDeleteBuffers = platform.createExtensionFunction(
'glDeleteBuffers',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei,arrays.GLuintArray,),
doc='glDeleteBuffers(GLsizei(n), GLuintArray(buffers)) -> None',
argNames=('n','buffers',),
deprecated=_DEPRECATED,
)
glGenBuffers = platform.createExtensionFunction(
'glGenBuffers',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLsizei,arrays.GLuintArray,),
doc='glGenBuffers(GLsizei(n), GLuintArray(buffers)) -> None',
argNames=('n','buffers',),
deprecated=_DEPRECATED,
)
glIsBuffer = platform.createExtensionFunction(
'glIsBuffer',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=constants.GLboolean,
argTypes=(constants.GLuint,),
doc='glIsBuffer(GLuint(buffer)) -> constants.GLboolean',
argNames=('buffer',),
deprecated=_DEPRECATED,
)
glBufferData = platform.createExtensionFunction(
'glBufferData',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLsizeiptr,ctypes.c_void_p,constants.GLenum,),
doc='glBufferData(GLenum(target), GLsizeiptr(size), c_void_p(data), GLenum(usage)) -> None',
argNames=('target','size','data','usage',),
deprecated=_DEPRECATED,
)
glBufferSubData = platform.createExtensionFunction(
'glBufferSubData',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLintptr,constants.GLsizeiptr,ctypes.c_void_p,),
doc='glBufferSubData(GLenum(target), GLintptr(offset), GLsizeiptr(size), c_void_p(data)) -> None',
argNames=('target','offset','size','data',),
deprecated=_DEPRECATED,
)
glGetBufferSubData = platform.createExtensionFunction(
'glGetBufferSubData',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLintptr,constants.GLsizeiptr,ctypes.c_void_p,),
doc='glGetBufferSubData(GLenum(target), GLintptr(offset), GLsizeiptr(size), c_void_p(data)) -> None',
argNames=('target','offset','size','data',),
deprecated=_DEPRECATED,
)
glMapBuffer = platform.createExtensionFunction(
'glMapBuffer',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=ctypes.c_void_p,
argTypes=(constants.GLenum,constants.GLenum,),
doc='glMapBuffer(GLenum(target), GLenum(access)) -> ctypes.c_void_p',
argNames=('target','access',),
deprecated=_DEPRECATED,
)
glUnmapBuffer = platform.createExtensionFunction(
'glUnmapBuffer',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=constants.GLboolean,
argTypes=(constants.GLenum,),
doc='glUnmapBuffer(GLenum(target)) -> constants.GLboolean',
argNames=('target',),
deprecated=_DEPRECATED,
)
glGetBufferParameteriv = platform.createExtensionFunction(
'glGetBufferParameteriv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLenum,arrays.GLintArray,),
doc='glGetBufferParameteriv(GLenum(target), GLenum(pname), GLintArray(params)) -> None',
argNames=('target','pname','params',),
deprecated=_DEPRECATED,
)
glGetBufferPointerv = platform.createExtensionFunction(
'glGetBufferPointerv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLenum,arrays.GLvoidpArray,),
doc='glGetBufferPointerv(GLenum(target), GLenum(pname), GLvoidpArray(params)) -> None',
argNames=('target','pname','params',),
deprecated=_DEPRECATED,
)
# import legacy entry points to allow checking for bool(entryPoint)
from OpenGL.raw.GL.VERSION.GL_1_5_DEPRECATED import *
| {
"content_hash": "53af40720760eb6aed42e1f21ac81354",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 108,
"avg_line_length": 36.583333333333336,
"alnum_prop": 0.7712504495863806,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "a92600ef8638c2662a1d3ba09d511da7a4567dc8",
"size": "8341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_1_5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import os
# Read version from VERSION file
__version__ = open(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'VERSION')
).read().rstrip()
| {
"content_hash": "b1e1831fc40627bbd5ec33ee8b13f054",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 72,
"avg_line_length": 22.285714285714285,
"alnum_prop": 0.6602564102564102,
"repo_name": "pablodav/burp_server_reports",
"id": "f70d36636215f58502603634e3405240e239ae00",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "burp_reports/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "810"
},
{
"name": "Makefile",
"bytes": "613"
},
{
"name": "Python",
"bytes": "107809"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
} |
import re
from django.http import HttpResponse
from django.shortcuts import render
from rest_framework.renderers import JSONRenderer
from . import utils
def index(request, short_url):
short_url = re.sub("/seguimiento/", "", short_url)
item = utils.get_proyecto_from_short_url(short_url)
item.expediente_events = utils.get_events_from_expediente(item.id)
friendly_code = "{}-{}".format(
item.codigo,
item.legislatura,
)
return render(request, "seguimientos/index.html",
{
"item": item,
"friendly_code": friendly_code,
})
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
super(JSONResponse, self).__init__(content, **kwargs)
| {
"content_hash": "5325603c449a8f4e5767e3b1947fb58c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 29.032258064516128,
"alnum_prop": 0.6155555555555555,
"repo_name": "proyectosdeley/proyectos_de_ley",
"id": "e10890ea63982992954be9364cb063095a98c494",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proyectos_de_ley/seguimientos/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "807"
},
{
"name": "CSS",
"bytes": "947"
},
{
"name": "HTML",
"bytes": "41025"
},
{
"name": "JavaScript",
"bytes": "2124"
},
{
"name": "Makefile",
"bytes": "1403"
},
{
"name": "Python",
"bytes": "149532"
},
{
"name": "Shell",
"bytes": "130"
}
],
"symlink_target": ""
} |
import mock
import pytest
import furl
import pytz
import datetime
from urlparse import urlparse
from nose.tools import * # noqa:
from addons.wiki.models import WikiPage
from addons.wiki.tests.factories import (
WikiFactory,
WikiVersionFactory,
)
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import Guid
from osf.utils import permissions
from osf_tests.factories import (
AuthUserFactory,
CommentFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
)
from tests.base import ApiWikiTestCase, fake
def make_rename_payload(wiki_page):
new_page_name = fake.word()
payload = {
'data': {
'id': wiki_page._id,
'type': 'wikis',
'attributes': {
'name': new_page_name
}
}
}
return payload, new_page_name
@pytest.mark.django_db
class WikiCRUDTestCase:
@pytest.fixture()
def user_creator(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user_creator):
project_public = ProjectFactory(
is_public=True,
creator=user_creator
)
wiki_page = WikiFactory(node=project_public, user=user_creator)
WikiVersionFactory(wiki_page=wiki_page, user=user_creator)
return project_public
@pytest.fixture()
def project_private(self, user_creator):
project_private = ProjectFactory(
is_public=False,
creator=user_creator
)
wiki_page = WikiFactory(node=project_private, user=user_creator)
WikiVersionFactory(wiki_page=wiki_page, user=user_creator)
return project_private
@pytest.fixture()
def user_non_contributor(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contributor(self, project_public, project_private):
user = AuthUserFactory()
project_public.add_contributor(user, permissions=[permissions.WRITE])
project_private.add_contributor(user, permissions=[permissions.WRITE])
return user
@pytest.fixture()
def user_read_contributor(self, project_public, project_private):
user = AuthUserFactory()
project_public.add_contributor(user, permissions=[permissions.READ])
project_private.add_contributor(user, permissions=[permissions.READ])
return user
@pytest.fixture()
def wiki_public(self, project_public, user_creator):
wiki_page = WikiFactory(node=project_public, user=user_creator, page_name=fake.word())
WikiVersionFactory(wiki_page=wiki_page, user=user_creator)
return wiki_page
@pytest.fixture()
def wiki_private(self, project_private, user_creator):
wiki_page = WikiFactory(node=project_private, user=user_creator, page_name=fake.word())
WikiVersionFactory(wiki_page=wiki_page, user=user_creator)
return wiki_page
@pytest.fixture()
def wiki_publicly_editable(self, project_public, user_creator):
pass
@pytest.fixture()
def wiki_registration_public(self, project_public, user_creator):
registration = RegistrationFactory(project=project_public, is_public=True)
wiki_page = WikiFactory(node=registration, user=user_creator, page_name=fake.word())
WikiVersionFactory(wiki_page=wiki_page, user=user_creator)
return wiki_page
@pytest.fixture()
def wiki_registration_private(self, project_public, user_creator):
registration = RegistrationFactory(project=project_public, is_public=False)
wiki_page = WikiFactory(node=registration, user=user_creator, page_name=fake.word())
WikiVersionFactory(wiki_page=wiki_page, user=user_creator)
return wiki_page
@pytest.fixture()
def url_wiki_public(self, wiki_public):
return '/{}wikis/{}/'.format(API_BASE, wiki_public._id)
@pytest.fixture()
def url_wiki_home(self, project_public):
wiki_home = project_public.wikis.get(page_name='home')
return '/{}wikis/{}/'.format(API_BASE, wiki_home._id)
@pytest.fixture()
def url_wiki_private(self, wiki_private):
return '/{}wikis/{}/'.format(API_BASE, wiki_private._id)
@pytest.fixture()
def url_wiki_publicly_editable(self, wiki_publicly_editable):
# return '/{}wikis/{}/'.format(API_BASE, wiki_publicly_editable._id)
pass
@pytest.fixture()
def url_registration_wiki_public(self, wiki_registration_public):
return '/{}wikis/{}/'.format(API_BASE, wiki_registration_public._id)
@pytest.fixture()
def url_registration_wiki_private(self, wiki_registration_private):
return '/{}wikis/{}/'.format(API_BASE, wiki_registration_private._id)
class TestWikiDetailView(ApiWikiTestCase):
def _set_up_public_project_with_wiki_page(self, project_options=None):
project_options = project_options or {}
self.public_project = ProjectFactory(is_public=True, creator=self.user, **project_options)
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
with mock.patch('osf.models.AbstractNode.update_search'):
self.public_wiki_page = WikiFactory(node=self.public_project, user=self.user)
self.public_wiki = WikiVersionFactory(wiki_page=self.public_wiki_page, user=self.user)
self.public_url = '/{}wikis/{}/'.format(API_BASE, self.public_wiki_page._id)
return self.public_wiki_page
def _set_up_private_project_with_wiki_page(self):
self.private_project = ProjectFactory(creator=self.user)
self.private_wiki = self._add_project_wiki_page(
self.private_project, self.user)
self.private_url = '/{}wikis/{}/'.format(
API_BASE, self.private_wiki._id)
def _set_up_public_registration_with_wiki_page(self):
self._set_up_public_project_with_wiki_page()
self.public_registration = RegistrationFactory(
project=self.public_project, user=self.user, is_public=True)
self.public_registration_wiki_id = WikiPage.objects.get_for_node(self.public_registration, 'home')._id
self.public_registration.save()
self.public_registration_url = '/{}wikis/{}/'.format(
API_BASE, self.public_registration_wiki_id)
def _set_up_private_registration_with_wiki_page(self):
self._set_up_private_project_with_wiki_page()
self.private_registration = RegistrationFactory(
project=self.private_project, user=self.user)
self.private_registration_wiki_id = WikiPage.objects.get_for_node(self.private_registration, 'home')._id
self.private_registration.save()
self.private_registration_url = '/{}wikis/{}/'.format(
API_BASE, self.private_registration_wiki_id)
def test_public_node_logged_out_user_can_view_wiki(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_wiki_page._id)
def test_public_node_logged_in_non_contributor_can_view_wiki(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_wiki_page._id)
def test_public_node_logged_in_contributor_can_view_wiki(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_wiki_page._id)
def test_private_node_logged_out_user_cannot_view_wiki(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'],
'Authentication credentials were not provided.')
def test_private_node_logged_in_non_contributor_cannot_view_wiki(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(
self.private_url,
auth=self.non_contributor.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(
res.json['errors'][0]['detail'],
'You do not have permission to perform this action.')
def test_private_node_logged_in_contributor_can_view_wiki(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_wiki._id)
def test_private_node_user_with_anonymous_link_can_view_wiki(self):
self._set_up_private_project_with_wiki_page()
private_link = PrivateLinkFactory(anonymous=True)
private_link.nodes.add(self.private_project)
private_link.save()
url = furl.furl(
self.private_url).add(
query_params={
'view_only': private_link.key}).url
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_wiki._id)
def test_private_node_user_with_view_only_link_can_view_wiki(self):
self._set_up_private_project_with_wiki_page()
private_link = PrivateLinkFactory(anonymous=False)
private_link.nodes.add(self.private_project)
private_link.save()
url = furl.furl(
self.private_url).add(
query_params={
'view_only': private_link.key}).url
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_wiki._id)
def test_public_registration_logged_out_user_cannot_view_wiki(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
def test_public_registration_logged_in_non_contributor_cannot_view_wiki(
self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(
self.public_registration_url,
auth=self.non_contributor.auth,
expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
def test_public_registration_contributor_can_view_wiki(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
def test_user_cannot_view_withdrawn_registration_wikis(self):
self._set_up_public_registration_with_wiki_page()
# TODO: Remove mocking when StoredFileNode is implemented
with mock.patch('osf.models.AbstractNode.update_search'):
withdrawal = self.public_registration.retract_registration(
user=self.user, save=True)
token = withdrawal.approval_state.values()[0]['approval_token']
withdrawal.approve_retraction(self.user, token)
withdrawal.save()
res = self.app.get(
self.public_registration_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(
res.json['errors'][0]['detail'],
'You do not have permission to perform this action.')
def test_private_registration_logged_out_user_cannot_view_wiki(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'],
'Authentication credentials were not provided.')
def test_private_registration_logged_in_non_contributor_cannot_view_wiki(
self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(
self.private_registration_url,
auth=self.non_contributor.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(
res.json['errors'][0]['detail'],
'You do not have permission to perform this action.')
def test_private_registration_contributor_can_view_wiki(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_registration_wiki_id)
def test_wiki_has_user_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['user']['links']['related']['href']
expected_url = '/{}users/{}/'.format(API_BASE, self.user._id)
assert_equal(res.status_code, 200)
assert_equal(urlparse(url).path, expected_url)
def test_wiki_has_node_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['node']['links']['related']['href']
expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
assert_equal(res.status_code, 200)
assert_equal(urlparse(url).path, expected_url)
def test_wiki_has_comments_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
url = res.json['data']['relationships']['comments']['links']['related']['href']
CommentFactory(
node=self.public_project,
target=Guid.load(
self.public_wiki_page._id),
user=self.user)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['type'], 'comments')
def test_only_project_contrib_can_comment_on_closed_project(self):
self._set_up_public_project_with_wiki_page(
project_options={'comment_level': 'private'})
res = self.app.get(self.public_url, auth=self.user.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_any_loggedin_user_can_comment_on_open_project(self):
self._set_up_public_project_with_wiki_page(
project_options={'comment_level': 'public'})
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
def test_non_logged_in_user_cant_comment(self):
self._set_up_public_project_with_wiki_page(
project_options={'comment_level': 'public'})
res = self.app.get(self.public_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_wiki_has_download_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['links']['download']
expected_url = '/{}wikis/{}/content/'.format(
API_BASE, self.public_wiki_page._id)
assert_equal(res.status_code, 200)
assert_in(expected_url, url)
def test_wiki_invalid_id_not_found(self):
url = '/{}wikis/{}/'.format(API_BASE, 'abcde')
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_deleted_wiki_not_returned(self):
self._set_up_public_project_with_wiki_page()
url = '/{}wikis/{}/'.format(
API_BASE, self.public_wiki_page._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
self.public_wiki_page.deleted = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
self.public_wiki_page.save()
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 410)
def test_public_node_wiki_relationship_links(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, self.public_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, self.public_project._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_private_node_wiki_relationship_links(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.user.auth)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, self.private_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, self.private_project._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_public_registration_wiki_relationship_links(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url)
expected_nodes_relationship_url = '{}registrations/{}/'.format(
API_BASE, self.public_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
API_BASE, self.public_registration._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_private_registration_wiki_relationship_links(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, auth=self.user.auth)
expected_nodes_relationship_url = '{}registrations/{}/'.format(
API_BASE, self.private_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
API_BASE, self.private_registration._id)
assert_in(
expected_nodes_relationship_url,
res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(
expected_comments_relationship_url,
res.json['data']['relationships']['comments']['links']['related']['href'])
def test_do_not_return_disabled_wiki(self):
self._set_up_public_project_with_wiki_page()
self.public_project.delete_addon('wiki', auth=Auth(self.user))
res = self.app.get(self.public_url, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class TestWikiDelete(WikiCRUDTestCase):
def test_delete_public_wiki_page(
self, app, user_write_contributor, url_wiki_public
):
res = app.delete(url_wiki_public, auth=user_write_contributor.auth)
assert res.status_code == 204
def test_do_not_delete_public_wiki_page(
self, app, user_creator, user_read_contributor, user_non_contributor,
url_wiki_public, url_wiki_home, url_wiki_publicly_editable
):
# test_do_not_delete_home_wiki_page
res = app.delete(url_wiki_home, auth=user_creator.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'The home wiki page cannot be deleted.'
# test_do_not_delete_public_wiki_page_as_read_contributor
res = app.delete(url_wiki_public, auth=user_read_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_delete_public_wiki_page_as_non_contributor
res = app.delete(url_wiki_public, auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_delete_public_wiki_page_as_unauthenticated
res = app.delete(url_wiki_public, expect_errors=True)
assert res.status_code == 401
def test_delete_private_wiki_page(self, app, user_write_contributor, url_wiki_private):
res = app.delete(url_wiki_private, auth=user_write_contributor.auth)
assert res.status_code == 204
def test_do_not_delete_private_wiki_page(
self, app, user_read_contributor, user_non_contributor, url_wiki_private
):
# test_do_not_delete_private_wiki_page_as_read_contributor
res = app.delete(url_wiki_private, auth=user_read_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_delete_private_wiki_page_as_non_contributor
res = app.delete(url_wiki_private, auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_delete_private_wiki_page_as_unauthenticated
res = app.delete(url_wiki_private, expect_errors=True)
assert res.status_code == 401
def test_do_not_delete_registration_wiki_page(
self, app, user_creator,
url_registration_wiki_public, url_registration_wiki_private
):
# test_do_not_delete_wiki_on_public_registration
res = app.delete(url_registration_wiki_public, auth=user_creator.auth, expect_errors=True)
assert res.status_code == 405
# test_do_not_delete_wiki_on_embargoed_registration
res = app.delete(url_registration_wiki_private, auth=user_creator.auth, expect_errors=True)
assert res.status_code == 405
@pytest.mark.django_db
class TestWikiUpdate(WikiCRUDTestCase):
def test_rename_public_wiki_page(
self, app, user_write_contributor, url_wiki_public, wiki_public
):
payload, new_name = make_rename_payload(wiki_public)
res = app.patch_json_api(url_wiki_public, payload, auth=user_write_contributor.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['name'] == new_name
def test_do_not_update_content_public_wiki_page(
self, app, user_write_contributor, url_wiki_public, wiki_public
):
res = app.patch_json_api(
url_wiki_public,
{
'data': {
'id': wiki_public._id,
'type': 'wikis',
'attributes': {
'name': 'new page name',
'content': 'brave new wiki'
}
}
},
auth=user_write_contributor.auth
)
assert res.status_code == 200
assert wiki_public.get_version().content != 'brave new wiki'
def test_do_not_rename_public_wiki_page(
self, app, wiki_public, project_public,
user_creator, user_read_contributor, user_non_contributor,
url_wiki_public, url_wiki_home, url_wiki_publicly_editable
):
# test_do_not_rename_home_wiki_page
wiki_home = project_public.wikis.get(page_name='home')
payload, _ = make_rename_payload(wiki_home)
res = app.patch_json_api(url_wiki_home, payload, auth=user_creator.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Cannot rename wiki home page'
# test_do_not_rename_public_wiki_page_as_read_contributor
payload, _ = make_rename_payload(wiki_public)
res = app.patch_json_api(url_wiki_public, payload, auth=user_read_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_rename_public_wiki_page_as_non_contributor
res = app.patch_json_api(url_wiki_public, payload, auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_rename_public_wiki_page_as_unauthenticated
res = app.patch_json_api(url_wiki_public, payload, expect_errors=True)
assert res.status_code == 401
def test_rename_private_wiki_page(
self, app, user_write_contributor, wiki_private, url_wiki_private
):
payload, new_name = make_rename_payload(wiki_private)
res = app.patch_json_api(url_wiki_private, payload, auth=user_write_contributor.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['name'] == new_name
def test_do_not_rename_private_wiki_page(
self, app, wiki_private,
user_read_contributor, user_non_contributor, url_wiki_private
):
# test_do_not_rename_public_wiki_page_as_read_contributor
payload, _ = make_rename_payload(wiki_private)
res = app.patch_json_api(url_wiki_private, payload, auth=user_read_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_rename_public_wiki_page_as_non_contributor
res = app.patch_json_api(url_wiki_private, payload, auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_rename_public_wiki_page_as_unauthenticated
res = app.patch_json_api(url_wiki_private, payload, expect_errors=True)
assert res.status_code == 401
def test_do_not_rename_registration_wiki_page(
self, app, wiki_registration_public, wiki_registration_private,
user_creator, url_registration_wiki_public, url_registration_wiki_private
):
# test_do_not_rename_wiki_on_public_registration
payload, _ = make_rename_payload(wiki_registration_public)
res = app.patch_json_api(url_registration_wiki_public, payload, auth=user_creator.auth, expect_errors=True)
assert res.status_code == 405
# test_do_not_rename_wiki_on_embargoed_registration
payload, _ = make_rename_payload(wiki_registration_private)
res = app.patch_json_api(url_registration_wiki_private, payload, auth=user_creator.auth, expect_errors=True)
assert res.status_code == 405
| {
"content_hash": "6f11d8292b7121e5575d375a4c303a9c",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 116,
"avg_line_length": 43.97738287560582,
"alnum_prop": 0.6433399456322092,
"repo_name": "pattisdr/osf.io",
"id": "c1d03befd4fce3dedf34cedd864bc242e94c4f1b",
"size": "27222",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api_tests/wikis/views/test_wiki_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "306671"
},
{
"name": "JavaScript",
"bytes": "1790426"
},
{
"name": "Mako",
"bytes": "647535"
},
{
"name": "Python",
"bytes": "9601810"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import bisect
import collections
from typing import Dict, List, Optional, Set, Tuple, Union
from uqbar.objects import new
import supriya # noqa
from supriya.nonrealtime.NodeTransition import NodeTransition
from supriya.nonrealtime.SessionObject import SessionObject
from supriya.nonrealtime.State import State
class Node(SessionObject):
"""
A non-realtime node.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Objects"
__slots__ = ("_duration", "_events", "_session", "_session_id", "_start_offset")
_valid_add_actions: Tuple[int, ...] = ()
### INITIALIZER ###
def __init__(
self,
session,
session_id: int,
duration: float = None,
start_offset: float = None,
) -> None:
SessionObject.__init__(self, session)
self._session_id = int(session_id)
start_offset = start_offset or 0
self._start_offset = float(start_offset)
if duration is None:
duration = float("inf")
self._duration = duration
self._events: Dict[str, List[Tuple[float, float]]] = {}
### SPECIAL METHODS ###
def __repr__(self) -> str:
return "<{} #{} @{}:{}>".format(
type(self).__name__, self.session_id, self.start_offset, self.stop_offset
)
### SPECIAL METHODS ###
def __getitem__(self, item: str) -> float:
assert self.session._active_moments
offset = self.session._active_moments[-1].offset
return self._get_at_offset(offset, item) or 0
def __setitem__(
self,
item: str,
value: Union[float, "supriya.nonrealtime.Bus", "supriya.nonrealtime.BusGroup"],
) -> None:
import supriya.nonrealtime
assert self.session._active_moments
offset = self.session._active_moments[-1].offset
assert isinstance(
value, (int, float, supriya.nonrealtime.Bus, supriya.nonrealtime.BusGroup)
)
self._set_at_offset(offset, item, value)
### PRIVATE METHODS ###
def _add_node(self, node: "Node", add_action: int) -> "Node":
state = self.session._find_state_at(node.start_offset, clone_if_missing=True)
state.start_nodes.add(node)
if node not in state.nodes_to_children:
state.nodes_to_children[node] = None
state = self.session._find_state_at(node.stop_offset, clone_if_missing=True)
state.stop_nodes.add(node)
self.move_node(node, add_action=add_action)
self.session.nodes.add(node)
self.session._apply_transitions([node.start_offset, node.stop_offset])
return node
def _collect_settings(self, offset: float, id_mapping=None, persistent=False):
settings: Dict[str, float] = {}
if persistent:
for key in self._events:
value = self._get_at_offset(offset, key) or 0.0
if id_mapping and value in id_mapping:
value = id_mapping[value]
settings[key] = value
else:
for key, events in self._events.items():
events = events[:]
for i, (event_offset, value) in enumerate(events):
# TODO: This is dreadfully inefficient.
if id_mapping and value in id_mapping:
value = id_mapping[value]
events[i] = (event_offset, value)
index = bisect.bisect_left(events, (offset, 0.0))
if len(events) <= index:
continue
event_offset, value = events[index]
if offset == event_offset:
settings[key] = value
return settings
def _fixup_duration(self, new_duration: float) -> None:
old_duration = self._duration
if old_duration == new_duration:
return
with self.session.at(self.stop_offset, propagate=False) as moment:
if self in moment.state.stop_nodes:
moment.state.stop_nodes.remove(self)
moment.state._sparsify()
self.session.nodes.remove(self)
self._duration = new_duration
self.session.nodes.add(self)
with self.session.at(self.stop_offset, propagate=False) as moment:
moment.state.stop_nodes.add(self)
def _fixup_events(self, new_node: "Node", split_offset: float) -> None:
left_events: Dict[str, List[Tuple[float, float]]] = {}
right_events: Dict[str, List[Tuple[float, float]]] = {}
for name, events in self._events.items():
for offset, value in events:
if offset < split_offset:
left_events.setdefault(name, []).append((offset, value))
else:
right_events.setdefault(name, []).append((offset, value))
for name, events in left_events.items():
if name in right_events and right_events[name][0][0] == split_offset:
continue
event = (split_offset, events[-1][-1])
right_events.setdefault(name, []).insert(0, event)
self._events = left_events
new_node._events = right_events
def _fixup_node_actions(
self, new_node: "Node", start_offset: "float", stop_offset: "float"
) -> None:
for offset in sorted(self.session.states):
if offset < start_offset:
continue
elif stop_offset < offset:
break
transitions = self.session.states[offset].transitions
if self in transitions:
transitions[new_node] = transitions.pop(self)
for node, action in transitions.items():
if node is new_node:
continue
if action.target is self:
action._target = new_node
def _get_at_offset(self, offset: float, item: str) -> Optional[float]:
"""
Relative to Node start offset.
"""
events = self._events.get(item)
if not events:
return None
index = bisect.bisect_left(events, (offset, 0.0))
if len(events) <= index:
old_offset, value = events[-1]
else:
old_offset, value = events[index]
if old_offset == offset:
return value
index -= 1
if index < 0:
return None
_, value = events[index]
return value
def _set_at_offset(self, offset, item, value):
"""
Relative to Synth start offset.
"""
if offset < self.start_offset or self.stop_offset <= offset:
return
events = self._events.setdefault(item, [])
new_event = (offset, value)
if not events:
events.append(new_event)
return
index = bisect.bisect_left(events, new_event)
if len(events) <= index:
events.append(new_event)
old_offset, old_value = events[index]
if old_offset == offset:
events[index] = (offset, value)
else:
events.insert(index, new_event)
def _split(
self,
split_offset: float,
new_nodes=None,
split_occupiers: bool = True,
split_traversers: bool = True,
) -> List["Node"]:
import supriya.nonrealtime
new_nodes = new_nodes or []
state = self.session.states[split_offset]
entering, exiting, occupying, starting, _ = self.inspect_children()
children = state.nodes_to_children.get(self) or ()
start_offset, stop_offset = self.start_offset, self.stop_offset
if start_offset < split_offset < stop_offset:
old_actions = state.transitions
new_duration = stop_offset - split_offset
with supriya.nonrealtime.DoNotPropagate():
if isinstance(self, supriya.nonrealtime.Synth):
new_node = self.add_synth(
add_action="ADD_BEFORE",
duration=new_duration,
synthdef=self.synthdef,
**self._synth_kwargs,
)
else:
new_node = self.add_group(
add_action="ADD_BEFORE", duration=new_duration
)
new_nodes.append(new_node)
new_actions: Dict["Node", NodeTransition] = collections.OrderedDict()
for node in new_nodes:
if node is new_node and self in old_actions:
old_actions.pop(node)
action = old_actions.pop(self)
new_actions[node] = new(action, source=new_node)
else:
new_actions[node] = old_actions.pop(node)
for child in reversed(children):
if child in old_actions:
old_actions.pop(child)
action = supriya.nonrealtime.NodeTransition(
source=child, target=new_node, action="ADD_TO_TAIL"
)
new_actions[child] = action
new_actions.update(old_actions)
state._transitions = new_actions
self._fixup_events(new_node, split_offset)
self._fixup_duration(split_offset - start_offset)
self._fixup_node_actions(new_node, split_offset, stop_offset)
self.session._apply_transitions(
[new_node.start_offset, new_node.stop_offset]
)
result = [self, new_node]
else:
return [self]
for child in children + exiting:
if (
(split_occupiers and child in occupying)
or (split_traversers and child in entering)
or (split_traversers and child in exiting)
):
child._split(
split_offset,
new_nodes=new_nodes,
split_occupiers=split_occupiers,
split_traversers=split_traversers,
)
return result
### CONSTRUCTORS ###
@SessionObject.require_offset
def add_group(
self, add_action: int = None, duration: float = None, offset: float = None
) -> "supriya.nonrealtime.Group":
import supriya.nonrealtime
if add_action is None:
add_action = self._valid_add_actions[0]
add_action = supriya.AddAction.from_expr(add_action)
assert add_action in self._valid_add_actions
session_id = self.session._get_next_session_id("node")
node = supriya.nonrealtime.Group(
self.session, duration=duration, session_id=session_id, start_offset=offset
)
self._add_node(node, add_action)
return node
@SessionObject.require_offset
def add_synth(
self,
add_action: int = None,
duration: float = None,
synthdef=None,
offset: float = None,
**synth_kwargs,
) -> "supriya.nonrealtime.Synth":
import supriya.assets.synthdefs
import supriya.nonrealtime
if add_action is None:
add_action = self._valid_add_actions[0]
add_action = supriya.AddAction.from_expr(add_action)
assert add_action in self._valid_add_actions
session_id = self.session._get_next_session_id("node")
synthdef = synthdef or supriya.assets.synthdefs.default
node = supriya.nonrealtime.Synth(
self.session,
session_id=session_id,
duration=duration,
start_offset=offset,
synthdef=synthdef,
**synth_kwargs,
)
self._add_node(node, add_action)
return node
### MUTATORS ###
@SessionObject.require_offset
def move_node(
self, node: "Node", add_action: int = None, offset: float = None
) -> "Node":
import supriya.nonrealtime
state: State = self.session.active_moments[-1].state
if state.nodes_to_parents is None:
state._desparsify()
if node in state.nodes_to_parents and node in self.get_parentage():
raise ValueError("Can't add parent as a child.")
if add_action is None:
add_action = self._valid_add_actions[0]
add_action = supriya.AddAction.from_expr(add_action)
assert add_action in self._valid_add_actions
node_action = supriya.nonrealtime.NodeTransition(
source=node, target=self, action=add_action
)
state.transitions[node] = node_action
self.session._apply_transitions([state.offset, node.stop_offset])
return node
def delete(self) -> None:
start_state = self.session._find_state_at(self.start_offset)
start_state.start_nodes.remove(self)
stop_state = self.session._find_state_at(self.stop_offset)
stop_state.stop_nodes.remove(self)
start_offset = self.session._find_state_before(
self.start_offset, with_node_tree=True
).offset
for state_one, state_two in self.session._iterate_state_pairs(
start_offset, with_node_tree=True
):
state_two._desparsify()
if self in state_two.nodes_to_children:
parent = state_two.nodes_to_parents.pop(self)
inner_children = state_two.nodes_to_children.pop(self) or ()
outer_children = list(state_two.nodes_to_children[parent])
index = outer_children.index(self)
outer_children[index : index + 1] = inner_children
for child in inner_children:
state_two.nodes_to_parents[child] = parent
state_two.nodes_to_children[parent] = tuple(outer_children) or None
state_two._transitions = state_two._rebuild_transitions(
state_one, state_two
)
if state_two == self.stop_offset:
break
self.session.nodes.remove(self)
self.session._apply_transitions([self.start_offset, self.stop_offset])
def set_duration(self, new_duration: float, clip_children: bool = False) -> "Node":
import supriya.nonrealtime
assert new_duration > 0
if self.duration == new_duration:
return self
if new_duration < self.duration:
split_offset = self.start_offset + new_duration
if clip_children:
with self.session.at(split_offset) as moment:
old_node, new_node = self.split(
split_occupiers=True, split_traversers=True
)
state = moment.state
children = reversed(
list(state._iterate_nodes(new_node, state.nodes_to_children))
)
for child in children:
child.delete()
else:
with self.session.at(split_offset):
old_node, new_node = self.split(
split_occupiers=False, split_traversers=False
)
new_node.delete()
self.session._find_state_at(new_node.stop_offset)._sparsify()
return old_node
else:
old_stop_offset = self.stop_offset
new_stop_offset = self.start_offset + new_duration
with self.session.at(old_stop_offset, propagate=False) as moment:
parent = self.get_parent()
moment.state.stop_nodes.remove(self)
moment.state._sparsify()
self._fixup_duration(new_duration)
with self.session.at(old_stop_offset, propagate=False) as moment:
moment.state._sparsify()
while parent is not None and parent.stop_offset < new_stop_offset:
with self.session.at(parent.stop_offset, propagate=False) as moment:
action = supriya.nonrealtime.NodeTransition(
source=self, target=parent, action="ADD_BEFORE"
)
moment.state.transitions[self] = action
parent = parent.get_parent()
with self.session.at(new_stop_offset, propagate=False) as moment:
moment.state.stop_nodes.add(self)
with self.session.at(self.start_offset, propagate=False) as moment:
self.session._apply_transitions(moment.state.offset)
self.session._apply_transitions(
[self.start_offset, old_stop_offset, new_stop_offset]
)
return self
@SessionObject.require_offset
def split(
self,
split_occupiers: bool = True,
split_traversers: bool = True,
offset: float = None,
) -> List["Node"]:
if offset is None:
raise ValueError
state = self.session.active_moments[-1].state
self.session._apply_transitions(state.offset)
shards = self._split(
offset, split_occupiers=split_occupiers, split_traversers=split_traversers
)
stop_offset = shards[-1].stop_offset
while state is not None and state.offset <= stop_offset:
self.session._apply_transitions(state.offset)
state = self.session._find_state_after(state.offset)
return shards
### RELATIONS ###
@SessionObject.require_offset
def inspect_children(
self, offset: float = None
) -> Tuple[
Tuple["Node", ...],
Tuple["Node", ...],
Tuple["Node", ...],
Tuple["Node", ...],
Tuple["Node", ...],
]:
this_state = self.session._find_state_at(offset, clone_if_missing=True)
prev_state = self.session._find_state_before(this_state.offset, True)
prev_state._desparsify()
this_state._desparsify()
prev_children = prev_state.nodes_to_children.get(self) or ()
this_children = this_state.nodes_to_children.get(self) or ()
entering: Set["Node"] = set()
exiting: Set["Node"] = set()
occupying: Set["Node"] = set()
starting: Set["Node"] = set()
stopping: Set["Node"] = set()
for node in prev_children:
if node.stop_offset == offset:
stopping.add(node)
continue
if node in this_children:
occupying.add(node)
else:
exiting.add(node)
for node in this_children:
if node.start_offset == offset:
starting.add(node)
continue
if node.stop_offset == offset:
stopping.add(node)
continue
if node in prev_children:
occupying.add(node)
else:
entering.add(node)
return (
tuple(sorted(entering, key=lambda x: x.session_id)),
tuple(sorted(exiting, key=lambda x: x.session_id)),
tuple(sorted(occupying, key=lambda x: x.session_id)),
tuple(sorted(starting, key=lambda x: x.session_id)),
tuple(sorted(stopping, key=lambda x: x.session_id)),
)
@SessionObject.require_offset
def get_parent(self, offset: float = None) -> Optional["Node"]:
state = self.session._find_state_at(offset, clone_if_missing=True)
if not state.nodes_to_children:
state = self.session._find_state_before(state.offset, True)
elif self.stop_offset == state.offset:
state = self.session._find_state_before(state.offset, True)
return state.nodes_to_parents.get(self)
@SessionObject.require_offset
def get_parentage(self, offset: float = None) -> List["Node"]:
state = self.session._find_state_at(offset, clone_if_missing=True)
if not state.nodes_to_children:
state = self.session._find_state_before(state.offset, True)
node = self
parentage = [node]
while state.nodes_to_parents[node] is not None:
parent = state.nodes_to_parents[node]
parentage.append(parent)
node = parent
return parentage
### PUBLIC PROPERTIES ###
@property
def duration(self) -> float:
return self._duration
@property
def session_id(self) -> int:
return self._session_id
@property
def start_offset(self) -> float:
return self._start_offset
@property
def stop_offset(self) -> float:
if self.duration is None:
return float("inf")
return self.start_offset + self.duration
| {
"content_hash": "b6165965bbde0655eda4608b0e569d42",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 87,
"avg_line_length": 38.80451127819549,
"alnum_prop": 0.5593392753342376,
"repo_name": "Pulgama/supriya",
"id": "20956617d90219189a934b9935369dd47599997d",
"size": "20644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/nonrealtime/Node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
} |
import os
import math
import platform
import zipfile
from os import path
from PIL import Image
def find_files(directory, extensions):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.lower().endswith(extensions):
filename = path.join(root, basename)
yield filename
def makedirs(from_path, to_path, files):
for filepath in files:
dirname = path.relpath(path.dirname(filepath), from_path)
newdir = path.join(to_path, dirname)
if not path.exists(newdir):
os.makedirs(newdir)
def resize(from_path, to_path, factor, filepath):
name, ext = path.splitext(filepath)
ext = ext.lower()
dirname = path.relpath(path.dirname(filepath), from_path)
try:
im = Image.open(filepath, 'r')
size = im.size
newsize = int(math.floor(factor * size[0])), int(math.floor(factor * size[1]))
newimage = im.resize(newsize, Image.ANTIALIAS)
del im
except Exception:
print 'RESIZE LOAD ERROR: ' + path.relpath(filepath, from_path)
return
newdir = path.join(to_path, dirname)
newfilepath = path.join(newdir, path.basename(filepath))
type = ext.strip('.').upper()
try:
if type == 'JPG' or type == 'JPEG':
try:
newimage.save(newfilepath, 'JPEG', quality=95, optimize=True)
except IOError:
newimage.save(newfilepath, 'JPEG', quality=95)
elif type == 'PNG':
newimage.save(newfilepath, 'PNG', optimize=True)
except Exception:
print 'RESIZE SAVE ERROR: ' + path.relpath(filepath, from_path)
finally:
del newimage
def optimize_png(filepath):
if not filepath.endswith('.png'):
return
try:
im = Image.open(filepath, 'r')
im.save(filepath, 'PNG', optimize=True)
del im
except:
print 'OPTIMIZE ERROR:', filepath
PNGOUT_OPTION = ' -y' if platform.system().lower() in ('linux', 'darwin') else ' /y'
def optimize_pngout(filepath):
if filepath.endswith('.png'):
os.system('pngout "%s" %s' % (filepath, PNGOUT_OPTION))
return 0
class ResizeFunctor(object):
def __init__(self, from_path, to_path, scale_factor):
self.from_path = from_path
self.to_path = to_path
self.scale_factor = scale_factor
def __call__(self, filepath):
resize(self.from_path, self.to_path, self.scale_factor, filepath)
def zipdir(dirpath):
dirpath = path.abspath(dirpath)
print dirpath
project_dir = path.basename(dirpath)
zipname = project_dir + '.zip'
zip = zipfile.ZipFile(path.join(dirpath, '..', zipname), 'w')
ignorefiles = ['.DS_Store', zipname]
for root, dirs, files in os.walk(dirpath):
for file in files:
if file in ignorefiles:
continue
filepath = path.join(root, file)
zip.write(filepath, path.join(project_dir, path.relpath(filepath, dirpath)))
zip.close()
| {
"content_hash": "54a050f8df2a84a3910f851df6899de5",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 30.68686868686869,
"alnum_prop": 0.6099407504937459,
"repo_name": "eliasku/imwork",
"id": "2c8b23f9dc1de224153f0fd06983a29869d963e2",
"size": "3038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imwork/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9415"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import os.path
import signal
def get_postfix(platform):
if platform == 'win32':
return '.exe'
elif platform == 'darwin':
return '.app'
else:
return ''
PRJ_NAME_BASE = "UnitTests"
PRJ_POSTFIX = get_postfix(sys.platform)
start_on_android = False
start_on_ios = False
if len(sys.argv) > 1:
if sys.argv[1] == "android":
start_on_android = True
elif sys.argv[1] == "ios":
start_on_ios = True
sub_process = None
def start_unittests_on_android_device():
global sub_process
# if screen turned off
device_state = subprocess.check_output(['adb', 'shell', 'dumpsys', 'power'])
if device_state.find("mScreenOn=false") != -1:
# turn screen on
subprocess.check_call(['adb', 'shell', 'input', 'keyevent', '26'])
# unlock device screen
subprocess.check_call(['adb', 'shell', 'input', 'keyevent', '82'])
# clear log before start tests
subprocess.check_call(["adb", "logcat", "-c"])
# start adb logcat and gather output DO NOT filter by TeamcityOutput tag
# because we need interrupt gather log when unittests process finished
sub_process = subprocess.Popen(
["adb", "logcat", "-s", "TeamcityOutput"],
stdout=subprocess.PIPE)
# start unittests on device
subprocess.Popen(
["adb", "shell", "am", "start", "-n", "com.dava.unittests/com.dava.unittests." + PRJ_NAME_BASE])
return sub_process
if start_on_ios:
# ../build/ios-deploy -d --noninteractive -b ../build/UnitTests.app
sub_process = subprocess.Popen(["./ios-deploy", "-d", "--noninteractive", "-b", "../build/" +
PRJ_NAME_BASE + PRJ_POSTFIX],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("copy " + PRJ_NAME_BASE + PRJ_POSTFIX + " on device and run")
elif start_on_android:
sub_process = start_unittests_on_android_device()
elif sys.platform == 'win32':
if os.path.isfile("..\\Release\\app\\" + PRJ_NAME_BASE + PRJ_POSTFIX): # run on build server (TeamCity)
sub_process = subprocess.Popen(["..\\Release\\app\\" + PRJ_NAME_BASE + PRJ_POSTFIX], cwd="./..",
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
sub_process = subprocess.Popen(["..\\Release\\" + PRJ_NAME_BASE + PRJ_POSTFIX], cwd="./..",
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
elif sys.platform == "darwin":
if os.path.exists("./" + PRJ_NAME_BASE + PRJ_POSTFIX):
# if run on teamcity current dir is: Projects/UnitTests/DerivedData/TemplateProjectMacOS/Build/Products/Release
app_path = "./" + PRJ_NAME_BASE + PRJ_POSTFIX + "/Contents/MacOS/" + PRJ_NAME_BASE
else:
# run on local machine from dir: UnitTests/Report
# Warning! To make DerivedData relative to project go to
# Xcode->Preferences->Location->DerivedData select relative
app_path = "../DerivedData/TemplateProjectMacOS/Build/Products/Release/UnitTests.app/Contents/MacOS/" \
+ PRJ_NAME_BASE
sub_process = subprocess.Popen([app_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
app_exit_code = None
continue_process_stdout = True
while continue_process_stdout:
try:
line = sub_process.stdout.readline()
if line != '':
teamcity_line_index = line.find("##teamcity")
if teamcity_line_index != -1:
teamcity_line = line[teamcity_line_index:]
sys.stdout.write(teamcity_line)
sys.stdout.flush()
if line.find("Finish all tests.") != -1: # this text marker helps to detect good \
# finish tests on ios device (run with lldb)
app_exit_code = 0
if start_on_android:
# we want to exit from logcat process because sub_process.stdout.readline() will block
# current thread
if sys.platform == "win32":
sub_process.send_signal(signal.CTRL_C_EVENT)
else:
sub_process.send_signal(signal.SIGINT)
continue_process_stdout = False
else:
continue_process_stdout = False
except IOError as err:
sys.stdout.write(err.message)
sys.stdout.flush()
if app_exit_code is None:
app_exit_code = sub_process.poll()
sys.exit(app_exit_code)
| {
"content_hash": "7abdbdc571aff11013e7ce3ff3dcc0cc",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 119,
"avg_line_length": 40.544642857142854,
"alnum_prop": 0.5910592380532922,
"repo_name": "dava/dava.engine",
"id": "f0a25d5745b0f967f37c3a038aa6aa0435b824b0",
"size": "5264",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "Programs/TestBed/Scripts/start_unit_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "166572"
},
{
"name": "Batchfile",
"bytes": "18562"
},
{
"name": "C",
"bytes": "61621347"
},
{
"name": "C#",
"bytes": "574524"
},
{
"name": "C++",
"bytes": "50229645"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "11439187"
},
{
"name": "CSS",
"bytes": "32773"
},
{
"name": "Cuda",
"bytes": "37073"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "44259"
},
{
"name": "Fortran",
"bytes": "8835"
},
{
"name": "GLSL",
"bytes": "3726"
},
{
"name": "Go",
"bytes": "1235"
},
{
"name": "HTML",
"bytes": "8621333"
},
{
"name": "Java",
"bytes": "232072"
},
{
"name": "JavaScript",
"bytes": "2560"
},
{
"name": "Lua",
"bytes": "43080"
},
{
"name": "M4",
"bytes": "165145"
},
{
"name": "Makefile",
"bytes": "1349214"
},
{
"name": "Mathematica",
"bytes": "4633"
},
{
"name": "Module Management System",
"bytes": "15224"
},
{
"name": "Objective-C",
"bytes": "1909821"
},
{
"name": "Objective-C++",
"bytes": "498191"
},
{
"name": "Pascal",
"bytes": "99390"
},
{
"name": "Perl",
"bytes": "396608"
},
{
"name": "Python",
"bytes": "782784"
},
{
"name": "QML",
"bytes": "43105"
},
{
"name": "QMake",
"bytes": "156"
},
{
"name": "Roff",
"bytes": "71083"
},
{
"name": "Ruby",
"bytes": "22742"
},
{
"name": "SAS",
"bytes": "16030"
},
{
"name": "Shell",
"bytes": "2482394"
},
{
"name": "Slash",
"bytes": "117430"
},
{
"name": "Smalltalk",
"bytes": "5908"
},
{
"name": "TeX",
"bytes": "428489"
},
{
"name": "Vim script",
"bytes": "133255"
},
{
"name": "Visual Basic",
"bytes": "54056"
},
{
"name": "WebAssembly",
"bytes": "13987"
}
],
"symlink_target": ""
} |
"""Helper methods for common tasks."""
from __future__ import annotations
from collections.abc import Callable
import logging
from typing import TYPE_CHECKING, TypeVar
from soco import SoCo
from soco.exceptions import SoCoException, SoCoUPnPException
from typing_extensions import Concatenate, ParamSpec
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import SONOS_SPEAKER_ACTIVITY
from .exception import SonosUpdateError
if TYPE_CHECKING:
from .entity import SonosEntity
from .household_coordinator import SonosHouseholdCoordinator
from .speaker import SonosSpeaker
UID_PREFIX = "RINCON_"
UID_POSTFIX = "01400"
_LOGGER = logging.getLogger(__name__)
_T = TypeVar("_T", bound="SonosSpeaker | SonosEntity | SonosHouseholdCoordinator")
_R = TypeVar("_R")
_P = ParamSpec("_P")
def soco_error(
errorcodes: list[str] | None = None,
) -> Callable[ # type: ignore[misc]
[Callable[Concatenate[_T, _P], _R]], Callable[Concatenate[_T, _P], _R | None]
]:
"""Filter out specified UPnP errors and raise exceptions for service calls."""
def decorator(
funct: Callable[Concatenate[_T, _P], _R] # type: ignore[misc]
) -> Callable[Concatenate[_T, _P], _R | None]: # type: ignore[misc]
"""Decorate functions."""
def wrapper(self: _T, *args: _P.args, **kwargs: _P.kwargs) -> _R | None:
"""Wrap for all soco UPnP exception."""
args_soco = next((arg for arg in args if isinstance(arg, SoCo)), None)
try:
result = funct(self, *args, **kwargs)
except (OSError, SoCoException, SoCoUPnPException) as err:
error_code = getattr(err, "error_code", None)
function = funct.__qualname__
if errorcodes and error_code in errorcodes:
_LOGGER.debug(
"Error code %s ignored in call to %s", error_code, function
)
return None
# In order of preference:
# * SonosSpeaker instance
# * SoCo instance passed as an arg
# * SoCo instance (as self)
speaker_or_soco = getattr(self, "speaker", args_soco or self)
zone_name = speaker_or_soco.zone_name
# Prefer the entity_id if available, zone name as a fallback
# Needed as SonosSpeaker instances are not entities
target = getattr(self, "entity_id", zone_name)
message = f"Error calling {function} on {target}: {err}"
raise SonosUpdateError(message) from err
dispatch_soco = args_soco or self.soco
dispatcher_send(
self.hass,
f"{SONOS_SPEAKER_ACTIVITY}-{dispatch_soco.uid}",
funct.__qualname__,
)
return result
return wrapper
return decorator
def hostname_to_uid(hostname: str) -> str:
"""Convert a Sonos hostname to a uid."""
if hostname.startswith("Sonos-"):
baseuid = hostname.split("-")[1].replace(".local.", "")
elif hostname.startswith("sonos"):
baseuid = hostname[5:].replace(".local.", "")
else:
raise ValueError(f"{hostname} is not a sonos device.")
return f"{UID_PREFIX}{baseuid}{UID_POSTFIX}"
| {
"content_hash": "6d60bf94e2d19d700299a3f5f69ae109",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 83,
"avg_line_length": 36.64835164835165,
"alnum_prop": 0.6017991004497751,
"repo_name": "rohitranjan1991/home-assistant",
"id": "fbc1d2642eabb1d76bc24cb034fc4b1c5de5e3b8",
"size": "3335",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sonos/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
'''Inbound SQS worker.'''
import json
import logging
from utils.sqs import check_queue, delete_message
from utils.updatehelpers import update_curr_temp
queueURL = ''
logging.basicConfig(filename='SQSErrors.log')
def main():
while True:
response = check_queue(queueURL)
if response:
try:
receipt = response['Messages'][0]['ReceiptHandle']
message = response['Messages'][0]['Body']
message = json.loads(message)
username = message['Username']
password = message['Password']
new_temp = message['Temperature']
update_curr_temp(username, password, new_temp)
delete_message(queueURL, receipt)
except KeyError as e:
logging.warning('KeyError in inbound SQS. {}'.format(e))
if __name__ == '__main__':
main()
| {
"content_hash": "2c861a24b3f85554e768941f28da9b38",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 30.93103448275862,
"alnum_prop": 0.5785953177257525,
"repo_name": "ARowden/Python-Thermostat",
"id": "525e4e9f7b8bf916426f2b9eb89ceba7262308df",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ServerSide/inboundSQS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25923"
}
],
"symlink_target": ""
} |
"""
Script for building the example.
Usage:
python setup.py py2app
"""
from distutils.core import setup
import py2app
setup(
name='TableModel',
app=["TableModel.py"],
data_files=["English.lproj"],
)
| {
"content_hash": "625ed6090a32830534e2fcf8eb50e9d1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 33,
"avg_line_length": 15.5,
"alnum_prop": 0.6728110599078341,
"repo_name": "Khan/pyobjc-framework-Cocoa",
"id": "e070366174e8f2cc8f06d7ef450b79dee8cdc990",
"size": "217",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Examples/AppKit/TableModel/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "5481"
},
{
"name": "Objective-C",
"bytes": "213902"
},
{
"name": "Python",
"bytes": "2450939"
}
],
"symlink_target": ""
} |
class Storage:
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
| {
"content_hash": "4943f375aaa002b3231f5c2890df7317",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.5031446540880503,
"repo_name": "Muzer/smartbot",
"id": "46714e2cbe51fecd9686929409f4c15a1b1768cb",
"size": "159",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "smartbot/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109234"
}
],
"symlink_target": ""
} |
"""A collection of random tools for dealing with dates in Python"""
from datetime import datetime, timedelta
from dateutil import parser
from dateutil.relativedelta import relativedelta
import calendar
#-------------------------------------------------------------------------------
# Miscellaneous date functions
def format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime('%Y%m%d')
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)
def ole2datetime(oledt):
"""function for converting excel date to normal date format"""
val = float(oledt)
# Excel has a bug where it thinks the date 2/29/1900 exists
# we just reject any date before 3/1/1900.
if val < 61:
raise Exception("Value is outside of acceptable range: %s " % val)
return OLE_TIME_ZERO + timedelta(days=val)
def to_datetime(arg):
"""Attempts to convert arg to datetime"""
if arg is None or isinstance(arg, datetime):
return arg
try:
return parser.parse(arg)
except Exception:
return arg
def normalize_date(dt):
return datetime(dt.year, dt.month, dt.day)
#-------------------------------------------------------------------------------
# DateOffset
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in, use of the keyword n is discouraged-- you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffets work as follows. Each offset specify a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus psedo code
is:
def __add__(date):
date = rollback(date) # does nothing is date is valid
return date + <n number of periods>
When a date offset is created for a negitive number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing is date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
"""
# For some offsets, want to drop the time information off the
# first date
_normalizeFirst = False
def __init__(self, n=1, **kwds):
self.n = int(n)
self.kwds = kwds
def apply(self, other):
if len(self.kwds) > 0:
if self.n > 0:
for i in xrange(self.n):
other = other + relativedelta(**self.kwds)
else:
for i in xrange(-self.n):
other = other - relativedelta(**self.kwds)
return other
else:
return other + timedelta(self.n)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, **self.kwds)
def _params(self):
attrs = sorted((item for item in self.__dict__.iteritems()
if item[0] != 'kwds'))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc'])
attrs = []
for attr in self.__dict__:
if ((attr == 'kwds' and len(self.kwds) == 0)
or attr.startswith('_')):
continue
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
out = '<%s ' % self.n + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
def __eq__(self, other):
return self._params() == other._params()
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
return self.apply(other)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset!')
elif type(other) == type(self):
return self.__class__(self.n - other.n, **self.kwds)
else: # pragma: no cover
raise TypeError('Cannot subtract %s from %s'
% (type(other), type(self)))
def __rsub__(self, other):
return self.__class__(-self.n, **self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, **self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, **self.kwds)
def rollback(self, someDate):
"""Roll provided date backward to next offset only if not on offset"""
if self._normalizeFirst:
someDate = normalize_date(someDate)
if not self.onOffset(someDate):
someDate = someDate - self.__class__(1, **self.kwds)
return someDate
def rollforward(self, someDate):
"""Roll provided date forward to next offset only if not on offset"""
if self._normalizeFirst:
someDate = normalize_date(someDate)
if not self.onOffset(someDate):
someDate = someDate + self.__class__(1, **self.kwds)
return someDate
def onOffset(self, someDate):
# Default (slow) method for determining if some date is a
# member of the DateRange generated by this offset. Subclasses
# may have this re-implemented in a nicer way.
return someDate == ((someDate + self) - self)
class BDay(DateOffset):
"""
DateOffset subclass representing possibly n business days
"""
_normalizeFirst = True
_outputName = 'BusinessDay'
def __init__(self, n=1, **kwds):
self.n = int(n)
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.normalize = kwds.get('normalize', True)
def __repr__(self):
className = getattr(self, '_outputName', self.__class__.__name__)
attrs = []
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
out = '<%s ' % self.n + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
def isAnchored(self):
return (self.n == 1)
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.normalize:
result = datetime(result.year, result.month, result.day)
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise Exception('Only know how to combine business day with '
'datetime or timedelta!')
@classmethod
def onOffset(cls, someDate):
return someDate.weekday() < 5
class MonthEnd(DateOffset):
_normalizeFirst = True
"""DateOffset of one month end"""
def apply(self, other):
n = self.n
_, nDaysInMonth = calendar.monthrange(other.year, other.month)
if other.day != nDaysInMonth:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@classmethod
def onOffset(cls, someDate):
__junk, nDaysInMonth = calendar.monthrange(someDate.year,
someDate.month)
return someDate.day == nDaysInMonth
class BMonthEnd(DateOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessMonthEnd'
_normalizeFirst = True
def isAnchored(self):
return (self.n == 1)
def apply(self, other):
n = self.n
wkday, nDaysInMonth = calendar.monthrange(other.year, other.month)
lastBDay = nDaysInMonth - max(((wkday + nDaysInMonth - 1) % 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
class Week(DateOffset):
"""
weekday
0: Mondays
1: Tuedays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_normalizeFirst = True
def __init__(self, n=1, **kwds):
self.n = n
self.weekday = kwds.get('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise Exception('Day must be 0<=day<=6, got %d' %
self.weekday)
self.inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
def apply(self, other):
if self.weekday is None:
return other + self.n * self.inc
if self.n > 0:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
k = k - 1
for i in xrange(k):
other = other + self.inc
else:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
for i in xrange(-k):
other = other - self.inc
return other
def onOffset(self, someDate):
return someDate.weekday() == self.weekday
class BQuarterEnd(DateOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_normalizeFirst = True
def __init__(self, n=1, **kwds):
self.n = n
self.startingMonth = kwds.get('startingMonth', 3)
if self.startingMonth < 1 or self.startingMonth > 3:
raise Exception('Start month must be 1<=day<=3, got %d'
% self.startingMonth)
self.offset = BMonthEnd(3)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
def apply(self, other):
n = self.n
wkday, nDaysInMonth = calendar.monthrange(other.year, other.month)
lastBDay = nDaysInMonth - max(((wkday + nDaysInMonth - 1) % 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
n = n - 1
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
other = other + relativedelta(months=monthsToGo + 3*n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
def onOffset(self, someDate):
modMonth = (someDate.month - self.startingMonth) % 3
return BMonthEnd().onOffset(someDate) and modMonth == 0
class BYearEnd(DateOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_normalizeFirst = True
def __init__(self, n=1, **kwds):
self.month = kwds.get('month', 12)
if self.month < 1 or self.month > 12:
raise Exception('Month must go from 1 to 12')
DateOffset.__init__(self, n=n, **kwds)
def apply(self, other):
n = self.n
if self._normalizeFirst:
other = normalize_date(other)
wkday, nDaysInMonth = calendar.monthrange(other.year, self.month)
lastBDay = nDaysInMonth - max(((wkday + nDaysInMonth - 1) % 7) - 4, 0)
years = n
if n > 0:
if (other.month < self.month or
(other.month == self.month and other.day < lastBDay)):
years -= 1
elif n <= 0:
if (other.month > self.month or
(other.month == self.month and other.day > lastBDay)):
years += 1
other = other + relativedelta(years=years)
_, days_in_month = calendar.monthrange(other.year, self.month)
result = datetime(other.year, self.month, days_in_month)
if result.weekday() > 4:
result = result - BDay()
return result
class YearEnd(DateOffset):
"""DateOffset increments between calendar year ends"""
_normalizeFirst = True
def apply(self, other):
n = self.n
if other.month != 12 or other.day != 31:
other = datetime(other.year - 1, 12, 31)
if n <= 0:
n = n + 1
other = other + relativedelta(years=n)
return other
@classmethod
def onOffset(cls, someDate):
return someDate.month == 12 and someDate.day == 31
class YearBegin(DateOffset):
"""DateOffset increments between calendar year begin dates"""
_normalizeFirst = True
def apply(self, other):
n = self.n
if other.month != 1 or other.day != 1:
other = datetime(other.year, 1, 1)
if n <= 0:
n = n + 1
other = other + relativedelta(years = n, day=1)
return other
@classmethod
def onOffset(cls, someDate):
return someDate.month == 1 and someDate.day == 1
#-------------------------------------------------------------------------------
# Ticks
class Tick(DateOffset):
_normalizeFirst = False
_delta = None
_inc = timedelta(microseconds=1000)
@property
def delta(self):
if self._delta is None:
self._delta = self.n * self._inc
return self._delta
def apply(self, other):
if isinstance(other, (datetime, timedelta)):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
class Hour(Tick):
_inc = timedelta(0, 3600)
class Minute(Tick):
_inc = timedelta(0, 60)
class Second(Tick):
_inc = timedelta(0, 1)
day = DateOffset()
bday = BDay(normalize=True)
businessDay = bday
monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
businessMonthEnd = bmonthEnd
bquarterEnd = BQuarterEnd()
byearEnd = BYearEnd()
week = Week()
# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
#-------------------------------------------------------------------------------
# Offset names ("time rules") and related functions
_offsetMap = {
"WEEKDAY" : BDay(1),
"EOM" : BMonthEnd(1),
"W@MON" : Week(weekday=0),
"W@TUE" : Week(weekday=1),
"W@WED" : Week(weekday=2),
"W@THU" : Week(weekday=3),
"W@FRI" : Week(weekday=4),
"Q@JAN" : BQuarterEnd(startingMonth=1),
"Q@FEB" : BQuarterEnd(startingMonth=2),
"Q@MAR" : BQuarterEnd(startingMonth=3),
"A@JAN" : BYearEnd(month=1),
"A@FEB" : BYearEnd(month=2),
"A@MAR" : BYearEnd(month=3),
"A@APR" : BYearEnd(month=4),
"A@MAY" : BYearEnd(month=5),
"A@JUN" : BYearEnd(month=6),
"A@JUL" : BYearEnd(month=7),
"A@AUG" : BYearEnd(month=8),
"A@SEP" : BYearEnd(month=9),
"A@OCT" : BYearEnd(month=10),
"A@NOV" : BYearEnd(month=11),
"A@DEC" : BYearEnd()
}
_offsetNames = dict([(v, k) for k, v in _offsetMap.iteritems()])
def inferTimeRule(index):
if len(index) < 3:
raise Exception('Need at least three dates to infer time rule!')
first, second, third = index[:3]
for rule, offset in _offsetMap.iteritems():
if second == (first + offset) and third == (second + offset):
return rule
raise Exception('Could not infer time rule from data!')
def getOffset(name):
"""
Return DateOffset object associated with rule name
Example
-------
getOffset('EOM') --> BMonthEnd(1)
"""
offset = _offsetMap.get(name)
if offset is not None:
return offset
else:
raise Exception('Bad rule name requested: %s!' % name)
def hasOffsetName(offset):
return offset in _offsetNames
def getOffsetName(offset):
"""
Return rule name associated with a DateOffset object
Example
-------
getOffsetName(BMonthEnd(1)) --> 'EOM'
"""
name = _offsetNames.get(offset)
if name is not None:
return name
else:
raise Exception('Bad offset name requested: %s!' % offset)
| {
"content_hash": "98eaf356c91e5f42911925fc036128fc",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 80,
"avg_line_length": 30.98048780487805,
"alnum_prop": 0.5440088175090537,
"repo_name": "willgrass/pandas",
"id": "bb1b3a1f0b18ebac52c807eac5354ad2df00b8cf",
"size": "19053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/datetools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "637315"
},
{
"name": "Shell",
"bytes": "204"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20150326_1241'),
]
operations = [
migrations.AlterField(
model_name='user',
name='lang',
field=models.CharField(max_length=20, blank=True, null=True, default='', verbose_name='default language'),
preserve_default=True,
),
migrations.AlterField(
model_name='user',
name='timezone',
field=models.CharField(max_length=20, blank=True, null=True, default='', verbose_name='default timezone'),
preserve_default=True,
),
]
| {
"content_hash": "459e8d78cc652ca74b6f0da749c5e1cd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 118,
"avg_line_length": 29.24,
"alnum_prop": 0.5896032831737346,
"repo_name": "curiosityio/taiga-docker",
"id": "96eb7581bd628e19211b31fbf560423f2b44ca07",
"size": "755",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/taiga/users/migrations/0010_auto_20150414_0936.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
"""Test suite for :data:`sphinxcontrib.argdoc`.
Test implementation
-------------------
- Unit tests are provided for regular expressions used to identify different
patterns from :obj:`argparse` output
- Unit and functional tests e.g. for :func:`sphinxcontrib.argdoc.ext.format_argparser_as_docstring` are
performed as follows:
1. `Sphinx`_ is run on the test cases in :obj:`sphinxcontrib.argdoc.test.cases` using
the `reStructuredText`_ (`rst`) document stubs and configuration file
(``conf.py``) in ``sphinxcontrib/argdoc/test/testdocroot``. `rst` stubs are
automatically detected by `Sphinx`_, and the corresponding
:term:`executable scripts` in :obj:`sphinxcontrib.argdoc.test.cases` auto-detected
by :class:`TestArgdoc`.
2. The `rst` output is saved after `sphinxcontrib.argdoc` completes, before conversion
to HTML (or other formats) by `Sphinx`_. The `rst` output is compared
against reference output that can be found in ``sphinxcontrib/argdoc/test/testbuild``.
Comparing the `rst` output makes the tests
robust to potential changes in html rendering in `Sphinx`_.
To add a test case for a new :mod:`argparse` configuration
----------------------------------------------------------
1. Create an executable script using argparse following the general form
of the other tests. Put it in the subpackage :obj:`sphinxcontrib.argdoc.test.cases`
2. Create a matching `rst` document stub, and put it into ``sphinxcontrib/argdoc/test/testdocroot``
3. Add an entry for the `rst` document stub in ``sphinxcontrib/argdoc/test/testdocroot/master_toctree.rst``
"""
__date__ = "2015-06-09"
__author__ = "Joshua Griffin Dunn"
import os
import tempfile
import shlex
import shutil
import importlib
import sys
import codecs
import argparse
if sys.version_info < (3,):
import StringIO as StringIOWrapper
else:
import io as StringIOWrapper
import sphinxcontrib.argdoc.test.cases
from modulefinder import ModuleFinder
from pkg_resources import resource_filename, cleanup_resources
from nose.tools import assert_equal, assert_true, assert_dict_equal, assert_list_equal
from nose.plugins.attrib import attr
from sphinx import main as sphinxbuild
from sphinxcontrib.argdoc.ext import get_patterns, get_col1_text, get_col2_text, noargdoc,\
post_process_automodule,\
format_argparser_as_docstring,\
make_rest_table,\
safeunicode
class TestArgdoc():
"""Test case for functions defined in :mod:`sphinxcontrib.argdoc.ext`"""
@classmethod
def setUpClass(cls):
# retain record indicating whether builder has been run,
# so we run it a maximum of once, and only if we decide to do
# the expensive tests
cls.built = False
# options for sphinx-build runs
cls.optdict = { "sourcedir" : resource_filename("sphinxcontrib.argdoc","test/testdocroot"),
"conf" : resource_filename("sphinxcontrib.argdoc","test/testdocroot/conf.py"),
"outdir" : tempfile.mkdtemp(prefix="argdoc"),
}
cls.sphinxopts = "-Q -N -b html %(sourcedir)s %(outdir)s" % cls.optdict
# test cases for patterns
cls.pattern_tests = {}
cls.pattern_tests["positional_arg"] = [(" arg1",{"arg1":"arg1","desc":None}),
(" some_arg some_description with lots of words",
{ "arg1" : "some_arg",
"desc" : "some_description with lots of words"
}
),
("optional arguments:",None),
(" --kwarg M",None),
(" -k M",None),
(" -k",None),
(" --kwarg",None),
(" -k, --kwarg",None),
(" -k M, --kwarg M",None),
(" -k some_description with lots of words",None),
(" --kwarg some_description with lots of words",None),
(" -k, --kwarg some_description with lots of words",None),
(" -k M some_description with lots of words",None),
(" --kwarg M some_description with lots of words",None),
(" -k M, --kwarg M some_description with lots of words",None),
]
cls.pattern_tests["section_title"] = [("optional arguments:", ("optional arguments",)),
("optional arguments: ",None),
(" optional arguments:",None),
("optional: arguments:",("optional: arguments",)),
("positional arguments:",("positional arguments",)),
("some long string (with parentheses):",("some long string (with parentheses)",)),
]
cls.pattern_tests["arg_only"] = [
(" positional1",None),
(" po3413134",None),
(" reallyreallyreallyreallyreallyreallyreallyreallylongpositional",None),
(" --help", ('--help', None)),
(" -h", ('-h', None)),
(" -h, --help",('-h', '--help')),
# arg + vals + desc
(" -n M, --ne M some description", None),
(" -n M M, --ne M M some description", None),
(" -n M M M, --ne M M M some description", None),
(" -n M some description", None),
(" -n M M some description", None),
(" -n M M M some description", None),
(" --ne M some description", None),
(" --ne M M some description", None),
(" --ne M M M some description", None),
# arg + desc
(" -n, --ne some description", None),
(" -n some description", None),
(" --ne some description", None),
# arg + vals
("-n M, --ne M", None),
("-n M M, --ne M M", None),
("-n M M M, --ne M M M", None),
("-n M", None),
("-n M M", None),
("-n M M M", None),
("--ne M", None),
("--ne M M", None),
("--ne M M M", None),
]
cls.pattern_tests["arg_plus_val"] = [(" -o FILENAME, --out FILENAME",('-o', ' FILENAME', '--out', ' FILENAME')),
(" -o FILENAME",('-o', ' FILENAME', None, None)),
(" --out FILENAME",('--out', ' FILENAME', None, None)),
("-o FILENAME, --out FILENAME",None),
("-o FILENAME",None),
("--out FILENAME",None),
(" -n M M, --num M M",('-n', ' M M', '--num', ' M M')),
(" -n M M",('-n', ' M M', None,None)),
(" --num M M",('--num', ' M M',None,None)),
("-n M M, --num M M",None),
("-n M M",None),
("--num M M",None),
# arg + vals + desc
(" -n M, --ne M some description", None),
(" -n M M, --ne M M some description", None),
(" -n M M M, --ne M M M some description", None),
(" -n M some description", None),
(" -n M M some description", None),
(" -n M M M some description", None),
(" --ne M some description", None),
(" --ne M M some description", None),
(" --ne M M M some description", None),
# arg + desc
(" -n, --ne some description", None),
(" -n some description", None),
(" --ne some description", None),
# arg only
(" --help", None),
(" -h", None),
(" -h, --help", None),
]
cls.pattern_tests["arg_plus_desc"] = [(" -h, --help show this help message and exit",('-h','--help','show this help message and exit')),
(" -h show this help message and exit",('-h',None, 'show this help message and exit')),
(" --help show this help message and exit",('--help',None, 'show this help message and exit')),
(" -h, --help show this help message and exit",('-h','--help','show this help message and exit')),
(" -h show this help message and exit",('-h',None, 'show this help message and exit')),
(" --help show this help message and exit",('--help',None, 'show this help message and exit')),
("-h, --help show this help message and exit",None),
("-h show this help message and exit",None),
("--help show this help message and exit",None),
# arg only
(" --help", None),
(" -h", None),
(" -h, --help",None),
# arg + vals + desc
(" -n M, --ne M some description", None),
(" -n M M, --ne M M some description", None),
(" -n M M M, --ne M M M some description", None),
(" -n M some description", None),
(" -n M M some description", None),
(" -n M M M some description", None),
(" --ne M some description", None),
(" --ne M M some description", None),
(" --ne M M M some description", None),
# arg + vals
("-n M, --ne M", None),
("-n M M, --ne M M", None),
("-n M M M, --ne M M M", None),
("-n M", None),
("-n M M", None),
("-n M M M", None),
("--ne M", None),
("--ne M M", None),
("--ne M M M", None),
]
cls.pattern_tests["arg_plus_val_desc"] = [
(" -n M, --ne M some description", {"arg1" : "-n", "val1" : " M", "arg2" : "--ne", "val2" : " M", "desc" : "some description"}),
(" -n M M, --ne M M some description", {"arg1" : "-n", "val1" : " M M", "arg2" : "--ne", "val2" : " M M", "desc" : "some description"}),
(" -n M M M, --ne M M M some description", {"arg1" : "-n", "val1" : " M M M","arg2" : "--ne", "val2" : " M M M", "desc" : "some description"}),
(" -n M some description", {"arg1" : "-n", "val1" : " M", "arg2" : None, "val2" : None, "desc" : "some description"}),
(" -n M M some description", {"arg1" : "-n", "val1" : " M M", "arg2" : None, "val2" : None, "desc" : "some description"}),
(" -n M M M some description", {"arg1" : "-n", "val1" : " M M M","arg2" : None, "val2" : None, "desc" : "some description"}),
(" --ne M some description", {"arg1" : "--ne", "val1" : " M", "arg2" : None, "val2" : None, "desc" : "some description"}),
(" --ne M M some description", {"arg1" : "--ne", "val1" : " M M", "arg2" : None, "val2" : None, "desc" : "some description"}),
(" --ne M M M some description", {"arg1" : "--ne", "val1" : " M M M", "arg2" : None, "val2" : None, "desc" : "some description"}),
# arg + vals
(" -n M, --ne M ", None),
(" -n M M, --ne M M ", None),
(" -n M M M, --ne M M M ", None),
(" -n M ", None),
(" -n M M ", None),
(" -n M M M ", None),
(" --ne M ", None),
(" --ne M M ", None),
(" --ne M M M ", None),
# arg only
(" --help", None),
(" -h", None),
(" -h, --help", None),
# arg + desc
(" -n, --ne some description", None),
(" -n some description", None),
(" --ne some description", None),
# positional
(" positional1",None),
(" po3413134",None),
(" reallyreallyreallyreallyreallyreallyreallyreallylongpositional",None),
]
cls.pattern_tests["subcommand_names"] = {(" {one,another,four,five}",("one,another,four,five",)),
(" {one,another,four}",("one,another,four",)),
(" {one,another}",("one,another",)),
(" {just_one}",("just_one",)),
("{one,another,four,five}",None),
("{one,another,four}",None),
("{one,another}",None),
("{just_one}",None),
}
cls.pattern_tests["continue_desc"] = []
cls.pattern_tests["section_desc"] = [
(" choose one of the following:",("choose one of the following:",)),
(" Sometimes it is useful to group arguments that relate to each other in an",
("Sometimes it is useful to group arguments that relate to each other in an",)),
(" Description of second argument group",("Description of second argument group",)),
(" A special group of arguments in the `bar` subparser",("A special group of arguments in the `bar` subparser",)),
(" Oneworddescription",None),
# arg + vals
(" -n M, --ne M ", None),
(" -n M M, --ne M M ", None),
(" -n M M M, --ne M M M ", None),
(" -n M ", None),
(" -n M M ", None),
(" -n M M M ", None),
(" --ne M ", None),
(" --ne M M ", None),
(" --ne M M M ", None),
# arg + vals + desc
(" -n , --ne some description", None),
(" -n , --ne some description", None),
(" -n , --ne some description", None),
(" -n some description", None),
(" -n some description", None),
(" -n some description", None),
(" --ne some description", None),
(" --ne some description", None),
(" --ne some description", None),
# arg only
(" --help", None),
(" -h", None),
(" -h, --help", None),
(" arg1",None),
(" some_arg some_description with lots of words",None),
]
# test cases for test_get_col1_text, test_get_col2_text
cls.match_dicts = [
{ "arg1" : "ARG",
"col1" : "``ARG``",
"col2" : "",
},
{ "arg1" : "ARG",
"desc" : "some description",
"col1" : "``ARG``",
"col2" : "some description",
},
{ "arg1" : "-v",
"val1" : "ARG",
"col1" : "``-v ARG``",
"col2" : "",
},
{ "arg1" : "--val",
"val1" : "ARG",
"col1" : "``--val ARG``",
"col2" : ""
},
{ "arg1" : "-v",
"val1" : "ARG",
"arg2" : "--val",
"val2" : "ARG",
"desc" : "some description",
"col1" : "``-v ARG``, ``--val ARG``",
"col2" : "some description",
},
{ "arg1" : "-v",
"val1" : "ARG",
"desc" : "some description",
"col1" : "``-v ARG``",
"col2" : "some description",
},
{ "arg1" : "--val",
"val1" : "ARG",
"desc" : "some description",
"col1" : "``--val ARG``",
"col2" : "some description",
},
{ "arg1" : "-v",
"val1" : "ARG",
"arg2" : "--val",
"val2" : "ARG",
"desc" : "some description",
"col1" : "``-v ARG``, ``--val ARG``",
"col2" : "some description",
},
{ "arg1" : "-v",
"arg2" : "--val",
"col1" : "``-v``, ``--val``",
"col2" : ""
},
{ "arg1" : "-v",
"arg2" : "--val",
"desc" : "some description",
"col1" : "``-v``, ``--val``",
"col2" : "some description",
},
]
# automatically load module test cases for functional tests
# testcase names mapped to (module, expected rst output, built rst output)
cls.test_cases = {}
mf = ModuleFinder()
for modname in mf.find_all_submodules(sphinxcontrib.argdoc.test.cases):
if modname not in (__name__,"__init__"):
mod = importlib.import_module("sphinxcontrib.argdoc.test.cases.%s" % modname)
basename = "sphinxcontrib.argdoc.test.cases.%s_postargdoc.rst" % modname
tup = (mod,
resource_filename("sphinxcontrib.argdoc","test/testbuild/%s" % basename),
os.path.join(cls.optdict["outdir"],basename))
cls.test_cases[modname] = tup
@classmethod
def tearDownClass(cls):
"""Clean up temp files after tests are complete"""
cleanup_resources()
shutil.rmtree(cls.optdict["outdir"])
@classmethod
def run_builder(cls):
"""Run sphinx builder only the first time it is needed
Raises
------
AssertionError
If builder exists with non-zero status
"""
if cls.built == False:
try:
sphinxbuild(shlex.split(cls.sphinxopts))
except SystemExit as e:
if e.code != 0:
raise AssertionError("Error running sphinx-build (exited with code %s)" % e.code)
cls.built = True
@staticmethod
def check_pattern(test_name,pat,inp,expected):
"""Check patterns for matching, or non-matching
Parameters
----------
test_name : str
Name of test set being executed
pat : :class:`re.compile`
Pattern to test
inp : str
Input to test
expected : dict, tuple, or None
Expected result. If a `dict`, equivalence is tested with
`pat.match(inp).groupdict()` is called to test equivalence.
If a 'tuple' equivalence is tested with `pat.match(inp).groups()`,
:meth:`re.compile.groups` is called. If `None`, it is asserted
that `pat.match(inp)` is `None`
"""
if expected is None:
msg = "For test '%s', pattern %s' matched '%s', " % (test_name,
pat.pattern,
inp)
assert_true(pat.match(inp) is None,msg)
else:
if isinstance(expected,dict):
groups = pat.match(inp).groupdict()
fn = assert_dict_equal
else:
groups = pat.match(inp).groups()
fn = assert_equal
msg = "For test '%s', pattern %s' input '%s': expected %s, got %s " % (test_name,
pat.pattern,
inp,
expected,
groups)
fn(expected,groups,msg)
def test_patterns(self):
# test all patterns
patterns = get_patterns("-")
for name, cases in self.pattern_tests.items():
for inp,expected in cases:
yield self.check_pattern, name, patterns["-"][name], inp, expected
@staticmethod
def check_equal(expected,found,casename=""):
"""Helper method just to allow us to use test generators in other tests"""
if isinstance(expected,list):
idx = 2
elif isinstance(expected,str):
idx = 80
else:
idx = None
ellip = "..." if len(expected) > idx else ""
message = "Expected '%s%s', found '%s%s'" % (expected[:idx],ellip,found[:idx],ellip)
if casename != "":
message = "test '%s': %s" % (casename,message)
assert_equal(expected,found,message)
def test_prefix_chars_matches(self):
app = FakeApp(argdoc_prefix_chars="+")
parser = argparse.ArgumentParser(prefix_chars="+",
description="")
parser.add_argument("pos1",help="Positional argument 1")
parser.add_argument("pos2")
parser.add_argument("+o","++option",metavar="N",help="Some argument")
parser.add_argument("+x",nargs=2,metavar="N",help="argument with multiple values")
parser.add_argument("++other",action="store_true",default=False,help="No-value argument")
parser.add_argument("++argumentwithreallyreallyreallylongname",help="""An argument with
a really really really really long name, and a really reallly really long multi-line
help""")
lines = parser.format_help().split("\n")
found_lines = format_argparser_as_docstring(app,None,lines,get_patterns("+"))
expected_lines = [
'',
'------------',
'',
'',
u'Command-line arguments',
'----------------------',
u'',
'Positional arguments',
'~~~~~~~~~~~~~~~~~~~~',
u'',
u' ============= ==========================',
u' **Argument** **Description** ',
u' ------------- --------------------------',
u' ``pos1`` Positional argument 1 ',
u' ``pos2`` ',
u' ============= ==========================',
u'',
u'',
'Optional arguments',
'~~~~~~~~~~~~~~~~~~',
u'',
u' ========================================================================================= ================================================================================================================',
u' **Argument** **Description** ',
u' ----------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------------------',
u' ``+h``, ``++help`` show this help message and exit ',
u' ``+o N``, ``++option N`` Some argument ',
u' ``+x N N`` argument with multiple values ',
u' ``++other`` No-value argument ',
u' ``++argumentwithreallyreallyreallylongname ARGUMENTWITHREALLYREALLYREALLYLONGNAME`` An argument with a really really really really long name, and a really reallly really long multi-line help ',
u' ========================================================================================= ================================================================================================================',
u'']
n1 = n2 = 0
for line in expected_lines:
if line[:23] != "Command-line arguments":
n1 += 1
else:
break
for line in found_lines:
if line[:23] != "Command-line arguments":
n2 += 1
else:
break
yield self.check_list_equal, expected_lines[n1:], found_lines[n2:], "prefix_chars"
@staticmethod
def check_not_match(pattern,inp,msg):
# make sure a pattern does not match inp
assert_true(pattern.match(inp) is None,msg)
def test_prefix_chars_does_not_match_wrong(self):
# make sure patterns with prefix char "+" don't match examples with prefix char "-"
patterns = get_patterns("+")
for k in patterns["+"]:
if k.startswith("arg"):
for inp, _ in self.pattern_tests[k]:
msg = "pattern_does_not_match_wrong test %s: '%s' matched, should not have." % (k,inp)
yield self.check_not_match, patterns["+"][k], inp , msg
def test_prefix_chars_does_not_mix(self):
# make sure pattenr dicts with multiple prefix chars have no crosstalk
patterns = get_patterns("-+")
for k in patterns["+"]:
if k.startswith("arg"):
for inp, _ in self.pattern_tests[k]:
msg = "prefix_chars_does_not_mix test %s: '%s' matched, should not have." % (k,inp)
yield self.check_not_match, patterns["+"][k], inp , msg
for name, cases in self.pattern_tests.items():
for inp, expected in cases:
yield self.check_pattern, name, patterns["-"][name], inp, expected
def test_get_col1_text(self):
for my_dict in self.match_dicts:
yield self.check_equal, get_col1_text(my_dict), my_dict["col1"]
def test_get_col2_text(self):
for my_dict in self.match_dicts:
yield self.check_equal, get_col2_text(my_dict), my_dict["col2"]
def test_make_rest_table_with_title(self):
rows = [("Column 1","Column 2"),
("1","a"),
("2","b"),
("30000000000","something really long, or, somewhat long"),
("12315132","a line with ``special characters`` and *stars*")]
expected = [
safeunicode('================ ==================================================='),
safeunicode('**Column 1** **Column 2** '),
safeunicode('---------------- ---------------------------------------------------'),
safeunicode('1 a '),
safeunicode('2 b '),
safeunicode('30000000000 something really long, or, somewhat long '),
safeunicode('12315132 a line with ``special characters`` and *stars* '),
safeunicode('================ ==================================================='),
safeunicode('')
]
found = make_rest_table(rows,title=True,indent=0)
assert_list_equal(expected,found)
def test_make_rest_table_without_title(self):
rows = [("Column 1","Column 2"),
("1","a"),
("2","b"),
("30000000000","something really long, or, somewhat long"),
("12315132","a line with ``special characters`` and *stars*")]
expected = [
safeunicode('============ ==============================================='),
safeunicode('Column 1 Column 2 '),
safeunicode('1 a '),
safeunicode('2 b '),
safeunicode('30000000000 something really long, or, somewhat long '),
safeunicode('12315132 a line with ``special characters`` and *stars* '),
safeunicode('============ ==============================================='),
safeunicode('')
]
found = make_rest_table(rows,title=False,indent=0)
assert_list_equal(expected,found)
def test_make_rest_table_with_indent(self):
rows = [("Column 1","Column 2"),
("1","a"),
("2","b"),
("30000000000","something really long, or, somewhat long"),
("12315132","a line with ``special characters`` and *stars*")]
expected = [
safeunicode(' ============ ==============================================='),
safeunicode(' Column 1 Column 2 '),
safeunicode(' 1 a '),
safeunicode(' 2 b '),
safeunicode(' 30000000000 something really long, or, somewhat long '),
safeunicode(' 12315132 a line with ``special characters`` and *stars* '),
safeunicode(' ============ ==============================================='),
safeunicode('')
]
found = make_rest_table(rows,title=False,indent=4)
assert_list_equal(expected,found)
def test_noargdoc_adds_attribute(self):
def my_func():
pass
b = noargdoc(my_func)
assert_true(b.__dict__["noargdoc"])
@staticmethod
def check_list_equal(l1,l2,test_name):
mismatched = 0
in_l1 = []
in_l2 = []
i = j = 0
while i < len(l1) and j < len(l2):
line1 = l1[i]
line2 = l2[j]
if line1.rstrip() != line2.rstrip():
mismatched += 1
if line1 not in l2:
in_l1.append((i,line1))
i += 1
if line2 not in l1:
in_l2.append((j,line2))
j += 1
i += 1
j += 1
message = ""
if mismatched > 0:
message = safeunicode("-"*75 + "\n")
message += "Failed list equality for test %s\n" % test_name
message += "%s mismatches (expected 0).\n" % mismatched
message += "In list 1 only:\n"
for l in in_l1:
message += ("%s: %s\n" % l)
message += "In list 2 only:\n"
for l in in_l2:
message += ("%s: %s\n" % l)
message += "-"*75 + "\n"
assert_equal(mismatched,0,message)
def test_format_argparser_as_docstring(self):
# look at output & test against known RST
app = FakeApp(outdir=self.optdict["outdir"],argdoc_prefix_chars="-+")
for k in self.test_cases:
testname = "test_format_argparser_as_docstring '%s'" % k
mod, expected, _ = self.test_cases[k]
with codecs.open(expected,encoding="utf-8",mode="r") as f:
expected_lines = f.read().split("\n")
f.close()
buf = StringIOWrapper.StringIO()
old_out = sys.stdout
sys.stdout = buf
try:
mod.main(["--help"])
except SystemExit as e:
if e.code != 0:
raise(AssertionError("Exit code for '%s --help' was %s instead of zero" % (mod.__name__,e.code)))
sys.stdout = old_out
buf.seek(0)
lines = buf.read().split("\n")
found_lines = format_argparser_as_docstring(app,mod,lines,
section_head=True,
header_level=1,
patterns=get_patterns(prefix_chars="-+"))
n1 = n2 = 0
for line in expected_lines:
if line[:23] != "Command-line arguments":
n1 += 1
else:
break
for line in found_lines:
if line[:23] != "Command-line arguments":
n2 += 1
else:
break
yield self.check_list_equal, expected_lines[n1:], found_lines[n2:], testname
@attr(kind="functional")
def test_post_process_automodule(self):
self.run_builder()
for k, (_,expected,built) in self.test_cases.items():
if k == "noargdoc":
continue
with open(expected) as f:
expected_lines = f.read().split("\n")
with open(built) as f:
built_lines = f.read().split("\n")
testname = "test_post_process_automodule '%s'" % k
yield self.check_list_equal, expected_lines, built_lines, testname
def test_post_process_automodule_emits_event(self):
for k, (mod,_,_) in self.test_cases.items():
testname = "test_post_process_automodule_emits_event '%s'" % k
app = FakeApp(outdir=self.optdict["outdir"])
options = {}
expected = ["argdoc-process-docstring"]
_ = post_process_automodule(app,"module",mod.__name__,mod,options,[])
yield self.check_equal, expected, app.emitted, testname
class Record(object):
"""Proxy object that allows addition of arbitrary properties"""
def __init__(self):
pass
class FakeApp(object):
"""Proxy for a Sphinx application object. Implements minimial methods
required for us to test functions in :mod:`sphinxcontrib.argdoc.ext` that require
a Sphinx application instance
"""
def __init__(self,argdoc_main_func="main",argdoc_save_rst=True,outdir="/tmp/",argdoc_prefix_chars="-"):
self.config = Record()
self.config.argdoc_main_func = argdoc_main_func
self.config.argdoc_save_rst = argdoc_save_rst
self.config.argdoc_prefix_chars = argdoc_prefix_chars
self.outdir = outdir
self.emitted = []
def warn(self,*args,**kwargs):
pass
def debug(self,*args,**kwargs):
pass
def debug2(self,*args,**kwargs):
pass
def emit(self,*args,**kwargs):
"""Simulate `emit` method. Save event name in `self.emitted` at each call"""
self.emitted.append(args[0])
| {
"content_hash": "d4b937d8adb43619abb1d2120bd882da",
"timestamp": "",
"source": "github",
"line_count": 754,
"max_line_length": 230,
"avg_line_length": 52.87002652519894,
"alnum_prop": 0.3783614288581176,
"repo_name": "Lemma1/MAC-POSTS",
"id": "33e89b2e2773386416e03ab6b76583f136f86d0f",
"size": "39902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc_builder/sphinx-contrib/argdoc/sphinxcontrib/argdoc/test/test_argdoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "3394"
},
{
"name": "Batchfile",
"bytes": "103388"
},
{
"name": "C",
"bytes": "5399"
},
{
"name": "C++",
"bytes": "3595985"
},
{
"name": "CMake",
"bytes": "53433"
},
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "HTML",
"bytes": "18640"
},
{
"name": "JavaScript",
"bytes": "44610"
},
{
"name": "Jupyter Notebook",
"bytes": "7469541"
},
{
"name": "MATLAB",
"bytes": "5439"
},
{
"name": "Makefile",
"bytes": "148059"
},
{
"name": "Python",
"bytes": "1950140"
},
{
"name": "Shell",
"bytes": "2554"
}
],
"symlink_target": ""
} |
import re
from functools import reduce
def find_substring_n_times():
n = int(input())
text = "\n".join(input() for _ in range(n))
t = int(input())
for _ in range(t):
print(len(re.findall(r'\B(%s)\B' % input().strip(),text)))
find_substring_n_times() | {
"content_hash": "04167bb6118e1e2231a813471916af89",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 27.8,
"alnum_prop": 0.5863309352517986,
"repo_name": "vbsteja/code",
"id": "8228912dc1d266d0f69db7b1c8a55ad82c055e7c",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/NLP/regex_practice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "39"
},
{
"name": "Clojure",
"bytes": "9249"
},
{
"name": "Common Lisp",
"bytes": "1662"
},
{
"name": "Fortran",
"bytes": "677"
},
{
"name": "Haskell",
"bytes": "833"
},
{
"name": "Julia",
"bytes": "1286"
},
{
"name": "Jupyter Notebook",
"bytes": "6499558"
},
{
"name": "Python",
"bytes": "68994"
},
{
"name": "Racket",
"bytes": "668"
},
{
"name": "Scala",
"bytes": "416"
},
{
"name": "Shell",
"bytes": "51"
}
],
"symlink_target": ""
} |
import http.client
import json
import time
from conf.config import VR_PORT, SERVER_PORT, OCTBS_PORT, API_TEST_KEY, CONSOLEPROXY_PORT
from models.ApiResponse import ApiResponse
from utils.commonUtil import buildRetObj
from views.api.center.api import PARAM_NOT_NULL
from core.err_code import CONNECT_SERVER_ERR
def api_result(address, port, task_id, https=False):
conn = http.client.HTTPConnection(address, port)
conn.request("GET", "/api/result/%s/" % task_id)
response = conn.getresponse()
if response.status != 200:
return (1, None)
rsp_body = response.read()
try:
rsp = json.loads(rsp_body)
except:
return (2, None)
return (0, rsp)
def api_result_server(address, port, task_id, https=False):
return api_result(address, port, task_id, https)
def api_call(address, port, api_id, api_content, session_key, async=False, server=False, https=False):
conn = http.client.HTTPConnection(address, port)
headers = { "Content-Type": "application/json" }
api_body = {
"api": api_id,
"paras": api_content,
"async": async,
}
if session_key:
if (server):
api_body["session"] = {
"skey": session_key
}
else:
api_body["session"] = {
"uuid": session_key
}
try:
conn.request("POST", "/api/", json.dumps(api_body))
except:
return (CONNECT_SERVER_ERR, None)
response = conn.getresponse()
if response.status != 200:
return (1, None)
rsp_body = response.read()
if type(rsp_body) == type(b'a'):
rsp_body = rsp_body.decode()
try:
rsp = json.loads(rsp_body)
except:
return (2, None)
if (not async or rsp["data"]["state"] in ["Finished", "Failed"]):
return (0, rsp)
task_id = rsp["apiId"]
def query_until_done():
conn.request("GET", "/api/result/%s" % task_id)
response = conn.getresponse()
if response.status != 200:
return (3, None)
rsp_body = response.read()
rsp = json.loads(rsp_body)
if (rsp["data"]["state"] in ["Finished", "Failed"]):
return json.loads(0, rsp)
time.sleep(1)
return query_until_done()
return query_until_done()
def parse_paras(paras, api_proto):
for (k, v) in list(api_proto["paras"].items()):
inV = paras.get(k)
if (v["default"] == PARAM_NOT_NULL and not inV):
errorMsg = "paras \"%s\" must be specified" % k
return False, errorMsg
return 0, None
def get_server_key():
return API_TEST_KEY
def api_call_server(address, paras, api_proto, port=SERVER_PORT, async=False, https=False):
(ret, errorLog) = parse_paras(paras, api_proto)
if (ret):
retObj = buildRetObj(ret, data=None, errorLog=errorLog)
return ApiResponse(ret, retObj)
(ret, resp) = api_call(address, port, api_proto["apikey"], paras, get_server_key(), async, server=True, https=https)
return ApiResponse(ret, resp)
def api_call_vr(address, paras, api_proto, port=VR_PORT, async=False, https=False):
(ret, errorLog) = parse_paras(paras, api_proto)
if (ret):
retObj = buildRetObj(ret, data=None, errorLog=errorLog)
return ApiResponse(ret, retObj)
(ret, resp) = api_call(address, port, api_proto["apikey"], paras, get_server_key(), async, server=True, https=https)
return ApiResponse(ret, resp)
def api_call_octbs(address, paras, api_proto, port=OCTBS_PORT, async=False, https=False):
(ret, errorLog) = parse_paras(paras, api_proto)
if (ret):
retObj = buildRetObj(ret, data=None, errorLog=errorLog)
return ApiResponse(ret, retObj)
(ret, resp) = api_call(address, port, api_proto["apikey"], paras, get_server_key(), async, server=True, https=https)
return ApiResponse(ret, resp)
def api_call_consoleproxy(address, paras, api_proto, port=CONSOLEPROXY_PORT, async=False, https=False):
(ret, errorLog) = parse_paras(paras, api_proto)
if (ret):
retObj = buildRetObj(ret, data=None, errorLog=errorLog)
return ApiResponse(ret, retObj)
(ret, resp) = api_call(address, port, api_proto["apikey"], paras, get_server_key(), async, server=True, https=https)
return ApiResponse(ret, resp)
if __name__ == "__main__":
api = "octlink.center.v5.user.APILoginByAccount"
paras = {
"account": "admin",
"password": "admin",
}
session_uuid = None
(retCode, retObj) = api_call("127.0.0.1", "5443", api, paras, session_key=session_uuid, async=False)
if (retCode):
print("connect to server error")
else:
print((json.dumps(retObj, indent=4)))
(retCode, retObj) = api_result("127.0.0.1", "5443", "fe7babb9b2b94353b60dcc44c4694e31")
if (retCode):
print("connect to server error")
else:
print((json.dumps(retObj, indent=4)))
| {
"content_hash": "a134204870e42a66630949977f347915",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 117,
"avg_line_length": 27.337423312883434,
"alnum_prop": 0.6824506283662477,
"repo_name": "maqg/wcrobot",
"id": "d2d0bed20fa7a2c5357505798f131a238a5854ab",
"size": "4499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/utils/callapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "871"
},
{
"name": "HTML",
"bytes": "58362"
},
{
"name": "JavaScript",
"bytes": "23121"
},
{
"name": "Makefile",
"bytes": "630"
},
{
"name": "PLpgSQL",
"bytes": "16632"
},
{
"name": "Perl",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "156673"
},
{
"name": "Shell",
"bytes": "13165"
},
{
"name": "TypeScript",
"bytes": "12074"
}
],
"symlink_target": ""
} |
from robber import expect
from robber.matchers.base import Base
class Length(Base):
"""
expect('str').to.have.length(3)
expect([1, 2, 3]).to.have.length(3)
"""
def matches(self):
return len(self.actual) == self.expected
def failure_message(self):
return 'Expected "%s" to have a length of %d' % (self.actual, self.expected)
class Empty(Base):
"""
expect('').to.be.empty()
expect([]).to.be.empty()
"""
def matches(self):
return len(self.actual) == 0
def failure_message(self):
return 'Expected "%s" to be empty' % self.actual
class NotEmpty(Base):
"""
expect('foo').to.be.not_empty()
expect([1, 2, 3]).to.be.not_empty()
"""
def matches(self):
return len(self.actual) > 0
def failure_message(self):
return 'Expected "%s" to be nonempty' % self.actual
expect.register('length', Length)
expect.register('empty', Empty)
expect.register('not_empty', NotEmpty)
| {
"content_hash": "6e557297611e891075830e8158f5b21c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 24.55,
"alnum_prop": 0.6069246435845214,
"repo_name": "taoenator/robber.py",
"id": "5c76cd41813826e6c4141d9762b0ca1b38fa71fa",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robber/matchers/length.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35813"
}
],
"symlink_target": ""
} |
import textwrap
import pytest
from pants.core.goals import tailor
from pants.core.goals.tailor import (
AllOwnedSources,
DisjointSourcePutativeTarget,
EditBuildFilesRequest,
EditedBuildFiles,
PutativeTarget,
PutativeTargets,
PutativeTargetsRequest,
TailorSubsystem,
UniquelyNamedPutativeTargets,
default_sources_for_target_type,
group_by_dir,
make_content_str,
)
from pants.core.util_rules import source_files
from pants.engine.fs import EMPTY_DIGEST, DigestContents, FileContent, Workspace
from pants.engine.rules import QueryRule
from pants.engine.target import Sources, Target
from pants.engine.unions import UnionMembership
from pants.testutil.option_util import create_goal_subsystem
from pants.testutil.rule_runner import MockConsole, MockGet, RuleRunner, run_rule_with_mocks
class MockPutativeTargetsRequest(PutativeTargetsRequest):
pass
class FortranSources(Sources):
expected_file_extensions = (".f90",)
class FortranTestsSources(FortranSources):
default = ("*_test.f90", "test_*.f90")
class FortranLibrarySources(FortranSources):
default = ("*.f90",) + tuple(f"!{pat}" for pat in FortranTestsSources.default)
class FortranLibrary(Target):
alias = "fortran_library"
core_fields = (FortranLibrarySources,)
class FortranTests(Target):
alias = "fortran_tests"
core_fields = (FortranTestsSources,)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*tailor.rules(),
*source_files.rules(),
QueryRule(UniquelyNamedPutativeTargets, (PutativeTargets,)),
QueryRule(DisjointSourcePutativeTarget, (PutativeTarget,)),
QueryRule(EditedBuildFiles, (EditBuildFilesRequest,)),
QueryRule(AllOwnedSources, ()),
],
target_types=[FortranLibrary, FortranTests],
)
def test_default_sources_for_target_type() -> None:
assert default_sources_for_target_type(FortranLibrary) == FortranLibrarySources.default
assert default_sources_for_target_type(FortranTests) == FortranTestsSources.default
def test_make_content_str() -> None:
content = make_content_str(
"fortran_library()\n",
" ",
[
PutativeTarget.for_target_type(
FortranTests,
"path/to",
"tests",
["test1.f90", "test2.f90"],
kwargs={"name": "tests", "sources": ("test1.f90", "test2.f90")},
)
],
)
assert (
textwrap.dedent(
"""
fortran_library()
fortran_tests(
name="tests",
sources=[
"test1.f90",
"test2.f90",
],
)
"""
).lstrip()
== content
)
def test_rename_conflicting_targets(rule_runner: RuleRunner) -> None:
dir_structure = {
"src/fortran/foo/BUILD": "fortran_library(sources=['bar1.f90'])\n"
"fortran_library(name='foo0', sources=['bar2.f90'])",
"src/fortran/foo/bar1.f90": "",
"src/fortran/foo/bar2.f90": "",
"src/fortran/foo/bar3.f90": "",
}
for path, content in dir_structure.items():
rule_runner.create_file(path, content)
ptgt = PutativeTarget(
"src/fortran/foo", "foo", "fortran_library", ["bar3.f90"], FortranLibrarySources.default
)
unpts = rule_runner.request(UniquelyNamedPutativeTargets, [PutativeTargets([ptgt])])
ptgts = unpts.putative_targets
assert (
PutativeTargets(
[
PutativeTarget(
"src/fortran/foo",
"foo1",
"fortran_library",
["bar3.f90"],
FortranLibrarySources.default,
kwargs={"name": "foo1"},
)
]
)
== ptgts
)
def test_root_targets_are_explicitly_named(rule_runner: RuleRunner) -> None:
rule_runner.create_file("foo.f90", "")
ptgt = PutativeTarget("", "", "fortran_library", ["foo.f90"], FortranLibrarySources.default)
unpts = rule_runner.request(UniquelyNamedPutativeTargets, [PutativeTargets([ptgt])])
ptgts = unpts.putative_targets
assert (
PutativeTargets(
[
PutativeTarget(
"",
"root",
"fortran_library",
["foo.f90"],
FortranLibrarySources.default,
kwargs={"name": "root"},
)
]
)
== ptgts
)
def test_restrict_conflicting_sources(rule_runner: RuleRunner) -> None:
dir_structure = {
"src/fortran/foo/BUILD": "fortran_library(sources=['bar/baz1.f90'])",
"src/fortran/foo/bar/BUILD": "fortran_library(sources=['baz2.f90'])",
"src/fortran/foo/bar/baz1.f90": "",
"src/fortran/foo/bar/baz2.f90": "",
"src/fortran/foo/bar/baz3.f90": "",
}
for path, content in dir_structure.items():
rule_runner.create_file(path, content)
ptgt = PutativeTarget(
"src/fortran/foo/bar",
"bar0",
"fortran_library",
["baz3.f90"],
FortranLibrarySources.default,
)
dspt = rule_runner.request(DisjointSourcePutativeTarget, [ptgt])
ptgt = dspt.putative_target
assert ("baz3.f90",) == ptgt.owned_sources
assert ("baz3.f90",) == ptgt.kwargs.get("sources")
assert (
"# NOTE: Sources restricted from the default for fortran_library due to conflict with",
"# - src/fortran/foo",
"# - src/fortran/foo/bar",
) == ptgt.comments
def test_edit_build_files(rule_runner: RuleRunner) -> None:
rule_runner.create_file("src/fortran/foo/BUILD", 'fortran_library(sources=["bar1.f90"])')
rule_runner.create_dir("src/fortran/baz/BUILD") # NB: A directory, not a file.
req = EditBuildFilesRequest(
PutativeTargets(
[
PutativeTarget.for_target_type(
FortranTests,
"src/fortran/foo",
"tests",
["bar1_test.f90"],
kwargs={"name": "tests", "life_the_universe_and_everything": 42},
),
PutativeTarget.for_target_type(
FortranLibrary,
"src/fortran/foo",
"foo0",
["bar2.f90", "bar3.f90"],
kwargs={"name": "foo0", "sources": ("bar2.f90", "bar3.f90")},
comments=["# A comment spread", "# over multiple lines."],
),
PutativeTarget.for_target_type(
FortranLibrary, "src/fortran/baz", "baz", ["qux1.f90"]
),
]
),
indent=" ",
)
edited_build_files = rule_runner.request(EditedBuildFiles, [req])
assert edited_build_files.created_paths == ("src/fortran/baz/BUILD.pants",)
assert edited_build_files.updated_paths == ("src/fortran/foo/BUILD",)
contents = rule_runner.request(DigestContents, [edited_build_files.digest])
expected = [
FileContent("src/fortran/baz/BUILD.pants", "fortran_library()\n".encode()),
FileContent(
"src/fortran/foo/BUILD",
textwrap.dedent(
"""
fortran_library(sources=["bar1.f90"])
# A comment spread
# over multiple lines.
fortran_library(
name="foo0",
sources=[
"bar2.f90",
"bar3.f90",
],
)
fortran_tests(
name="tests",
life_the_universe_and_everything=42,
)
"""
)
.lstrip()
.encode(),
),
]
actual = list(contents)
# We do these more laborious asserts instead of just comparing the lists so that
# on a text mismatch we see the actual string diff on the decoded strings.
assert len(expected) == len(actual)
for efc, afc in zip(expected, actual):
assert efc.path == afc.path
assert efc.content.decode() == afc.content.decode()
assert efc.is_executable == afc.is_executable
def test_group_by_dir() -> None:
paths = {
"foo/bar/baz1.ext",
"foo/bar/baz1_test.ext",
"foo/bar/qux/quux1.ext",
"foo/__init__.ext",
"foo/bar/__init__.ext",
"foo/bar/baz2.ext",
"foo/bar1.ext",
"foo1.ext",
"__init__.ext",
}
assert {
"": {"__init__.ext", "foo1.ext"},
"foo": {"__init__.ext", "bar1.ext"},
"foo/bar": {"__init__.ext", "baz1.ext", "baz1_test.ext", "baz2.ext"},
"foo/bar/qux": {"quux1.ext"},
} == group_by_dir(paths)
def test_tailor_rule(rule_runner: RuleRunner) -> None:
console = MockConsole(use_colors=False)
workspace = Workspace(rule_runner.scheduler)
union_membership = UnionMembership({PutativeTargetsRequest: [MockPutativeTargetsRequest]})
run_rule_with_mocks(
tailor.tailor,
rule_args=[
create_goal_subsystem(TailorSubsystem, build_file_indent=" "),
console,
workspace,
union_membership,
],
mock_gets=[
MockGet(
output_type=PutativeTargets,
input_type=PutativeTargetsRequest,
mock=lambda req: PutativeTargets(
[
PutativeTarget.for_target_type(
FortranTests, "src/fortran/foo", "tests", ["bar1_test.f90"]
),
PutativeTarget.for_target_type(
FortranLibrary, "src/fortran/baz", "baz", ["qux1.f90"]
),
PutativeTarget.for_target_type(
FortranLibrary,
"src/fortran/conflict",
"conflict",
["conflict1.f90", "conflict2.f90"],
),
]
),
),
MockGet(
output_type=UniquelyNamedPutativeTargets,
input_type=PutativeTargets,
mock=lambda pts: UniquelyNamedPutativeTargets(
PutativeTargets(
[pt.rename("conflict0") if pt.name == "conflict" else pt for pt in pts]
)
),
),
MockGet(
output_type=DisjointSourcePutativeTarget,
input_type=PutativeTarget,
# This test exists to test the console output, which isn't affected by
# whether the sources of a putative target were modified due to conflict,
# so we don't bother to inject such modifications. The BUILD file content
# generation, which is so affected, is tested separately above.
mock=lambda pt: DisjointSourcePutativeTarget(pt),
),
MockGet(
output_type=EditedBuildFiles,
input_type=EditBuildFilesRequest,
mock=lambda _: EditedBuildFiles(
# We test that the created digest contains what we expect above, and we
# don't need to test here that writing digests to the Workspace works.
# So the empty digest is sufficient.
digest=EMPTY_DIGEST,
created_paths=("src/fortran/baz/BUILD",),
updated_paths=(
"src/fortran/foo/BUILD",
"src/fortran/conflict/BUILD",
),
),
),
],
union_membership=union_membership,
)
stdout_str = console.stdout.getvalue()
assert (
"Created src/fortran/baz/BUILD:\n - Added fortran_library target src/fortran/baz"
in stdout_str
)
assert (
"Updated src/fortran/foo/BUILD:\n - Added fortran_tests target src/fortran/foo:tests"
in stdout_str
)
assert (
"Updated src/fortran/conflict/BUILD:\n - Added fortran_library target "
"src/fortran/conflict:conflict0"
) in stdout_str
def test_all_owned_sources(rule_runner: RuleRunner) -> None:
for path in [
"dir/a.f90",
"dir/b.f90",
"dir/a_test.f90",
"dir/unowned.txt",
"unowned.txt",
"unowned.f90",
]:
rule_runner.create_file(path)
rule_runner.add_to_build_file("dir", "fortran_library()\nfortran_tests(name='tests')")
assert rule_runner.request(AllOwnedSources, []) == AllOwnedSources(
["dir/a.f90", "dir/b.f90", "dir/a_test.f90"]
)
| {
"content_hash": "da25f915f598c728f71ddab65c3b8a53",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 96,
"avg_line_length": 33.416666666666664,
"alnum_prop": 0.5444201995012469,
"repo_name": "jsirois/pants",
"id": "f8dd2d4622a5699c7c87f841e7131202111b3eaa",
"size": "12964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/core/goals/tailor_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6008"
},
{
"name": "Mustache",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "2837069"
},
{
"name": "Rust",
"bytes": "1241058"
},
{
"name": "Shell",
"bytes": "57720"
},
{
"name": "Starlark",
"bytes": "27937"
}
],
"symlink_target": ""
} |
"""Unit tests for counters and counter names."""
from __future__ import absolute_import
import unittest
from apache_beam.utils import counters
from apache_beam.utils.counters import CounterName
class CounterNameTest(unittest.TestCase):
def test_name_string_representation(self):
counter_name = CounterName('counter_name',
'stage_name',
'step_name')
# This string representation is utilized by the worker to report progress.
# Change only if the worker code has also been changed.
self.assertEqual('stage_name-step_name-counter_name', str(counter_name))
self.assertIn('<CounterName<stage_name-step_name-counter_name> at 0x',
repr(counter_name))
def test_equal_objects(self):
self.assertEqual(CounterName('counter_name',
'stage_name',
'step_name'),
CounterName('counter_name',
'stage_name',
'step_name'))
self.assertNotEqual(CounterName('counter_name',
'stage_name',
'step_name'),
CounterName('counter_name',
'stage_name',
'step_nam'))
# Testing objects with an IOTarget.
self.assertEqual(CounterName('counter_name',
'stage_name',
'step_name',
io_target=counters.side_input_id(1, 's9')),
CounterName('counter_name',
'stage_name',
'step_name',
io_target=counters.side_input_id(1, 's9')))
self.assertNotEqual(CounterName('counter_name',
'stage_name',
'step_name',
io_target=counters.side_input_id(1, 's')),
CounterName('counter_name',
'stage_name',
'step_name',
io_target=counters.side_input_id(1, 's9')))
def test_hash_two_objects(self):
self.assertEqual(hash(CounterName('counter_name',
'stage_name',
'step_name')),
hash(CounterName('counter_name',
'stage_name',
'step_name')))
self.assertNotEqual(hash(CounterName('counter_name',
'stage_name',
'step_name')),
hash(CounterName('counter_name',
'stage_name',
'step_nam')))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "adf9cda300f55f1fee9d1fb0c9725bc8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 42.375,
"alnum_prop": 0.4280563749590298,
"repo_name": "RyanSkraba/beam",
"id": "d86886143dbc91f8e623fd80367bf7ec36482a0d",
"size": "3836",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/utils/counters_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
import unittest
from pyra.iindex import InvertedIndex, INF
class TestInvertedIndex(unittest.TestCase):
def setUp(self):
pass
def test_trivial_corpus(self):
corpus = "the quick brown fox jumps over the lazy dog and the brown dog runs away"
tokens = corpus.split()
iidx = InvertedIndex(tokens)
self.assertEqual(iidx.first('dog'), 8)
self.assertEqual(iidx.last('dog'), 12)
self.assertEqual(iidx.next('dog', 8), 12)
self.assertEqual(iidx.prev('dog', 12), 8)
self.assertEqual(iidx.first('cat'), INF)
self.assertEqual(iidx.last('cat'), -INF)
self.assertEqual(iidx.next('cat', 8), INF)
self.assertEqual(iidx.prev('cat', 12), -INF)
self.assertEqual(iidx.first('fox'), 3)
self.assertEqual(iidx.last('fox'), 3)
self.assertEqual(iidx.frequency('dog', -INF, INF), 2)
self.assertEqual(iidx.frequency('dog', -INF, 9), 1)
self.assertEqual(iidx.frequency('dog', -INF, 8), 1)
self.assertEqual(iidx.frequency('dog', -INF, 7), 0)
self.assertEqual(iidx.frequency('dog', 7, 13), 2)
self.assertEqual(iidx.frequency('dog', 8, 12), 2)
self.assertEqual(iidx.frequency('dog', 12, INF), 1)
self.assertEqual(iidx.frequency('dog', 13, 14), 0)
self.assertEqual(iidx.frequency('cat', -INF, INF), 0)
self.assertEqual(iidx.frequency('cat', 2, INF), 0)
self.assertEqual(iidx.frequency('cat', -INF, 3), 0)
self.assertEqual(iidx.frequency('cat', 2, 4), 0)
self.assertEqual(list(iidx.postings('dog')), [8, 12])
self.assertEqual(list(iidx.postings('dog', reverse=True)), [12,8])
self.assertEqual(list(iidx.postings('dog', 12)), [12])
self.assertEqual(list(iidx.postings('cat')), [])
self.assertEqual(list(iidx.postings('cat', reverse=True)), [])
self.assertEqual(iidx.dictionary() ^ set(tokens), set())
| {
"content_hash": "8aca40b40b2ae60b1393d928c8bd2693",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 90,
"avg_line_length": 60.853658536585364,
"alnum_prop": 0.47935871743486974,
"repo_name": "afourney/pyra",
"id": "0ab84bbc7db632acc3892ac51007052257c6a23f",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_iindex.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "49734"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
} |
"""Dataset preprocessing utils, for creating tf records etc.."""
import tensorflow.compat.v1 as tf
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
| {
"content_hash": "1440d75b8e7bd0237c6734fbe0195be0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 26.25925925925926,
"alnum_prop": 0.7588152327221439,
"repo_name": "tensorflow/tpu",
"id": "12950b0ca41a387bb62ddc26549d78323fd0bf9e",
"size": "1398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/official/detection/projects/vild/preprocessing/dataset_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "754301"
},
{
"name": "Dockerfile",
"bytes": "2734"
},
{
"name": "Go",
"bytes": "226317"
},
{
"name": "Jupyter Notebook",
"bytes": "56231509"
},
{
"name": "Makefile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "3444271"
},
{
"name": "Shell",
"bytes": "21032"
},
{
"name": "Starlark",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""add new netloc tracking table
Revision ID: 669e9df34ea7
Revises: 5552dfae2cb0
Create Date: 2020-01-20 01:36:51.862767
"""
# revision identifiers, used by Alembic.
revision = '669e9df34ea7'
down_revision = '5552dfae2cb0'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('seen_netloc_tracker',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('netloc', citext.CIText(), nullable=False),
sa.Column('ignore', sa.Boolean(), nullable=True),
sa.Column('have', sa.Boolean(), nullable=True),
sa.Column('extra', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_seen_netloc_tracker_netloc'), 'seen_netloc_tracker', ['netloc'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_seen_netloc_tracker_netloc'), table_name='seen_netloc_tracker')
op.drop_table('seen_netloc_tracker')
# ### end Alembic commands ###
| {
"content_hash": "31aa352dc940ab8b6f015d5f984258d3",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 106,
"avg_line_length": 32.943396226415096,
"alnum_prop": 0.7394043528064147,
"repo_name": "fake-name/ReadableWebProxy",
"id": "bd385f28e893098f82c6b5f494e2c5b0c69f04e7",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/2020-01-20_669e9df34ea7_add_new_netloc_tracking_table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from django.db import connection
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from data_related_to_me import DataRelatedToMe
from .forms import FilterForm
# Create your views here.
@login_required
def view_test_query(request, username=None):
if username is None:
username = 'garyking'
f = FilterForm()
d = dict(username=username, d2me=DataRelatedToMe(username=username),
filter_form=f)
return render_to_response('miniverse/test_query.html', d)
# Create your views here.
def xview_test_query(request, username=None):
if username is None:
username = 'garyking'
d = dict(username=username, d2me=DataRelatedToMe(username=username))
# --------------------------------------
# role description query
# --------------------------------------
role_query = """SELECT name, id, description FROM dataverserole ORDER by id;"""
role_query_results = get_query_results(role_query)
d.update(dict(role_query=role_query, role_query_results=role_query_results))
# --------------------------------------
# role assignments query
# --------------------------------------
assign_query = """SELECT r.id, r.assigneeidentifier, r.definitionpoint_id, r.role_id
FROM roleassignment r
WHERE substr(r.assigneeidentifier, 2)= '%s';""" % (username,)
assign_query_results = get_query_results(assign_query)
d.update(dict(assign_query=assign_query, assign_query_results=assign_query_results))
# Retrieve dvobject ids from query
dv_ids = [ x['definitionpoint_id'] for x in assign_query_results]
if len(dv_ids) == 0:
return HttpResponse('no assignments')
dv_ids_as_strings = [ str(x) for x in dv_ids]
# --------------------------------------
# dvobject query - DIRECT ASSIGNMENTS
# --------------------------------------
dvobject_query = """SELECT dv.id, dv.dtype, dv.modificationtime, dv.owner_id
FROM dvobject dv
WHERE dv.id IN (%s)
ORDER BY dv.dtype;""" % ','.join(dv_ids_as_strings)
dvobject_query_results = get_query_results(dvobject_query)
d.update(dict(dvobject_query=dvobject_query, dvobject_query_results=dvobject_query_results),
dv_ids_as_strings=dv_ids_as_strings)
# Dataverse IDs
dataverse_ids = [x['id'] for x in dvobject_query_results if x['dtype']=='Dataverse']
num_dataverses = len(dataverse_ids)
# Dataset IDs
dataset_ids = [x['id'] for x in dvobject_query_results if x['dtype']=='Dataset']
num_datasets = len(dataset_ids)
# Files
datafile_ids = [x['id'] for x in dvobject_query_results if x['dtype']=='DataFile']
num_files = len([x for x in dvobject_query_results if x['dtype']=='DataFile'])
d.update(dict(num_dataverses=num_dataverses, num_datasets=num_datasets, num_files=num_files))
# --------------------------------------
# Data query - INDIRECT Datasets
# --------------------------------------
parent_dataverse_ids = [str(x) for x in dataverse_ids]
secondary_dataset_query = """SELECT dv.id, dv.dtype, dv.modificationtime, dv.owner_id
FROM dvobject dv
WHERE dv.owner_id IN (%s)
AND dv.dtype IN ('Dataset')
ORDER BY dv.dtype;
""" % (','.join(parent_dataverse_ids ),
)
secondary_dataset_query_results = get_query_results(secondary_dataset_query)
d.update(dict(secondary_dataset_query=secondary_dataset_query,
secondary_dataset_query_results=secondary_dataset_query_results))
# --------------------------------------
# Data query - INDIRECT ASSIGNMENTS - FILES
# --------------------------------------
secondary_parent_dataset_ids = [x['id'] for x in secondary_dataset_query_results if x['dtype']=='Dataset']
parent_dataset_ids = [str(x) for x in (dataset_ids + secondary_parent_dataset_ids)]
secondary_file_query = """SELECT dv.id, dv.dtype, dv.modificationtime, dv.owner_id
FROM dvobject dv
WHERE dv.owner_id IN (%s)
AND dv.dtype = 'DataFile'
ORDER BY dv.dtype;
""" % (','.join(parent_dataset_ids),
)
secondary_file_query_results = get_query_results(secondary_file_query)
d.update(dict(secondary_file_query=secondary_file_query,
secondary_file_query_results=secondary_file_query_results))
return render_to_response('miniverse/test_query.html', d)
return HttpResponse('hi')
def get_query_results(query_str):
cursor = connection.cursor()
cursor.execute(query_str)
return dictfetchall(cursor)
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
| {
"content_hash": "8a4bbae5db5161e82552cc69fb228d9e",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 110,
"avg_line_length": 33.29370629370629,
"alnum_prop": 0.626339004410838,
"repo_name": "IQSS/miniverse",
"id": "29bf0fc81ca74e2b4424ee8b9a96a424e503d7be",
"size": "4761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dv_apps/dvobjects/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "171690"
},
{
"name": "HTML",
"bytes": "218468"
},
{
"name": "JavaScript",
"bytes": "3789254"
},
{
"name": "Python",
"bytes": "710342"
}
],
"symlink_target": ""
} |
from shotgun_api3.shotgun import Shotgun
from elefant.utilities import config
from shotgun_replica import cleanSysName, connectors, _create_shotgun_classes, thumbnails
from shotgun_replica.utilities import debug
from shotgun_replica.sync import sync_settings
# leave empty for every entity to be checked
UPDATE_ONLY = [ ]
def _connect():
conf = config.Configuration()
conn = connectors.getDBConnection()
cur = conn.cursor()
sg = Shotgun( conf.get( config.CONF_SHOTGUN_URL ),
conf.get( config.CONF_SHOTGUN_SYNC_SKRIPT ),
conf.get( config.CONF_SHOTGUN_SYNC_KEY ) )
return ( conn, cur, sg )
def importEntities( conn, cur, sg ):
debug.debug( "starting import Entities", debug.INFO )
entities = sg.schema_entity_read()
classes = entities.keys()
classes.sort()
for entityType in classes:
if entityType in ["EventLogEntry"]:
continue
if len( UPDATE_ONLY ) > 0 and entityType not in UPDATE_ONLY:
continue
entityName = cleanSysName( entities[entityType]["name"]["value"] )
if entityType.endswith( "Connection" ):
entityName = entityType
debug.debug( "import entities of type " + entityType )
fieldList = connectors.getClassOfType( entityName ).shotgun_fields
debug.debug( "deleting entities of type " + entityType )
query = "DELETE FROM \"%s\"" % ( entityType )
cur.execute( query )
debug.debug( "loading entities of type " + entityType )
objects = sg.find( entityType, [["id", "greater_than", 0]], fieldList.keys() )
for obj in objects:
values = []
names = []
reprs = []
for fieldName in fieldList.keys():
sgType = fieldList[fieldName]['data_type']['value']
convFunc = connectors.getConversionSg2Pg( sgType )
if convFunc != None:
names.append( "\"%s\"" % fieldName )
if sgType == "image" and obj[fieldName] != None:
thumbnails.saveShotgunImageLocally( obj[fieldName] )
if sgType == "multi_entity":
reprs.append( "%s::entity_sync[]" )
else:
reprs.append( "%s" )
values.append( convFunc( obj[fieldName] ) )
query = "INSERT INTO \"%s\" (%s) VALUES (%s)" % ( entityType,
", ".join( names ),
", ".join( reprs ) )
debug.debug( cur.mogrify( str( query ), values ), debug.DEBUG )
cur.execute( query, values )
conn.commit()
debug.debug( "finnished import Entities", debug.INFO )
def setSyncSettings( sg ):
eventliste = sg.find(
"EventLogEntry",
filters = [ ],
fields = ['id'],
order = [{'column':'id', 'direction':'desc'}],
filter_operator = 'all',
limit = 1 )
lastEventId = eventliste[0]["id"]
syncSettings = sync_settings.SyncomaniaSettings()
syncSettings.load()
syncSettings[sync_settings.FIELD_LASTEVENTID] = lastEventId
syncSettings.save()
def removeChangeEvents( conn, cur, sg ):
cur.execute('DELETE FROM "ChangeEventsToShotgun"');
if __name__ == "__main__":
( conn, cur, sg ) = _connect()
_create_shotgun_classes.main()
setSyncSettings( sg )
removeChangeEvents( conn, cur, sg )
importEntities( conn, cur, sg )
| {
"content_hash": "f08d758144f48db4b9bf69b8a3d41bfb",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 89,
"avg_line_length": 34.471698113207545,
"alnum_prop": 0.5514504652435687,
"repo_name": "dahiro/shotgun-replica",
"id": "b459142a683e59d14e91196600dd1d091b5591b0",
"size": "3656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shotgun_replica/python/src/shotgun_replica/initialization/initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5184"
},
{
"name": "Python",
"bytes": "140687"
}
],
"symlink_target": ""
} |
import SocketServer
import json
import Globals
from Vec2D import Vec2D
from Environment import Environment
from Circle import Circle,Obstacle
def trajectory(start,v_start,end,v_end,delta_t,obstacles):
obs = obstacles + Globals.STATIC_OBSTACLES
env = Environment(obs)
return env.path(start,v_start,end,v_end,delta_t)
class TrajectoryServer(SocketServer.ThreadingTCPServer):
allow_reuse_address = True
class TrajectoryHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
data = json.loads(self.request.recv(2048).strip())
start_info = data[0]
end_info = data[1]
delta_t = float(data[2])/1000 #ms -> s
nb_of_waypoints = data[3]
obstacles = data[4]
start = Vec2D(float(start_info[0])/1000,float(start_info[1])/1000)
v_start = Vec2D(float(start_info[2])/1000,float(start_info[3])/1000)
end = Vec2D(float(end_info[0])/1000,float(end_info[1])/1000)
v_end = Vec2D(0,0)
obstacle_list = []
for o in obstacles:
pos = Vec2D(float(o[0])/1000,float(o[1])/1000)
speed = Vec2D(float(o[2])/1000,float(o[3])/1000)
r = float(o[4])/1000
obstacle_list.append(Obstacle(Circle(pos,r + Globals.ROBOT_RADIUS),speed))
traj = trajectory(start,v_start,end,v_end,delta_t,obstacle_list)[:nb_of_waypoints]
response = []
for w in traj:
pos = w[0]
x = int(pos.x * 1000)
y = int(pos.y * 1000)
v = w[1]
vx = int(v.x * 1000)
vy = int(v.y * 1000)
t = int(w[2] * 1000)
response.append([x,y,vx,vy,t])
self.request.sendall(json.dumps(response).strip())
except Exception, e:
print "Exception wtf?", e
server = TrajectoryServer(('0.0.0.0',1337),TrajectoryHandler)
server.serve_forever()
| {
"content_hash": "216ea8d85a69b5902762115a1e5664f2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 94,
"avg_line_length": 32.33870967741935,
"alnum_prop": 0.5635910224438903,
"repo_name": "31415us/linesNcircles",
"id": "1bbf5f1069c05f465c0de5e07d842fb4075f2fee",
"size": "2006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28857"
}
],
"symlink_target": ""
} |
"""
@author: J.W. Spaak
Contains all functions, that are needed to generate communities as well as
compute the EF of those communities
Contains the same functions as community_construction_coex, but does so for
the replacing community structure
"""
import numpy as np
import pickle
from numpy.random import uniform as uni
from scipy.integrate import simps
sqrt = np.sqrt(3) #is needed often in the program
n = 20 #number of species, constant for all cases
def dist(low = 0, high = 1, size = None):
"""returns a random variable between low and high, uniform distribution
Parameters:
low: array like
lower boundaries for distributions
high: array like
higher boundaries for distributions, must have same shape as low
size: integer, optional
size of randomvariables to be generated. If size is not None, then
high and low must be floats
Returns:
rand_var: array like
Same shape as low and high. Contains the random variables
Examples:
low = np.array([0,0,2])
high = np.array([1,2,4])
result = vec_uni(low, high)
# result[0] is uniformly distributed in [0,1]
# result[1] is uniformly distributed in [0,2]
# result[2] is uniformly distributed in [2,4]"""
if size == None:
size = low.shape
low = np.array(low)
high = np.array(high)
else:
low = low*np.ones(size) #convert low and high into right shape
high = high*np.ones(size)
# choose the type of the distributions
dist_fun = np.random.uniform
return low+(high-low)*dist_fun(size = size) #linear transformation
def rand_par(e_min=-1, ave_min = -0.5, ave_max = 0.5, e_max = 1,p='rand',
ad_com = 0.005,num = 100000):
""" returns randomized parameters for num_com communities
The function randomly generates num_com*(1+ad_com) communities until it
finds num_com communities fullfilling all coexistence requirements. This
method slightly shifts the distribution of alpha and e towards 0. The
error in the distribution is smaller than ad_com
Pleasse refer to supplementary data 7 to understand the code
Input:
num_com: scalar
number of species to be generated
p: scalar or string
Percent of species that are in ref and changed site (Type b species)
p*n must be an integer or p ='rand'
'rand will randomly generate values for p
Note: p = 1 is NOT equivalent to coex.rand_par, because of the
coexistence requirements.
ave_max, ave_min: scalar<1, ave_min<=ave_max
the maximum/minimum that are allowed for the
average sensitivity of each species type
e_max, e_min: scalar<1, e_min<=ave_min,ave_max<=e_max
the maximum/minimum that are allowed for the sensitivity
of each species (individually)
ad_com: scalar
Proportion of additionally computed communities.
returns:
mu: dict
Contains all growth rates and some associated values
e: dict
contains the sensitivity
comp: array
Relative competition
f: dict
contains the per capita contributions
p_ret: array
Percent of species that are in ref and changed site (Type b species)
if p was a scalar, then p_ret = p*np.ones(num_com).
alpha: array
competition parameter
"""
#check input correctness
if not(e_min<=ave_min<=ave_max<=e_max):
raise InputError("Please sort the input: e_min<=ave_min<=ave_max<=e_max")
if e_max>1: #growth rate of that species would be negative
raise InputError("e_max>1, effects above 1 are not allowed")
if not (p=='rand' or p*n==int(p*n)): #a species goes extinct or not
raise InputError("p must either be 'rand' or p*n must be an integer")
#save the original number of communites
num_com = num
#number of communities to construct
num = int(np.ceil(num_com*(1+ad_com)))
# fixed parameters, do not change to find communities
e_fix = {'avb':dist(ave_min,ave_max,num), #average effect on species b
'avc': np.zeros(num), #will be filled with data while running
'tc': np.zeros(num),
'tb': np.zeros(num)}
mu_fix = {'avb':np.zeros(num),#will be filled with data while running
'avc': np.zeros(num),
'tc': np.zeros(num),
'avu': np.zeros(num),
'tu': np.zeros(num),
'tb': np.zeros(num)}
alpha = uni(-0.95,-0.05,num) # interaction coecfficient
comp_fix = -alpha*n/(1-alpha*(n-1)) #effective competition , computed
# Fixed communities fullfill coexistence requirements
not_fix = np.array(num*[True])
# percent of species with type C
if p is 'rand':
p_fix = np.random.randint(1,n-1,num)/n
else:
p_fix = p*np.ones(num)
#randomly generate communities, until num_com many fullfill coex. req.
#attention, changing settings might turn this into an infinite loop
while num>num_com*ad_com:
#copy the predefined values into arrays to be used
e = {'avb': e_fix['avb'][not_fix]}
p = p_fix[not_fix]
q = 1-p
comp = comp_fix[not_fix]
# min(mu['avb'],mu['avu'])/(p*mu['avb']+q*mu['avu'])>comp
mu = {'avb': dist(0,10,num)}
mu['avu'] = dist(mu['avb']*comp*p/(1-q*comp),
np.amin([mu['avb']*(1-p*comp)/(q*comp),10*np.ones(num)], axis = 0))
#coexistence limit, min(mu)/mean(mu)>comp
tresh1 = comp*(p*mu['avb']+q*mu['avu'])
# chosen such that min (mu_u,mu_b) > tresh1, i.e. coexist
mu['tb'] =dist(-(1-tresh1/mu['avb'])/sqrt,(1-tresh1/mu['avb'])/sqrt)
mu['tu'] =dist(-(1-tresh1/mu['avu'])/sqrt,(1-tresh1/mu['avu'])/sqrt)
# mu['avc']*(1-ave_min) must be able to coexist in changed site
tresh2 = mu['avb']*(1-e['avb'])*p*comp/(1-comp*q)/(1-ave_min)
# we always have treshhold2<treshhold1
mu['avc'] = dist(tresh2,tresh1)
# ensure, that min(mu_c) fullfills same conditions as mu['avc']
bound = np.amin([tresh1/mu['avc']-1, 1-tresh2/mu['avc']],axis = 0)/sqrt
mu['tc'] = dist(-bound,bound)
# mu['avc']*(1-e['avc']) fullfills coexistence conditions
# choose min for e['avc']
tresh1 = np.amax([1-mu['avb']/mu['avc']*(1-e['avb'])*(1-comp*p)\
/(q*comp),ave_min*np.ones(num)],axis = 0)
# choose max for e['avc']
tresh2 = np.amin([1-mu['avb']/mu['avc']*(1-e['avb'])/(1-comp*q)\
*(p*comp),ave_max*np.ones(num)],axis = 0)
e['avc'] = dist(tresh1, tresh2)
# choose borders, that e_i are within [e_min, e_max]
minimum = np.amin([np.sign(e['avb'])*(e_max/e['avb']-1),
np.sign(e['avb'])*(1-e_min/e['avb'])], axis = 0)
e['tb'] = uni(-minimum/sqrt,minimum/sqrt)
minimum = np.amin([np.sign(e['avc'])*(e_max/e['avc']-1),
np.sign(e['avc'])*(1-e_min/e['avc'])], axis = 0)
e['tc'] = dist(-minimum/sqrt,minimum/sqrt)
# average growthsrates in changed site of the species types
mu['avb_change'] = mu['avb']*e['avb']*(1/e['avb']-1 - mu['tb']*e['tb'])
mu['avc_change'] = mu['avc']*e['avc']*(1/e['avc']-1 - mu['tc']*e['tc'])
# average growthrate of entire community in changed site
mu['av_change'] = p*mu['avb_change']+q*mu['avc_change']
# reference types are assumed to have e_i = 1, always
# if this part of the code is changed, please also change in coex_test
# e['avu'] = 1 #change if desired differently
# e['tu'] = 0
#copy the parameters into the fixed parameters
for k in e_fix.keys():
if k == 'avb': #do not copy into fixed 'avb'
pass
e_fix[k][not_fix] = e[k]
for k in mu_fix.keys():
mu_fix[k][not_fix] = mu[k]
#check which species can coexist and update not_fix
coex = coex_test(mu,e,comp)
not_fix[not_fix] = np.logical_not(coex)
num = np.count_nonzero(not_fix) #number of not fixed communities
fix = np.logical_not(not_fix) #communities that are fixed, i.e. coex
# choose only num_com coexisting communities
comp_ret = comp_fix[fix][:num_com]
alpha_ret = alpha[fix][:num_com]
p_ret = p_fix[fix][:num_com]
mu_ret = {key: mu_fix[key][fix][:num_com] for key in mu_fix.keys()}
e_ret = {key: e_fix[key][fix][:num_com] for key in e_fix.keys()}
# average growthsrates in changed site of the species types
mu_ret['avb_change'] = mu_ret['avb']*e_ret['avb']*\
(1/e_ret['avb']-1 - mu_ret['tb']*e_ret['tb'])
mu_ret['avc_change'] = mu_ret['avc']*e_ret['avc']*\
(1/e_ret['avc']-1 - mu_ret['tc']*e_ret['tc'])
# average growthrate of entire community
mu_ret['av_change'] = p_ret*mu_ret['avb_change']+\
(1-p_ret)*mu_ret['avc_change']
# generate distribution of per capita contributions for species types
t_fb, t_fu, t_fc = uni(-1/sqrt, 1/sqrt,[3,num_com]) # stdv/mean
avfb, avfu, avfc = uni(0.5,1.5,[3,num_com]) #averages of f
f = {'avb':avfb,'avu':avfu,'avc':avfc,\
'tb':t_fb,'tu':t_fu,'tc':t_fc}
# communities fullfill coexistence
return mu_ret, e_ret,comp_ret,f,p_ret,alpha_ret
def coex_test(mu, e,comp, f = None, p = None, alpha = None):
"""tests if coexistence is given in changed site; see supp. Info 7
Input:
mu, e, comp:
As in output of rand_par
f, alpha, p: optional
Are not needed. They are just used s.t. one can
run coex_test(rand_par(*args))
returns:
coex: array, dtype = boolean
An array with coex rewuirements. True means, that this comunitiy
fullfills the coexistence requirements
Note: Does not check coexistence conditions in reference site,
nor that U species cannot survive in changed site and vice versa
These conditions are always fullfilled by the chosen parameter settings
By changing the above parameters this might become necessary"""
#computes the growthrate of one species
mu_change = lambda x,t: mu['av'+t]*(1+x*mu['t'+t]*sqrt)\
*e['av'+t]*(1/e['av'+t]-(1+x*e['t'+t]*sqrt))/mu['av_change']
# minimal growthrate of all species in changed site, extremum is on boundary
minimal = np.ones(len(comp))
for x,t in [[1,'b'],[-1,'b'],[1,'c'],[-1,'c']]:
minimal = np.amin([mu_change(x,t),minimal], axis = 0)
"""The following checks whether u species are extinct
# maxima on the boundaries
maximal = np.amax(mu_str(1,'u'),mu_str(-1,'u'),axis = 0)
# maxima in interior
loc_max = 0.5*(1/e['tu']*(1/e['avu']-1)-1/mu['tu'])
in_int = np.logical_and(-1<loc_max,loc_max<1)
maximal[in_int]= np.amax(maximal[in_int],mu_str(loc_max[in_int],'u',axis=0)
return np.logical(minimal>comp, maximal<comp)"""
return minimal>comp
def EF_fun(mu,f,alpha,p,site,cov):
""" computes the EF of the given system
Input
mu, f, alpha, p:
as in output of rand_par
s: "change" or "ref"
Site EF is computed for
cov: dict
containing the covariances of mu and f
adjust: boolean
Set to False to see the effect f the adjusment terms
returns:
EF: array
Array containing EF at site
For computational background see Eq. 6"""
s = {"ref": ['u',''], "change": ['c','_change']}[site]
q = 1-p
comp = -alpha*n/(1-alpha*(n-1))
EF1 = n*f['avb']*mu['avb'+s[1]]/(1+alpha)*(cov['b'+s[1]]+1-comp)
EF2 = n*f['av'+s[0]]*mu['av'+s[0]+s[1]]/(1+alpha)*(cov[s[0]+s[1]]+1-comp)
return p*EF1+q*EF2+p*q*n*comp/(1+alpha)*(f['avb']-f['av'+s[0]])\
*(mu['avb'+s[1]]-mu['av'+s[0]+s[1]])
def delta_EF_lin(mu, e,comp,f,p, alpha, sim_f = True):
"""computes \DeltaEF/EF in the case of changing composition
For computational background see Eq. 7
per capita contribution is assumed constant
Input
mu, e, comp, f, alpha, p:
As in output of rand_par
adjust: boolean
Set to False to see the effect f the adjusment terms
sim_f: boolean
If True, all species will have same distributions for f
returns:
deltaEF/EF: array
Array containing 100*deltaEF/EF"""
fi = f.copy()
if sim_f:
for let in ['c','u']:
fi['t'+let] = fi['tb']
fi['av'+let] = fi['avb']
#covariances of the relative distributions
cov = {'b': mu['tb']*fi['tb'], 'u': mu['tu']*fi['tu']}
cov['b_change'] = fi['tb']*(mu['tb']*(1/e['avb']-1)-e['tb'])\
/(1/e['avb']-1-e['tb']*mu['tb'])
cov['c_change'] = fi['tc']*(mu['tc']*(1/e['avc']-1)-e['tc'])\
/(1/e['avc']-1-e['tc']*mu['tc'])
#ecosystem funcitoning at reference site
EF_u = EF_fun(mu,fi,alpha,p,"ref",cov)
#ecosystem funcitoning at changed site
EF_c = EF_fun(mu,fi,alpha,p,"change",cov)
return 100*(EF_c-EF_u)/EF_u #multiply by 100, result in percent
def delta_EF_asym(mu, e,comp,f,p, alpha = None, max_ave_H=1):
"""computes the EF with asymptotic f, f(N) = f_i*H_i*N_i/(N_i+H_i)
For more information see S10
H_i is uniformly distributed in [0,2*ave_H]
Input
mu, e, comp, f, p:
As in output of rand_par
alpha: optional
Is not needed. They are just used s.t. one can
run delta_EF_asym
max_ave_H: scalar, optional
maximum for the average of H, maximum over all communities
returns:
deltaEF/EF: array
Array containing 100*deltaEF/EF, asymptotic contribution to EF"""
num = len(alpha) #number of species
# choose distributions of H: H ~u[0,2*ave]
temp = uni(0,max_ave_H,3)
gam = {'avb':temp[0],'avu':temp[1],'avc':temp[2]}
temp = uni(-1/sqrt, 1/sqrt,3)
gam.update({'tb':temp[0],'tu':temp[1],'tc':temp[2]})
H = lambda x,t: gam['av'+t]*(1+gam['t'+t]*sqrt*x)\
*mu['av'+t]*(1+mu['t'+t]*x*sqrt)
#asymptotic EF in N, f(N) = f_i*H_i*N_i/(N_i+H_i)
#change to consider different contribution to function
eco_fun = lambda x,t, N: f['av'+t]*(1+f['t'+t]*x*sqrt)*H(x,t)*N(x,t)\
/(N(x,t)+H(x,t))
# growthrates in different sites
mu_ref = lambda x,t: mu['av'+t]*(1+x*sqrt*mu['t'+t])
mu_change = lambda x,t: mu['av'+t]*(1+x*sqrt*mu['t'+t])*\
(1-e['av'+t]*(1+e['t'+t]*sqrt*x))
# computes the equilibrium densities of species N, in changed and ref site
N = lambda x,t,mu,avmu: (mu(x,t)-comp*avmu)/(1+alpha)
N_ref = lambda x,t: N(x,t,mu_ref,p*mu['avb']+(1-p)*mu['avu'])
N_change = lambda x,t: N(x,t,mu_change,mu['av_change'])
# integrate over all species for EF
x_simp = np.array(num*[np.linspace(-1,1,51)]) #x_axes
y_ref = {'b': eco_fun(x_simp.T, 'b',N_ref).T,
'u': eco_fun(x_simp.T, 'u',N_ref).T}#y_values in ref
y_cha = {'b': eco_fun(x_simp.T, 'b',N_change).T,
'c': eco_fun(x_simp.T, 'c',N_change).T}#y_values in change
# compute the EF
EF_ref = n*(p*simps(y_ref['b'],x_simp)+(1-p)*simps(y_ref['u'],x_simp))
EF_change = n*(p*simps(y_cha['b'],x_simp)+(1-p)*simps(y_cha['c'],x_simp))
return 100*(EF_change-EF_ref)/EF_ref #multiply by 100 for percent
#load the communities
try:
para = pickle.load(open("repl, com_para.p", "rb"))
except FileNotFoundError: #file not computed yet, will be computed
import parameters_construction
para = parameters_construction.para_return(rand_par)
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
msg -- explanation of the error
"""
def __init__(self, msg):
self.msg = msg
| {
"content_hash": "22fd41abe23ce5834cb1a858b8faca23",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 86,
"avg_line_length": 41.770992366412216,
"alnum_prop": 0.5699317738791423,
"repo_name": "juergspaak/EF-at-invariant-richness",
"id": "3abd09f6c4c2bc0a2a170beb12cd3ed8e0b13484",
"size": "16416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "community_construction_repl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43015"
}
],
"symlink_target": ""
} |
"""Tests for pipelines_common."""
# internal imports
import tensorflow as tf
from magenta.common import testing_lib as common_testing_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.pipelines import pipelines_common
from magenta.protobuf import music_pb2
class PipelineUnitsCommonTest(tf.test.TestCase):
def _unit_transform_test(self, unit, input_instance,
expected_outputs):
outputs = unit.transform(input_instance)
self.assertTrue(isinstance(outputs, list))
common_testing_lib.assert_set_equality(self, expected_outputs, outputs)
self.assertEqual(unit.input_type, type(input_instance))
if outputs:
self.assertEqual(unit.output_type, type(outputs[0]))
def testTimeChangeSplitter(self):
note_sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
time: 2.0
numerator: 3
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
expected_sequences = sequences_lib.split_note_sequence_on_time_changes(
note_sequence)
unit = pipelines_common.TimeChangeSplitter()
self._unit_transform_test(unit, note_sequence, expected_sequences)
def testQuantizer(self):
steps_per_quarter = 4
note_sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
expected_quantized_sequence = sequences_lib.quantize_note_sequence(
note_sequence, steps_per_quarter)
unit = pipelines_common.Quantizer(steps_per_quarter)
self._unit_transform_test(unit, note_sequence,
[expected_quantized_sequence])
def testRandomPartition(self):
random_partition = pipelines_common.RandomPartition(
str, ['a', 'b', 'c'], [0.1, 0.4])
random_nums = [0.55, 0.05, 0.34, 0.99]
choices = ['c', 'a', 'b', 'c']
random_partition.rand_func = iter(random_nums).next
self.assertEqual(random_partition.input_type, str)
self.assertEqual(random_partition.output_type,
{'a': str, 'b': str, 'c': str})
for i, s in enumerate(['hello', 'qwerty', '1234567890', 'zxcvbnm']):
results = random_partition.transform(s)
self.assertTrue(isinstance(results, dict))
self.assertEqual(set(results.keys()), set(['a', 'b', 'c']))
self.assertEqual(len(results.values()), 3)
self.assertEqual(len([l for l in results.values() if l == []]), 2) # pylint: disable=g-explicit-bool-comparison
self.assertEqual(results[choices[i]], [s])
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "464e83c7a3e3eb9bacf0d0d085b578b1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 118,
"avg_line_length": 37.166666666666664,
"alnum_prop": 0.6236386931454196,
"repo_name": "bda2017-shallowermind/MusTGAN",
"id": "d1530de72254eb115ecbde75e5d133179f2da2f5",
"size": "3717",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magenta/magenta/pipelines/pipelines_common_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12668"
},
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "43259"
},
{
"name": "Jupyter Notebook",
"bytes": "2115912"
},
{
"name": "Protocol Buffer",
"bytes": "12931"
},
{
"name": "Python",
"bytes": "1389487"
},
{
"name": "Shell",
"bytes": "8783"
}
],
"symlink_target": ""
} |
scene.run("BehaviorSetCommon.py")
def setupBehaviorSet():
scene = getScene()
print "Setting up behavior set for Reaching ..."
#scene.loadAssetsFromPath("behaviorsets/reaching/skeletons")
#scene.loadAssetsFromPath("behaviorsets/reaching/motions")
scene.addAssetPath("script", "behaviorsets/reaching/scripts")
assetManager = scene.getAssetManager()
motionPath = "behaviorsets/reaching/motions/"
skel = scene.getSkeleton("common.sk")
if skel == None:
scene.loadAssetsFromPath("behaviorsets/reaching/skeletons")
commonSk = scene.getSkeleton("common.sk")
# mirror all arm and hand motions
preFix = ""
rightHandMotions = StringVec();
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachRtHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachRtMidHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachRtMidLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachLfLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachLfHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachLfMidHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachRtMidLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachRtLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachMiddleHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachMiddleMidHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachMiddleMidLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachMiddleLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachClose_Lf")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachClose_Rt")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachClose_MiddleHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachClose_MiddleLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachClose_MiddleMidHigh")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachClose_MiddleMidLow")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_High1")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_High2")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_Low1")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_Low2")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_MidHigh1")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_MidHigh2")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_MidLow1")
rightHandMotions.append(preFix+"ChrHarmony_Relax001_ArmReachBehind_MidLow2")
rightHandMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Grasp")
rightHandMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Reach")
rightHandMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Release")
rightHandMotions.append("HandsAtSide_RArm_GestureYou")
leftHandMotions = StringVec();
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachRtHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachRtMidHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachRtMidLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachLfLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachLfHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachLfMidHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachRtMidLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachRtLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachMiddleHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachMiddleMidHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachMiddleMidLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachMiddleLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachClose_Lf")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachClose_Rt")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachClose_MiddleHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachClose_MiddleLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachClose_MiddleMidHigh")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachClose_MiddleMidLow")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_High1")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_High2")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_Low1")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_Low2")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_MidHigh1")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_MidHigh2")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_MidLow1")
leftHandMotions.append(preFix+"ChrHarmony_Relax001_LArmReachBehind_MidLow2")
leftHandMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Grasp")
leftHandMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Reach")
leftHandMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Release")
leftHandMotions.append("HandsAtSide_LArm_GestureYou")
for i in range(0,len(rightHandMotions)):
motion = scene.getMotion(rightHandMotions[i])
if motion == None:
assetManager.loadAsset(motionPath+rightHandMotions[i]+'.skm')
motion = scene.getMotion(rightHandMotions[i])
#print 'motionName = ' + locoMotions[i]
if motion != None:
motion.setMotionSkeletonName("common.sk")
mirrorMotion1 = scene.getMotion(rightHandMotions[i])
mirrorMotion1.mirror(leftHandMotions[i], 'common.sk')
def retargetBehaviorSet(charName):
reachMotions = StringVec()
reachMotions.append("ChrHarmony_Relax001_ArmReachRtHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachRtMidHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachRtMidLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachLfLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachLfHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachLfMidHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachRtMidLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachRtLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachMiddleHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachMiddleMidHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachMiddleMidLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachMiddleLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachClose_Lf")
reachMotions.append("ChrHarmony_Relax001_ArmReachClose_Rt")
reachMotions.append("ChrHarmony_Relax001_ArmReachClose_MiddleHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachClose_MiddleLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachClose_MiddleMidHigh")
reachMotions.append("ChrHarmony_Relax001_ArmReachClose_MiddleMidLow")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_High1")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_High2")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_Low1")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_Low2")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_MidHigh1")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_MidHigh2")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_MidLow1")
reachMotions.append("ChrHarmony_Relax001_ArmReachBehind_MidLow2")
reachMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Grasp")
reachMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Reach")
reachMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Release")
reachMotions.append("HandsAtSide_RArm_GestureYou")
reachMotions.append("ChrHarmony_Relax001_LArmReachRtHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachRtMidHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachRtMidLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachLfLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachLfHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachLfMidHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachRtMidLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachRtLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachMiddleHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachMiddleMidHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachMiddleMidLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachMiddleLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachClose_Lf")
reachMotions.append("ChrHarmony_Relax001_LArmReachClose_Rt")
reachMotions.append("ChrHarmony_Relax001_LArmReachClose_MiddleHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachClose_MiddleLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachClose_MiddleMidHigh")
reachMotions.append("ChrHarmony_Relax001_LArmReachClose_MiddleMidLow")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_High1")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_High2")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_Low1")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_Low2")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_MidHigh1")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_MidHigh2")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_MidLow1")
reachMotions.append("ChrHarmony_Relax001_LArmReachBehind_MidLow2")
reachMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Grasp")
reachMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Reach")
reachMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Release")
reachMotions.append("HandsAtSide_LArm_GestureYou")
#outDir = scene.getMediaPath() + 'retarget/motion/' + skelName + '/';
#print 'outDir = ' + outDir ;
#if not os.path.exists(outDir):
# os.makedirs(outDir)
# retarget reaching
#for n in range(0, len(reachMotions)):
# retargetMotion(reachMotions[n], 'common.sk', skelName, outDir + 'Reaching/');
assetManager = scene.getAssetManager()
for i in range(0, len(reachMotions)):
sbMotion = assetManager.getMotion(reachMotions[i])
if sbMotion != None:
sbMotion.setMotionSkeletonName('common.sk')
sbChar = scene.getCharacter(charName)
if sbChar == None:
return
skelName = sbChar.getSkeleton().getName()
createRetargetInstance('common.sk', skelName)
scene.run("init-reach.py")
reachSetup(charName, "KNN", 'common.sk', '') | {
"content_hash": "2e001487330f4b61026ab165914f0567",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 82,
"avg_line_length": 54.53367875647668,
"alnum_prop": 0.8120665083135392,
"repo_name": "gsi-upm/SmartSim",
"id": "edafd907e60acd58b7b00c63ebad4a2375d1a98b",
"size": "10525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartbody/data/behaviorsets/BehaviorSetReaching.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11708"
},
{
"name": "C",
"bytes": "941473"
},
{
"name": "C#",
"bytes": "733730"
},
{
"name": "C++",
"bytes": "16389947"
},
{
"name": "CMake",
"bytes": "114424"
},
{
"name": "D",
"bytes": "175403"
},
{
"name": "GLSL",
"bytes": "45459"
},
{
"name": "Groff",
"bytes": "2619"
},
{
"name": "HTML",
"bytes": "1128698"
},
{
"name": "Inno Setup",
"bytes": "8592"
},
{
"name": "Java",
"bytes": "371478"
},
{
"name": "M4",
"bytes": "16806"
},
{
"name": "Makefile",
"bytes": "240549"
},
{
"name": "Objective-C",
"bytes": "4511"
},
{
"name": "Objective-C++",
"bytes": "29141"
},
{
"name": "Pascal",
"bytes": "13551"
},
{
"name": "Protocol Buffer",
"bytes": "3178"
},
{
"name": "Python",
"bytes": "989019"
},
{
"name": "Rust",
"bytes": "105"
},
{
"name": "Shell",
"bytes": "248995"
},
{
"name": "Smalltalk",
"bytes": "1540"
},
{
"name": "Smarty",
"bytes": "179"
},
{
"name": "XSLT",
"bytes": "3925"
}
],
"symlink_target": ""
} |
import argparse
from ConfigParser import ConfigParser
import pexpect
def main(args):
url = args.url
user, host = url.split('@', 1)
cfg_file = 'ssh.cfg'
cfg = ConfigParser()
cfg.read(cfg_file)
passwd = cfg.get(user, host)
child = pexpect.spawn('ssh {0}'.format(url))
child.expect('password:')
child.sendline(passwd)
child.interact()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run ssh through pexpect')
parser.add_argument('url')
args = parser.parse_args()
main(args)
| {
"content_hash": "bc338709e290416f665ebd3a085a9134",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 24.17391304347826,
"alnum_prop": 0.6438848920863309,
"repo_name": "lifuzu/bashrc.d",
"id": "3f73f65b3780dd34f1a53bf36e1acd3deb9f2bf4",
"size": "694",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ssh/login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "694"
},
{
"name": "Shell",
"bytes": "5567"
}
],
"symlink_target": ""
} |
import unittest
import sys, os, time
import json
sys.path.append('./')
import browser.firefox as firefox
import common.shared as shared
from cstests.smoketests.smokecfg import smokecfg
import cspages.login.loginpage as loginpage
import cspages.dashboard.dashboardpage as dashboardpage
# from cstests.smoketests import smokecfg as smokecfg
class TestCSnavigation(unittest.TestCase):
def setUp(self):
# Create a new instance of the Firefox browser
self.browser = firefox.Firefox('firefox')
def tearDown(self):
self.browser.quit_browser()
def test_success(self):
self.browser.set_url(smokecfg['cssite'])
self.loginpage = loginpage.LoginPage(self.browser.get_browser())
# wait for at most 5 minutes, in case we have an anoyingly slow server
shared.Shared.wait_for_element(self.browser.get_browser(), 'class_name', 'select-language', waittime = 300)
# language selection must be done before username and password
self.loginpage.set_language(smokecfg['language'])
shared.Shared.wait_for_element(self.browser.get_browser(), 'class_name', 'fields', waittime = 300)
self.loginpage.set_username(smokecfg['username'])
self.loginpage.set_password(smokecfg['password'])
self.loginpage.login()
shared.Shared.wait_for_element(self.browser.browser, 'id', 'navigation')
time.sleep(3)
self.dashboardpage = dashboardpage.DashboardPage(self.browser.get_browser())
items = self.dashboardpage.get_items()
[self.dashboardpage.navigate_to(item) for item in items]
self.loginpage.logout()
shared.Shared.wait_for_element(self.browser.browser, 'class_name', 'login')
def xtest_failure_8(self):
self.browser.set_url(smokecfg['cssite'])
self.loginpage = loginpage.LoginPage(self.browser.get_browser())
# language selection must be done before username and password
self.loginpage.set_language(smokecfg['language'])
self.loginpage.set_username(smokecfg['sqlinjection_5'])
self.loginpage.set_password(smokecfg['password'])
self.loginpage.login(expect_fail = True)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "b3bfc7fcb93db78ba375fd0135889369",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 115,
"avg_line_length": 35.96774193548387,
"alnum_prop": 0.6964125560538117,
"repo_name": "DaanHoogland/cloudstack",
"id": "01ea449505ec54a7d5910cbfb6bf18284b0e5035",
"size": "3016",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "test/selenium/cstests/smoketests/navigation_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9979"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "343148"
},
{
"name": "Dockerfile",
"bytes": "2375"
},
{
"name": "FreeMarker",
"bytes": "4887"
},
{
"name": "Groovy",
"bytes": "146420"
},
{
"name": "HTML",
"bytes": "153560"
},
{
"name": "Java",
"bytes": "36818077"
},
{
"name": "JavaScript",
"bytes": "8264908"
},
{
"name": "Python",
"bytes": "12533840"
},
{
"name": "Ruby",
"bytes": "22732"
},
{
"name": "SCSS",
"bytes": "362625"
},
{
"name": "Shell",
"bytes": "708848"
},
{
"name": "XSLT",
"bytes": "57835"
}
],
"symlink_target": ""
} |
import copy
import datetime
import logging
import os
import django
from django.core.urlresolvers import reverse
from django import http
from django.utils import timezone
from django.utils import unittest
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects import workflows
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
from selenium.webdriver import ActionChains # noqa
from selenium.webdriver.common import keys
from socket import timeout as socket_timeout # noqa
INDEX_URL = reverse('horizon:identity:projects:index')
USER_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
class TenantsViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=True,
marker=None) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
@test.create_stubs({api.keystone: ('tenant_list', )})
def test_index_with_domain_context(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
domain_tenants = [tenant for tenant in self.tenants.list()
if tenant.domain_id == domain.id]
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=domain.id,
paginate=True,
marker=None) \
.AndReturn([domain_tenants, False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, domain_tenants)
self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
paginate=True,
marker=None,
admin=False) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
default_domain = self.domain
domain = {"id": self.request.session.get('domain_context',
default_domain.id),
"name": self.request.session.get('domain_context_name',
default_domain.name)}
return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
api.neutron: ('is_extension_supported',),
quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(True)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<input type="hidden" name="subnet" '
'id="id_subnet" />', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertQuerysetEqual(
workflow.steps,
['<CreateProjectInfo: createprojectinfoaction>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
'user_list',
'group_list',
'role_list',
'domain_get'),
api.neutron: ('is_extension_supported',
'tenant_quota_get'),
quotas: ('get_default_quota_data',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
quota = self.quotas.first()
neutron_quotas = self.neutron_quotas.first()
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(quota)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(neutron_quotas)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.users.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:identity:projects:create'))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
if django.VERSION >= (1, 6):
self.assertContains(res, '''
<input class="form-control"
id="id_subnet" min="-1"
name="subnet" type="number" value="10" />
''', html=True)
else:
self.assertContains(res, '''
<input class="form-control"
name="subnet" id="id_subnet"
value="10" type="text" />
''', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['subnet'],
neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
'add_tenant_user_role',
'tenant_create',
'user_list',
'group_list',
'role_list',
'domain_get'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_post(self, neutron=False):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_add_project_post(neutron=True)
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas')})
def test_add_project_quota_defaults_error(self):
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_tenant_create_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_tenant_create_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id) \
.AndRaise(self.exceptions.keystone)
break
break
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_user_update_error()
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_missing_field_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
workflow_data["name"] = ""
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
def _get_proj_users(self, project_id):
return [user for user in self.users.list()
if user.project_id == project_id]
def _get_proj_groups(self, project_id):
return [group for group in self.groups.list()
if group.project_id == project_id]
def _get_proj_role_assignment(self, project_id):
project_scope = {'project': {'id': project_id}}
return self.role_assignments.filter(scope=project_scope)
@test.create_stubs({api.keystone: ('get_default_role',
'roles_for_user',
'tenant_get',
'domain_get',
'user_list',
'roles_for_group',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas')})
def test_update_project_get(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
proj_users = self._get_proj_users(project.id)
role_assignments = self._get_proj_role_assignment(project.id)
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.UpdateProject.name)
step = workflow.get_step("update_info")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertEqual(step.action.initial['name'], project.name)
self.assertEqual(step.action.initial['description'],
project.description)
self.assertQuerysetEqual(
workflow.steps,
['<UpdateProjectInfo: update_info>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
api.nova: ('tenant_quota_update',),
api.cinder: ('tenant_quota_update',),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_update_project_save(self, neutron=False):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
proj_groups = self._get_proj_groups(project.id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
# Group assignment form data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id).AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id) \
.AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id) \
.AndReturn((roles[0],))
# remove role 1
api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='2',
role='1')
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='2',
role='2')
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id) \
.AndReturn((roles[1],))
# remove role 2
api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')
# add role 1
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='1')
# Group assignments
api.keystone.group_list(IsA(http.HttpRequest),
domain=domain_id,
project=self.tenant.id).AndReturn(proj_groups)
# admin group - try to remove all roles on current project
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
project=self.tenant.id) \
.AndReturn(roles)
for role in roles:
api.keystone.remove_group_role(IsA(http.HttpRequest),
role=role.id,
group='1',
project=self.tenant.id)
# member group 1 - has role 1, will remove it
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
project=self.tenant.id) \
.AndReturn((roles[0],))
# remove role 1
api.keystone.remove_group_role(IsA(http.HttpRequest),
role='1',
group='2',
project=self.tenant.id)
# add role 2
api.keystone.add_group_role(IsA(http.HttpRequest),
role='2',
group='2',
project=self.tenant.id)
# member group 3 - has role 2
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
project=self.tenant.id) \
.AndReturn((roles[1],))
# remove role 2
api.keystone.remove_group_role(IsA(http.HttpRequest),
role='2',
group='3',
project=self.tenant.id)
# add role 1
api.keystone.add_group_role(IsA(http.HttpRequest),
role='1',
group='3',
project=self.tenant.id)
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_get',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_update_project_save_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota_data)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_update_project_save(neutron=True)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_project_get_error(self):
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.nova: ('tenant_quota_update',)})
def test_update_project_tenant_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
proj_users = self._get_proj_users(project.id)
role_assignments = self.role_assignments.list()
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
for user in proj_users:
if role_ids:
workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
.append(user.id)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
role_ids = [role.id for role in roles]
for group in groups:
if role_ids:
workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
.append(group.id)
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.nova: ('tenant_quota_update',)})
def test_update_project_quota_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
proj_groups = self._get_proj_groups(project.id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# Group role assignment data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota[0].limit = 444
quota[1].limit = -1
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id).AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id) \
.AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id) \
.AndReturn((roles[1],))
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id) \
.AndReturn((roles[0],))
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')
# Group assignment
api.keystone.group_list(IsA(http.HttpRequest),
domain=domain_id,
project=self.tenant.id).AndReturn(proj_groups)
# admin group 1- try to remove all roles on current project
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
project=self.tenant.id) \
.AndReturn(roles)
# member group 1 - has no change
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
project=self.tenant.id) \
.AndReturn((roles[1],))
# member group 3 - has role 1
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
project=self.tenant.id) \
.AndReturn((roles[0],))
# add role 2
api.keystone.add_group_role(IsA(http.HttpRequest),
role='2',
group='3',
project=self.tenant.id)
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=2, warning=0)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_update_project_member_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id).AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id).AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id).AndReturn((roles[1],))
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id).AndReturn((roles[0],))
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')\
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=2, warning=0)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('get_default_role',
'tenant_get',
'domain_get'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas')})
def test_update_project_when_default_role_does_not_exist(self):
project = self.tenants.first()
domain_id = project.domain_id
quota = self.quotas.first()
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(None) # Default role doesn't exist
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
try:
# Avoid the log message in the test output when the workflow's
# step action cannot be instantiated
logging.disable(logging.ERROR)
with self.assertRaises(exceptions.NotFound):
self.client.get(url)
finally:
logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
def _stub_nova_api_calls(self, nova_stu_enabled=True):
self.mox.StubOutWithMock(api.nova, 'usage_get')
self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
self.mox.StubOutWithMock(api.nova, 'extension_supported')
self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
if neutron_sg_enabled:
self.mox.StubOutWithMock(api.network, 'security_group_list')
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'security-group').AndReturn(neutron_sg_enabled)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
if neutron_sg_enabled:
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False)
def _test_usage_csv(self, nova_stu_enabled=True):
now = timezone.now()
usage_obj = api.nova.NovaUsage(self.usages.first())
self._stub_nova_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
if nova_stu_enabled:
api.nova.usage_get(IsA(http.HttpRequest),
self.tenant.id,
start, end).AndReturn(usage_obj)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.limits['absolute'])
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self._stub_neutron_api_calls()
self.mox.ReplayAll()
project_id = self.tenants.first().id
csv_url = reverse('horizon:identity:projects:usage',
args=[project_id]) + "?format=csv"
res = self.client.get(csv_url)
self.assertTemplateUsed(res, 'project/overview/usage.csv')
self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
'Uptime (Seconds),State')
self.assertContains(res, '%s\r\n' % hdr)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
"The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
@test.create_stubs(
{api.keystone: ('tenant_list', 'tenant_get', 'tenant_update')})
def test_inline_editing_update(self):
# Tenant List
api.keystone.tenant_list(IgnoreArg(),
domain=None,
marker=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
# Edit mod
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
# Update - requires get and update
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
api.keystone.tenant_update(
IgnoreArg(),
u'1',
description='a test tenant.',
enabled=True,
name=u'Changed test_tenant')
# Refreshing cell with changed name
changed_tenant = copy.copy(self.tenants.list()[0])
changed_tenant.name = u'Changed test_tenant'
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(changed_tenant)
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
# Check the presence of the important elements
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
cell_wrapper = td_element.find_element_by_class_name(
'table_cell_wrapper')
edit_button_wrapper = td_element.find_element_by_class_name(
'table_cell_action')
edit_button = edit_button_wrapper.find_element_by_tag_name('button')
# Hovering over td and clicking on edit button
action_chains = ActionChains(self.selenium)
action_chains.move_to_element(cell_wrapper).click(edit_button)
action_chains.perform()
# Waiting for the AJAX response for switching to editing mod
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
# Changing project name in cell form
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
name_input = td_element.find_element_by_tag_name('input')
name_input.send_keys(keys.Keys.HOME)
name_input.send_keys("Changed ")
# Saving new project name by AJAX
td_element.find_element_by_class_name('inline-edit-submit').click()
# Waiting for the AJAX response of cell refresh
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']"))
# Checking new project name after cell refresh
data_wrapper = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']")
self.assertTrue(data_wrapper.text == u'Changed test_tenant',
"Error: saved tenant name is expected to be "
"'Changed test_tenant'")
@test.create_stubs(
{api.keystone: ('tenant_list', 'tenant_get')})
def test_inline_editing_cancel(self):
# Tenant List
api.keystone.tenant_list(IgnoreArg(),
domain=None,
marker=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
# Edit mod
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
# Cancel edit mod is without the request
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
# Check the presence of the important elements
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
cell_wrapper = td_element.find_element_by_class_name(
'table_cell_wrapper')
edit_button_wrapper = td_element.find_element_by_class_name(
'table_cell_action')
edit_button = edit_button_wrapper.find_element_by_tag_name('button')
# Hovering over td and clicking on edit
action_chains = ActionChains(self.selenium)
action_chains.move_to_element(cell_wrapper).click(edit_button)
action_chains.perform()
# Waiting for the AJAX response for switching to editing mod
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
# Click on cancel button
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
td_element.find_element_by_class_name('inline-edit-cancel').click()
# Cancel is via javascript, so it should be immediate
# Checking that tenant name is not changed
data_wrapper = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']")
self.assertTrue(data_wrapper.text == u'test_tenant',
"Error: saved tenant name is expected to be "
"'test_tenant'")
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
quotas: ('get_default_quota_data',)})
def test_membership_list_loads_correctly(self):
member_css_class = ".available_members"
users = self.users.list()
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(False)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(self.domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(self.quotas.first())
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
self.selenium.get("%s%s" %
(self.live_server_url,
reverse('horizon:identity:projects:create')))
members = self.selenium.find_element_by_css_selector(member_css_class)
for user in users:
self.assertIn(user.name, members.text)
| {
"content_hash": "b56b7e28f769178dfd2f35b93d785de7",
"timestamp": "",
"source": "github",
"line_count": 1805,
"max_line_length": 81,
"avg_line_length": 44.89418282548476,
"alnum_prop": 0.5220031098057606,
"repo_name": "AlexOugh/horizon",
"id": "bf8d6ae197b1fd70b6a84a19b3d39cb1f2f3eeef",
"size": "81639",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/identity/projects/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1000458"
},
{
"name": "JavaScript",
"bytes": "244031"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "4545176"
},
{
"name": "Shell",
"bytes": "18285"
}
],
"symlink_target": ""
} |
"""Module contains classes presenting Element and Specie (Element + oxidation state) and PeriodicTable."""
import re
import json
import warnings
from io import open
from pathlib import Path
from enum import Enum
from typing import Optional, Callable
from itertools import product, \
combinations
from collections import Counter
import numpy as np
from pymatgen.core.units import Mass, Length, FloatWithUnit, Unit, \
SUPPORTED_UNIT_NAMES
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
# Loads element data from json file
with open(str(Path(__file__).absolute().parent / "periodic_table.json"), "rt") as f:
_pt_data = json.load(f)
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)
class Element(Enum):
"""Enum representing an element in the periodic table."""
# This name = value convention is redundant and dumb, but unfortunately is
# necessary to preserve backwards compatibility with a time when Element is
# a regular object that is constructed with Element(symbol).
H = "H"
He = "He"
Li = "Li"
Be = "Be"
B = "B"
C = "C"
N = "N"
O = "O"
F = "F"
Ne = "Ne"
Na = "Na"
Mg = "Mg"
Al = "Al"
Si = "Si"
P = "P"
S = "S"
Cl = "Cl"
Ar = "Ar"
K = "K"
Ca = "Ca"
Sc = "Sc"
Ti = "Ti"
V = "V"
Cr = "Cr"
Mn = "Mn"
Fe = "Fe"
Co = "Co"
Ni = "Ni"
Cu = "Cu"
Zn = "Zn"
Ga = "Ga"
Ge = "Ge"
As = "As"
Se = "Se"
Br = "Br"
Kr = "Kr"
Rb = "Rb"
Sr = "Sr"
Y = "Y"
Zr = "Zr"
Nb = "Nb"
Mo = "Mo"
Tc = "Tc"
Ru = "Ru"
Rh = "Rh"
Pd = "Pd"
Ag = "Ag"
Cd = "Cd"
In = "In"
Sn = "Sn"
Sb = "Sb"
Te = "Te"
I = "I"
Xe = "Xe"
Cs = "Cs"
Ba = "Ba"
La = "La"
Ce = "Ce"
Pr = "Pr"
Nd = "Nd"
Pm = "Pm"
Sm = "Sm"
Eu = "Eu"
Gd = "Gd"
Tb = "Tb"
Dy = "Dy"
Ho = "Ho"
Er = "Er"
Tm = "Tm"
Yb = "Yb"
Lu = "Lu"
Hf = "Hf"
Ta = "Ta"
W = "W"
Re = "Re"
Os = "Os"
Ir = "Ir"
Pt = "Pt"
Au = "Au"
Hg = "Hg"
Tl = "Tl"
Pb = "Pb"
Bi = "Bi"
Po = "Po"
At = "At"
Rn = "Rn"
Fr = "Fr"
Ra = "Ra"
Ac = "Ac"
Th = "Th"
Pa = "Pa"
U = "U"
Np = "Np"
Pu = "Pu"
Am = "Am"
Cm = "Cm"
Bk = "Bk"
Cf = "Cf"
Es = "Es"
Fm = "Fm"
Md = "Md"
No = "No"
Lr = "Lr"
def __init__(self, symbol: str):
"""
Basic immutable element object with all relevant properties.
Only one instance of Element for each symbol is stored after creation,
ensuring that a particular element behaves like a singleton. For all
attributes, missing data (i.e., data for which is not available) is
represented by a None unless otherwise stated.
Args:
symbol (str): Element symbol, e.g., "H", "Fe"
.. attribute:: Z
Atomic number
.. attribute:: symbol
Element symbol
.. attribute:: X
Pauling electronegativity. Elements without an electronegativity
number are assigned a value of zero by default.
.. attribute:: number
Alternative attribute for atomic number
.. attribute:: max_oxidation_state
Maximum oxidation state for element
.. attribute:: min_oxidation_state
Minimum oxidation state for element
.. attribute:: oxidation_states
Tuple of all known oxidation states
.. attribute:: common_oxidation_states
Tuple of all common oxidation states
.. attribute:: full_electronic_structure
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
.. attribute:: row
Returns the periodic table row of the element.
.. attribute:: group
Returns the periodic table group of the element.
.. attribute:: block
Return the block character "s,p,d,f"
.. attribute:: is_noble_gas
True if element is noble gas.
.. attribute:: is_transition_metal
True if element is a transition metal.
.. attribute:: is_post_transition_metal
True if element is a post transition metal.
.. attribute:: is_rare_earth_metal
True if element is a rare earth metal.
.. attribute:: is_metalloid
True if element is a metalloid.
.. attribute:: is_alkali
True if element is an alkali metal.
.. attribute:: is_alkaline
True if element is an alkaline earth metal (group II).
.. attribute:: is_halogen
True if element is a halogen.
.. attribute:: is_lanthanoid
True if element is a lanthanoid.
.. attribute:: is_actinoid
True if element is a actinoid.
.. attribute:: iupac_ordering
Ordering according to Table VI of "Nomenclature of Inorganic Chemistry
(IUPAC Recommendations 2005)". This ordering effectively follows the
groups and rows of the periodic table, except the Lanthanides, Actanides
and hydrogen.
.. attribute:: long_name
Long name for element. E.g., "Hydrogen".
.. attribute:: atomic_mass
Atomic mass for the element.
.. attribute:: atomic_radius
Atomic radius for the element. This is the empirical value. Data is
obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: atomic_radius_calculated
Calculated atomic radius for the element. This is the empirical value.
Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: van_der_waals_radius
Van der Waals radius for the element. This is the empirical
value. Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: mendeleev_no
Mendeleev number
.. attribute:: electrical_resistivity
Electrical resistivity
.. attribute:: velocity_of_sound
Velocity of sound
.. attribute:: reflectivity
Reflectivity
.. attribute:: refractive_index
Refractice index
.. attribute:: poissons_ratio
Poisson's ratio
.. attribute:: molar_volume
Molar volume
.. attribute:: electronic_structure
Electronic structure. Simplified form with HTML formatting.
E.g., The electronic structure for Fe is represented as
[Ar].3d<sup>6</sup>.4s<sup>2</sup>
.. attribute:: atomic_orbitals
Atomic Orbitals. Energy of the atomic orbitals as a dict.
E.g., The orbitals energies in eV are represented as
{'1s': -1.0, '2s': -0.1}
Data is obtained from
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
The LDA values for neutral atoms are used
.. attribute:: thermal_conductivity
Thermal conductivity
.. attribute:: boiling_point
Boiling point
.. attribute:: melting_point
Melting point
.. attribute:: critical_temperature
Critical temperature
.. attribute:: superconduction_temperature
Superconduction temperature
.. attribute:: liquid_range
Liquid range
.. attribute:: bulk_modulus
Bulk modulus
.. attribute:: youngs_modulus
Young's modulus
.. attribute:: brinell_hardness
Brinell hardness
.. attribute:: rigidity_modulus
Rigidity modulus
.. attribute:: mineral_hardness
Mineral hardness
.. attribute:: vickers_hardness
Vicker's hardness
.. attribute:: density_of_solid
Density of solid phase
.. attribute:: coefficient_of_linear_thermal_expansion
Coefficient of linear thermal expansion
.. attribute:: average_ionic_radius
Average ionic radius for element in ang. The average is taken over all
oxidation states of the element for which data is present.
.. attribute:: average_cationic_radius
Average cationic radius for element in ang. The average is taken over all
positive oxidation states of the element for which data is present.
.. attribute:: average_anionic_radius
Average ionic radius for element in ang. The average is taken over all
negative oxidation states of the element for which data is present.
.. attribute:: ionic_radii
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
self.symbol = "%s" % symbol
d = _pt_data[symbol]
# Store key variables for quick access
self.Z = d["Atomic no"]
at_r = d.get("Atomic radius", "no data")
if str(at_r).startswith("no data"):
self._atomic_radius = None
else:
self._atomic_radius = Length(at_r, "ang")
self._atomic_mass = Mass(d["Atomic mass"], "amu")
self.long_name = d["Name"]
self._data = d
@property
def X(self):
"""
:return: Electronegativity of element. Note that if an element does not
have an electronegativity, a NaN float is returned.
"""
if "X" in self._data:
return self._data["X"]
warnings.warn("No electronegativity for %s. Setting to NaN. "
"This has no physical meaning, and is mainly done to "
"avoid errors caused by the code expecting a float."
% self.symbol)
return float("NaN")
@property
def atomic_radius(self):
"""
Returns: The atomic radius of the element in Ångstroms.
"""
return self._atomic_radius
@property
def atomic_mass(self):
"""
Returns: The atomic mass of the element in amu.
"""
return self._atomic_mass
def __getattr__(self, item):
if item in ["mendeleev_no", "electrical_resistivity",
"velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"electronic_structure", "thermal_conductivity",
"boiling_point", "melting_point",
"critical_temperature", "superconduction_temperature",
"liquid_range", "bulk_modulus", "youngs_modulus",
"brinell_hardness", "rigidity_modulus",
"mineral_hardness", "vickers_hardness",
"density_of_solid", "atomic_radius_calculated",
"van_der_waals_radius", "atomic_orbitals",
"coefficient_of_linear_thermal_expansion",
"ground_state_term_symbol", "valence"]:
kstr = item.capitalize().replace("_", " ")
val = self._data.get(kstr, None)
if str(val).startswith("no data"):
val = None
elif isinstance(val, dict):
pass
else:
try:
val = float(val)
except ValueError:
nobracket = re.sub(r'\(.*\)', "", val)
toks = nobracket.replace("about", "").strip().split(" ", 1)
if len(toks) == 2:
try:
if "10<sup>" in toks[1]:
base_power = re.findall(r'([+-]?\d+)', toks[1])
factor = "e" + base_power[1]
if toks[0] in [">", "high"]:
toks[0] = "1" # return the border value
toks[0] += factor
if item == "electrical_resistivity":
unit = "ohm m"
elif item == "coefficient_of_linear_thermal_expansion":
unit = "K^-1"
else:
unit = toks[1]
val = FloatWithUnit(toks[0], unit)
else:
unit = toks[1].replace("<sup>", "^").replace(
"</sup>", "").replace("Ω",
"ohm")
units = Unit(unit)
if set(units.keys()).issubset(
SUPPORTED_UNIT_NAMES):
val = FloatWithUnit(toks[0], unit)
except ValueError:
# Ignore error. val will just remain a string.
pass
return val
raise AttributeError("Element has no attribute %s!" % item)
@property
def data(self):
"""
Returns dict of data for element.
"""
return self._data.copy()
@property
def average_ionic_radius(self):
"""
Average ionic radius for element (with units). The average is taken
over all oxidation states of the element for which data is present.
"""
if "Ionic radii" in self._data:
radii = self._data["Ionic radii"]
radius = sum(radii.values()) / len(radii)
else:
radius = 0.0
return FloatWithUnit(radius, "ang")
@property
def average_cationic_radius(self):
"""
Average cationic radius for element (with units). The average is
taken over all positive oxidation states of the element for which
data is present.
"""
if "Ionic radii" in self._data:
radii = [v for k, v in self._data["Ionic radii"].items()
if int(k) > 0]
if radii:
return FloatWithUnit(sum(radii) / len(radii), "ang")
return FloatWithUnit(0.0, "ang")
@property
def average_anionic_radius(self):
"""
Average anionic radius for element (with units). The average is
taken over all negative oxidation states of the element for which
data is present.
"""
if "Ionic radii" in self._data:
radii = [v for k, v in self._data["Ionic radii"].items()
if int(k) < 0]
if radii:
return FloatWithUnit(sum(radii) / len(radii), "ang")
return FloatWithUnit(0.0, "ang")
@property
def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if "Ionic radii" in self._data:
return {int(k): FloatWithUnit(v, "ang") for k, v in self._data["Ionic radii"].items()}
return {}
@property
def number(self):
"""Alternative attribute for atomic number"""
return self.Z
@property
def max_oxidation_state(self):
"""Maximum oxidation state for element"""
if "Oxidation states" in self._data:
return max(self._data["Oxidation states"])
return 0
@property
def min_oxidation_state(self):
"""Minimum oxidation state for element"""
if "Oxidation states" in self._data:
return min(self._data["Oxidation states"])
return 0
@property
def oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Oxidation states", list()))
@property
def common_oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Common oxidation states", list()))
@property
def icsd_oxidation_states(self):
"""Tuple of all oxidation states with at least 10 instances in
ICSD database AND at least 1% of entries for that element"""
return tuple(self._data.get("ICSD oxidation states", list()))
@property
def metallic_radius(self):
"""
Metallic radius of the element. Radius is given in ang.
"""
return FloatWithUnit(self._data["Metallic radius"], "ang")
@property
def full_electronic_structure(self):
"""
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
"""
estr = self._data["Electronic structure"]
def parse_orbital(orbstr):
m = re.match(r"(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
if m:
return int(m.group(1)), m.group(2), int(m.group(3))
return orbstr
data = [parse_orbital(s) for s in estr.split(".")]
if data[0][0] == "[":
sym = data[0].replace("[", "").replace("]", "")
data = Element(sym).full_electronic_structure + data[1:]
return data
@property
def valence(self):
"""
# From full electron config obtain valence subshell
# angular moment (L) and number of valence e- (v_e)
"""
# the number of valence of noble gas is 0
if self.group == 18:
return (np.nan, 0)
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
valence = []
full_electron_config = self.full_electronic_structure
for _, l_symbol, ne in full_electron_config[::-1]:
l = L_symbols.lower().index(l_symbol)
if ne < (2 * l + 1) * 2:
valence.append((l, ne))
if len(valence) > 1:
raise ValueError("Ambiguous valence")
return valence[0]
@property
def term_symbols(self):
"""
All possible Russell-Saunders term symbol of the Element
eg. L = 1, n_e = 2 (s2)
returns
[['1D2'], ['3P0', '3P1', '3P2'], ['1S0']]
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
L, v_e = self.valence
# for one electron in subshell L
ml = list(range(-L, L + 1))
ms = [1 / 2, -1 / 2]
# all possible configurations of ml,ms for one e in subshell L
ml_ms = list(product(ml, ms))
# Number of possible configurations for r electrons in subshell L.
n = (2 * L + 1) * 2
# the combination of n_e electrons configurations
# C^{n}_{n_e}
e_config_combs = list(combinations(range(n), v_e))
# Total ML = sum(ml1, ml2), Total MS = sum(ms1, ms2)
TL = [sum([ml_ms[comb[e]][0] for e in range(v_e)])
for comb in e_config_combs]
TS = [sum([ml_ms[comb[e]][1] for e in range(v_e)])
for comb in e_config_combs]
comb_counter = Counter([r for r in zip(TL, TS)])
term_symbols = []
while sum(comb_counter.values()) > 0:
# Start from the lowest freq combination,
# which corresponds to largest abs(L) and smallest abs(S)
L, S = min(comb_counter)
J = list(np.arange(abs(L - S), abs(L) + abs(S) + 1))
term_symbols.append([str(int(2 * (abs(S)) + 1)) + L_symbols[abs(L)] + str(j) for j in J])
# Without J
# term_symbols.append(str(int(2 * (abs(S)) + 1)) \
# + L_symbols[abs(L)])
# Delete all configurations included in this term
for ML in range(-L, L - 1, -1):
for MS in np.arange(S, -S + 1, 1):
if (ML, MS) in comb_counter:
comb_counter[(ML, MS)] -= 1
if comb_counter[(ML, MS)] == 0:
del comb_counter[(ML, MS)]
return term_symbols
@property
def ground_state_term_symbol(self):
"""
Ground state term symbol
Selected based on Hund's Rule
"""
L_symbols = 'SPDFGHIKLMNOQRTUVWXYZ'
term_symbols = self.term_symbols
term_symbol_flat = {term: {"multiplicity": int(term[0]),
"L": L_symbols.index(term[1]),
"J": float(term[2:])}
for term in sum(term_symbols, [])}
multi = [int(item['multiplicity'])
for terms, item in term_symbol_flat.items()]
max_multi_terms = {symbol: item
for symbol, item in term_symbol_flat.items()
if item['multiplicity'] == max(multi)}
Ls = [item['L'] for terms, item in max_multi_terms.items()]
max_L_terms = {symbol: item
for symbol, item in term_symbol_flat.items()
if item['L'] == max(Ls)}
J_sorted_terms = sorted(max_L_terms.items(),
key=lambda k: k[1]['J'])
L, v_e = self.valence
if v_e <= (2 * L + 1):
return J_sorted_terms[0][0]
return J_sorted_terms[-1][0]
def __eq__(self, other):
return isinstance(other, Element) and self.Z == other.Z
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.Z
def __repr__(self):
return "Element " + self.symbol
def __str__(self):
return self.symbol
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity. Very
useful for getting correct formulas. For example, FeO4PLi is
automatically sorted into LiFePO4.
"""
x1 = float("inf") if self.X != self.X else self.X
x2 = float("inf") if other.X != other.X else other.X
if x1 != x2:
return x1 < x2
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
@staticmethod
def from_Z(z: int):
"""
Get an element from an atomic number.
Args:
z (int): Atomic number
Returns:
Element with atomic number z.
"""
for sym, data in _pt_data.items():
if data["Atomic no"] == z:
return Element(sym)
raise ValueError("No element with this atomic number %s" % z)
@staticmethod
def from_row_and_group(row: int, group: int):
"""
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
"""
for sym in _pt_data.keys():
el = Element(sym)
if el.row == row and el.group == group:
return el
raise ValueError("No element with this row and group!")
@staticmethod
def is_valid_symbol(symbol: str):
"""
Returns true if symbol is a valid element symbol.
Args:
symbol (str): Element symbol
Returns:
True if symbol is a valid element (e.g., "H"). False otherwise
(e.g., "Zebra").
"""
return symbol in Element.__members__
@property
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
if 89 <= z <= 103:
return 9
for i, size in enumerate(_pt_row_sizes):
total += size
if total >= z:
return i + 1
return 8
@property
def group(self):
"""
Returns the periodic table group of the element.
"""
z = self.Z
if z == 1:
return 1
if z == 2:
return 18
if 3 <= z <= 18:
if (z - 2) % 8 == 0:
return 18
if (z - 2) % 8 <= 2:
return (z - 2) % 8
return 10 + (z - 2) % 8
if 19 <= z <= 54:
if (z - 18) % 18 == 0:
return 18
return (z - 18) % 18
if (z - 54) % 32 == 0:
return 18
if (z - 54) % 32 >= 18:
return (z - 54) % 32 - 14
return (z - 54) % 32
@property
def block(self):
"""
Return the block character "s,p,d,f"
"""
if (self.is_actinoid or self.is_lanthanoid) and self.Z not in [71, 103]:
return "f"
if self.is_actinoid or self.is_lanthanoid:
return "d"
if self.group in [1, 2]:
return "s"
if self.group in range(13, 19):
return "p"
if self.group in range(3, 13):
return "d"
raise ValueError("unable to determine block")
@property
def is_noble_gas(self):
"""
True if element is noble gas.
"""
return self.Z in (2, 10, 18, 36, 54, 86, 118)
@property
def is_transition_metal(self):
"""
True if element is a transition metal.
"""
ns = list(range(21, 31))
ns.extend(list(range(39, 49)))
ns.append(57)
ns.extend(list(range(72, 81)))
ns.append(89)
ns.extend(list(range(104, 113)))
return self.Z in ns
@property
def is_post_transition_metal(self):
"""
True if element is a post-transition or poor metal.
"""
return self.symbol in ("Al", "Ga", "In", "Tl", "Sn", "Pb", "Bi")
@property
def is_rare_earth_metal(self) -> bool:
"""
True if element is a rare earth metal.
"""
return self.is_lanthanoid or self.is_actinoid
@property
def is_metal(self) -> bool:
"""
:return: True if is a metal.
"""
return (self.is_alkali or self.is_alkaline or
self.is_post_transition_metal or self.is_transition_metal or
self.is_lanthanoid or self.is_actinoid)
@property
def is_metalloid(self) -> bool:
"""
True if element is a metalloid.
"""
return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")
@property
def is_alkali(self) -> bool:
"""
True if element is an alkali metal.
"""
return self.Z in (3, 11, 19, 37, 55, 87)
@property
def is_alkaline(self) -> bool:
"""
True if element is an alkaline earth metal (group II).
"""
return self.Z in (4, 12, 20, 38, 56, 88)
@property
def is_halogen(self):
"""
True if element is a halogen.
"""
return self.Z in (9, 17, 35, 53, 85)
@property
def is_chalcogen(self):
"""
True if element is a chalcogen.
"""
return self.Z in (8, 16, 34, 52, 84)
@property
def is_lanthanoid(self):
"""
True if element is a lanthanoid.
"""
return 56 < self.Z < 72
@property
def is_actinoid(self):
"""
True if element is a actinoid.
"""
return 88 < self.Z < 104
@property
def is_quadrupolar(self):
"""
Checks if this element can be quadrupolar
"""
return len(self.data.get("NMR Quadrupole Moment", {})) > 0
@property
def nmr_quadrupole_moment(self):
"""
Get a dictionary the nuclear electric quadrupole moment in units of
e*millibarns for various isotopes
"""
return {k: FloatWithUnit(v, "mbarn")
for k, v in self.data.get("NMR Quadrupole Moment", {}).items()}
@property
def iupac_ordering(self):
"""
Ordering according to Table VI of "Nomenclature of Inorganic Chemistry
(IUPAC Recommendations 2005)". This ordering effectively follows the
groups and rows of the periodic table, except the Lanthanides, Actanides
and hydrogen.
"""
return self._data["IUPAC ordering"]
def __deepcopy__(self, memo):
return Element(self.symbol)
@staticmethod
def from_dict(d):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return Element(d["element"])
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol}
@staticmethod
def print_periodic_table(filter_function: Optional[Callable] = None):
"""
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
"""
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
class Specie(MSONable):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
"""
supported_properties = ("spin",)
def __init__(self, symbol: str,
oxidation_state: float = 0.0,
properties: dict = None):
"""
Initializes a Specie.
Args:
symbol (str): Element symbol, e.g., Fe
oxidation_state (float): Oxidation state of element, e.g., 2 or -2
properties: Properties associated with the Specie, e.g.,
{"spin": 5}. Defaults to None. Properties must be one of the
Specie supported_properties.
.. attribute:: oxi_state
Oxidation state associated with Specie
.. attribute:: ionic_radius
Ionic radius of Specie (with specific oxidation state).
.. versionchanged:: 2.6.7
Properties are now checked when comparing two Species for equality.
"""
self._el = Element(symbol)
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
return getattr(self._el, a)
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
return (isinstance(other, Specie) and self.symbol == other.symbol
and self.oxi_state == other.oxi_state
and self._properties == other._properties)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Equal Specie should have the same str representation, hence
should hash equally. Unequal Specie will have differnt str
representations.
"""
return self.__str__().__hash__()
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state, followed by spin.
"""
x1 = float("inf") if self.X != self.X else self.X
x2 = float("inf") if other.X != other.X else other.X
if x1 != x2:
return x1 < x2
if self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
if self.oxi_state:
other_oxi = 0 if (isinstance(other, Element)
or other.oxi_state is None) else other.oxi_state
return self.oxi_state < other_oxi
if getattr(self, "spin", False):
other_spin = getattr(other, "spin", 0)
return self.spin < other_spin
return False
@property
def element(self):
"""
Underlying element object
"""
return self._el
@property
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
if oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None
@property
def oxi_state(self):
"""
Oxidation state of Specie.
"""
return self._oxi_state
@staticmethod
def from_string(species_string: str):
"""
Returns a Specie from a string representation.
Args:
species_string (str): A typical string representation of a
species, e.g., "Mn2+", "Fe3+", "O2-".
Returns:
A Specie object.
Raises:
ValueError if species_string cannot be intepreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string)
if m:
sym = m.group(1)
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).replace(",", "").split("=")
properties = {toks[0]: float(toks[1])}
return Specie(sym, oxi, properties)
raise ValueError("Invalid Species String")
def __repr__(self):
return "Specie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_nmr_quadrupole_moment(self, isotope=None):
"""
Gets the nuclear electric quadrupole moment in units of
e*millibarns
Args:
isotope (str): the isotope to get the quadrupole moment for
default is None, which gets the lowest mass isotope
"""
quad_mom = self._el.nmr_quadrupole_moment
if not quad_mom:
return 0.0
if isotope is None:
isotopes = list(quad_mom.keys())
isotopes.sort(key=lambda x: int(x.split("-")[1]), reverse=False)
return quad_mom.get(isotopes[0], 0.0)
if isotope not in quad_mom:
raise ValueError("No quadrupole moment for isotope {}".format(
isotope))
return quad_mom.get(isotope, 0.0)
def get_shannon_radius(self, cn: str, spin: str = "",
radius_type: str = "ionic"):
"""
Get the local environment specific ionic radius for species.
Args:
cn (str): Coordination using roman letters. Supported values are
I-IX, as well as IIIPY, IVPY and IVSQ.
spin (str): Some species have different radii for different
spins. You can get specific values using "High Spin" or
"Low Spin". Leave it as "" if not available. If only one spin
data is available, it is returned and this spin parameter is
ignored.
radius_type (str): Either "crystal" or "ionic" (default).
Returns:
Shannon radius for specie in the specified environment.
"""
radii = self._el.data["Shannon radii"]
radii = radii[str(int(self._oxi_state))][cn] # type: ignore
if len(radii) == 1: # type: ignore
k, data = list(radii.items())[0] # type: ignore
if k != spin:
warnings.warn(
"Specified spin state of %s not consistent with database "
"spin of %s. Only one spin data available, and "
"that value is returned." % (spin, k)
)
else:
data = radii[spin]
return data["%s_radius" % radius_type]
def get_crystal_field_spin(self, coordination: str = "oct",
spin_config: str = "high"):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ("oct", "tet") or spin_config not in ("high", "low"):
raise ValueError("Invalid coordination or spin config.")
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(self.symbol))
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}".format(self.oxi_state, self.symbol))
if spin_config == "high":
if nelectrons <= 5:
return nelectrons
return 10 - nelectrons
if spin_config == "low":
if coordination == "oct":
if nelectrons <= 3:
return nelectrons
if nelectrons <= 6:
return 6 - nelectrons
if nelectrons <= 8:
return nelectrons - 6
return 10 - nelectrons
if coordination == "tet":
if nelectrons <= 2:
return nelectrons
if nelectrons <= 4:
return 4 - nelectrons
if nelectrons <= 7:
return nelectrons - 4
return 10 - nelectrons
raise RuntimeError()
def __deepcopy__(self, memo):
return Specie(self.symbol, self.oxi_state, self._properties)
def as_dict(self):
"""
:return: Json-able dictionary representation.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Specie.
"""
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
class DummySpecie(Specie):
"""
A special specie for representing non-traditional elements or species. For
example, representation of vacancies (charged or otherwise), or special
sites, etc.
.. attribute:: oxi_state
Oxidation state associated with Specie.
.. attribute:: Z
DummySpecie is always assigned an atomic number equal to the hash
number of the symbol. Obviously, it makes no sense whatsoever to use
the atomic number of a Dummy specie for anything scientific. The purpose
of this is to ensure that for most use cases, a DummySpecie behaves no
differently from an Element or Specie.
.. attribute:: X
DummySpecie is always assigned an electronegativity of 0.
"""
def __init__(self,
symbol: str = "X",
oxidation_state: float = 0,
properties: dict = None):
"""
Args:
symbol (str): An assigned symbol for the dummy specie. Strict
rules are applied to the choice of the symbol. The dummy
symbol cannot have any part of first two letters that will
constitute an Element symbol. Otherwise, a composition may
be parsed wrongly. E.g., "X" is fine, but "Vac" is not
because Vac contains V, a valid Element.
oxidation_state (float): Oxidation state for dummy specie.
Defaults to zero.
"""
for i in range(1, min(2, len(symbol)) + 1):
if Element.is_valid_symbol(symbol[:i]):
raise ValueError("{} contains {}, which is a valid element "
"symbol.".format(symbol, symbol[:i]))
# Set required attributes for DummySpecie to function like a Specie in
# most instances.
self._symbol = symbol
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doens't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
raise AttributeError(a)
def __hash__(self):
return self.symbol.__hash__()
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
if not isinstance(other, DummySpecie):
return False
return (isinstance(other, Specie) and
self.symbol == other.symbol and
self.oxi_state == other.oxi_state and
self._properties == other._properties)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
if self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def Z(self) -> int:
"""
DummySpecie is always assigned an atomic number equal to the hash of
the symbol. The expectation is that someone would be an actual dummy
to use atomic numbers for a Dummy specie.
"""
return self.symbol.__hash__()
@property
def oxi_state(self) -> float:
"""
Oxidation state associated with DummySpecie
"""
return self._oxi_state
@property
def X(self) -> float:
"""
DummySpecie is always assigned an electronegativity of 0. The effect of
this is that DummySpecie are always sorted in front of actual Specie.
"""
return 0.0
@property
def symbol(self) -> str:
"""
:return: Symbol for DummySpecie.
"""
return self._symbol
def __deepcopy__(self, memo):
return DummySpecie(self.symbol, self._oxi_state)
@staticmethod
def from_string(species_string: str):
"""
Returns a Dummy from a string representation.
Args:
species_string (str): A string representation of a dummy
species, e.g., "X2+", "X3+".
Returns:
A DummySpecie object.
Raises:
ValueError if species_string cannot be intepreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string)
if m:
sym = m.group(1)
if m.group(2) == "" and m.group(3) == "":
oxi = 0.0
else:
oxi = 1.0 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).split("=")
properties = {toks[0]: float(toks[1])}
return DummySpecie(sym, oxi, properties)
raise ValueError("Invalid DummySpecies String")
def as_dict(self):
"""
:return: MSONAble dict representation.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: DummySpecie
"""
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
def __repr__(self):
return "DummySpecie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummyElement parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except Exception:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj))
| {
"content_hash": "03ac7a8f45b963ad80fa1cf6b53db4fc",
"timestamp": "",
"source": "github",
"line_count": 1571,
"max_line_length": 106,
"avg_line_length": 31.476129853596436,
"alnum_prop": 0.5283221096483245,
"repo_name": "tschaume/pymatgen",
"id": "18f171406e6c07fdb8b90b0a673392134f6ab47e",
"size": "49560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/core/periodic_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7560590"
},
{
"name": "Roff",
"bytes": "4298591"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class EndPointsTestJSON(base.BaseIdentityV2AdminTest):
@classmethod
def resource_setup(cls):
super(EndPointsTestJSON, cls).resource_setup()
cls.service_ids = list()
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
cls.service_data = cls.services_client.create_service(
name=s_name, type=s_type,
description=s_description)['OS-KSADM:service']
cls.service_id = cls.service_data['id']
cls.service_ids.append(cls.service_id)
# Create endpoints so as to use for LIST and GET test cases
cls.setup_endpoints = list()
for i in range(2):
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.endpoints_client.create_endpoint(
service_id=cls.service_id,
region=region,
publicurl=url,
adminurl=url,
internalurl=url)['endpoint']
# list_endpoints() will return 'enabled' field
endpoint['enabled'] = True
cls.setup_endpoints.append(endpoint)
@classmethod
def resource_cleanup(cls):
for e in cls.setup_endpoints:
cls.endpoints_client.delete_endpoint(e['id'])
for s in cls.service_ids:
cls.services_client.delete_service(s)
super(EndPointsTestJSON, cls).resource_cleanup()
@test.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
def test_list_endpoints(self):
# Get a list of endpoints
fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
# Asserting LIST endpoints
missing_endpoints =\
[e for e in self.setup_endpoints if e not in fetched_endpoints]
self.assertEqual(0, len(missing_endpoints),
"Failed to find endpoint %s in fetched list" %
', '.join(str(e) for e in missing_endpoints))
@test.idempotent_id('9974530a-aa28-4362-8403-f06db02b26c1')
def test_create_list_delete_endpoint(self):
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = self.endpoints_client.create_endpoint(
service_id=self.service_id,
region=region,
publicurl=url,
adminurl=url,
internalurl=url)['endpoint']
# Asserting Create Endpoint response body
self.assertIn('id', endpoint)
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['publicurl'])
# Checking if created endpoint is present in the list of endpoints
fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
self.assertIn(endpoint['id'], fetched_endpoints_id)
# Deleting the endpoint created in this method
self.endpoints_client.delete_endpoint(endpoint['id'])
# Checking whether endpoint is deleted successfully
fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
self.assertNotIn(endpoint['id'], fetched_endpoints_id)
| {
"content_hash": "fcbca992815f75ed47ee71db0e3bd9f3",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 44.63636363636363,
"alnum_prop": 0.6287459994180972,
"repo_name": "Tesora/tesora-tempest",
"id": "651a3161ffe4109999b1f75cf97d0a6518ed7f8d",
"size": "4073",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v2/test_endpoints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3633718"
},
{
"name": "Shell",
"bytes": "9310"
}
],
"symlink_target": ""
} |
"""The tests for the Demo Media player platform."""
import datetime
import socket
import unittest
import pysonos.snapshot
from unittest import mock
import pysonos
from pysonos import alarms
from homeassistant.setup import setup_component
from homeassistant.components.media_player import sonos, DOMAIN
from homeassistant.components.media_player.sonos import CONF_INTERFACE_ADDR
from homeassistant.const import CONF_HOSTS, CONF_PLATFORM
from tests.common import get_test_home_assistant
ENTITY_ID = 'media_player.kitchen'
class pysonosDiscoverMock():
"""Mock class for the pysonos.discover method."""
def discover(interface_addr):
"""Return tuple of pysonos.SoCo objects representing found speakers."""
return {SoCoMock('192.0.2.1')}
class AvTransportMock():
"""Mock class for the avTransport property on pysonos.SoCo object."""
def __init__(self):
"""Initialize ethe Transport mock."""
pass
def GetMediaInfo(self, _):
"""Get the media details."""
return {
'CurrentURI': '',
'CurrentURIMetaData': ''
}
class MusicLibraryMock():
"""Mock class for the music_library property on pysonos.SoCo object."""
def get_sonos_favorites(self):
"""Return favorites."""
return []
class SoCoMock():
"""Mock class for the pysonos.SoCo object."""
def __init__(self, ip):
"""Initialize SoCo object."""
self.ip_address = ip
self.is_visible = True
self.volume = 50
self.mute = False
self.play_mode = 'NORMAL'
self.night_mode = False
self.dialog_mode = False
self.music_library = MusicLibraryMock()
self.avTransport = AvTransportMock()
def get_sonos_favorites(self):
"""Get favorites list from sonos."""
return {'favorites': []}
def get_speaker_info(self, force):
"""Return a dict with various data points about the speaker."""
return {'serial_number': 'B8-E9-37-BO-OC-BA:2',
'software_version': '32.11-30071',
'uid': 'RINCON_B8E937BOOCBA02500',
'zone_icon': 'x-rincon-roomicon:kitchen',
'mac_address': 'B8:E9:37:BO:OC:BA',
'zone_name': 'Kitchen',
'model_name': 'Sonos PLAY:1',
'hardware_version': '1.8.1.2-1'}
def get_current_transport_info(self):
"""Return a dict with the current state of the speaker."""
return {'current_transport_speed': '1',
'current_transport_state': 'STOPPED',
'current_transport_status': 'OK'}
def get_current_track_info(self):
"""Return a dict with the current track information."""
return {'album': '',
'uri': '',
'title': '',
'artist': '',
'duration': '0:00:00',
'album_art': '',
'position': '0:00:00',
'playlist_position': '0',
'metadata': ''}
def is_coordinator(self):
"""Return true if coordinator."""
return True
def join(self, master):
"""Join speaker to a group."""
return
def set_sleep_timer(self, sleep_time_seconds):
"""Set the sleep timer."""
return
def unjoin(self):
"""Cause the speaker to separate itself from other speakers."""
return
def uid(self):
"""Return a player uid."""
return "RINCON_XXXXXXXXXXXXXXXXX"
def group(self):
"""Return all group data of this player."""
return
def add_entities_factory(hass):
"""Add devices factory."""
def add_entities(devices, update_befor_add=False):
"""Fake add device."""
hass.data[sonos.DATA_SONOS].devices = devices
return add_entities
class TestSonosMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def monkey_available(self):
"""Make a monkey available."""
return True
# Monkey patches
self.real_available = sonos.SonosDevice.available
sonos.SonosDevice.available = monkey_available
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
# Monkey patches
sonos.SonosDevice.available = self.real_available
self.hass.stop()
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_discovery(self, *args):
"""Test a single device using the autodiscovery provided by HASS."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
devices = list(self.hass.data[sonos.DATA_SONOS].devices)
self.assertEqual(len(devices), 1)
self.assertEqual(devices[0].name, 'Kitchen')
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch('pysonos.discover')
def test_ensure_setup_config_interface_addr(self, discover_mock, *args):
"""Test an interface address config'd by the HASS config file."""
discover_mock.return_value = {SoCoMock('192.0.2.1')}
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_INTERFACE_ADDR: '192.0.1.1',
}
}
assert setup_component(self.hass, DOMAIN, config)
self.assertEqual(len(self.hass.data[sonos.DATA_SONOS].devices), 1)
self.assertEqual(discover_mock.call_count, 1)
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_config_hosts_string_single(self, *args):
"""Test a single address config'd by the HASS config file."""
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_HOSTS: ['192.0.2.1'],
}
}
assert setup_component(self.hass, DOMAIN, config)
devices = self.hass.data[sonos.DATA_SONOS].devices
self.assertEqual(len(devices), 1)
self.assertEqual(devices[0].name, 'Kitchen')
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_config_hosts_string_multiple(self, *args):
"""Test multiple address string config'd by the HASS config file."""
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_HOSTS: ['192.0.2.1,192.168.2.2'],
}
}
assert setup_component(self.hass, DOMAIN, config)
devices = self.hass.data[sonos.DATA_SONOS].devices
self.assertEqual(len(devices), 2)
self.assertEqual(devices[0].name, 'Kitchen')
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_config_hosts_list(self, *args):
"""Test a multiple address list config'd by the HASS config file."""
config = {
DOMAIN: {
CONF_PLATFORM: 'sonos',
CONF_HOSTS: ['192.0.2.1', '192.168.2.2'],
}
}
assert setup_component(self.hass, DOMAIN, config)
devices = self.hass.data[sonos.DATA_SONOS].devices
self.assertEqual(len(devices), 2)
self.assertEqual(devices[0].name, 'Kitchen')
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch.object(pysonos, 'discover', new=pysonosDiscoverMock.discover)
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_ensure_setup_sonos_discovery(self, *args):
"""Test a single device using the autodiscovery provided by Sonos."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass))
devices = list(self.hass.data[sonos.DATA_SONOS].devices)
self.assertEqual(len(devices), 1)
self.assertEqual(devices[0].name, 'Kitchen')
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(SoCoMock, 'set_sleep_timer')
def test_sonos_set_sleep_timer(self, set_sleep_timerMock, *args):
"""Ensure pysonos methods called for sonos_set_sleep_timer service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
device.set_sleep_timer(30)
set_sleep_timerMock.assert_called_once_with(30)
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(SoCoMock, 'set_sleep_timer')
def test_sonos_clear_sleep_timer(self, set_sleep_timerMock, *args):
"""Ensure pysonos method called for sonos_clear_sleep_timer service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
device.set_sleep_timer(None)
set_sleep_timerMock.assert_called_once_with(None)
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('pysonos.alarms.Alarm')
@mock.patch('socket.create_connection', side_effect=socket.error())
def test_set_alarm(self, pysonos_mock, alarm_mock, *args):
"""Ensure pysonos methods called for sonos_set_sleep_timer service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
alarm1 = alarms.Alarm(pysonos_mock)
alarm1.configure_mock(_alarm_id="1", start_time=None, enabled=False,
include_linked_zones=False, volume=100)
with mock.patch('pysonos.alarms.get_alarms', return_value=[alarm1]):
attrs = {
'time': datetime.time(12, 00),
'enabled': True,
'include_linked_zones': True,
'volume': 0.30,
}
device.set_alarm(alarm_id=2)
alarm1.save.assert_not_called()
device.set_alarm(alarm_id=1, **attrs)
self.assertEqual(alarm1.enabled, attrs['enabled'])
self.assertEqual(alarm1.start_time, attrs['time'])
self.assertEqual(alarm1.include_linked_zones,
attrs['include_linked_zones'])
self.assertEqual(alarm1.volume, 30)
alarm1.save.assert_called_once_with()
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(pysonos.snapshot.Snapshot, 'snapshot')
def test_sonos_snapshot(self, snapshotMock, *args):
"""Ensure pysonos methods called for sonos_snapshot service."""
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
snapshotMock.return_value = True
device.snapshot()
self.assertEqual(snapshotMock.call_count, 1)
self.assertEqual(snapshotMock.call_args, mock.call())
@mock.patch('pysonos.SoCo', new=SoCoMock)
@mock.patch('socket.create_connection', side_effect=socket.error())
@mock.patch.object(pysonos.snapshot.Snapshot, 'restore')
def test_sonos_restore(self, restoreMock, *args):
"""Ensure pysonos methods called for sonos_restore service."""
from pysonos.snapshot import Snapshot
sonos.setup_platform(self.hass, {}, add_entities_factory(self.hass), {
'host': '192.0.2.1'
})
device = list(self.hass.data[sonos.DATA_SONOS].devices)[-1]
device.hass = self.hass
restoreMock.return_value = True
device._snapshot_coordinator = mock.MagicMock()
device._snapshot_coordinator.soco_device = SoCoMock('192.0.2.17')
device._soco_snapshot = Snapshot(device._player)
device.restore()
self.assertEqual(restoreMock.call_count, 1)
self.assertEqual(restoreMock.call_args, mock.call(False))
| {
"content_hash": "0d522d2856c537b56e3e1007cb7ac9c3",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 79,
"avg_line_length": 37.0791788856305,
"alnum_prop": 0.6088263207845619,
"repo_name": "persandstrom/home-assistant",
"id": "cb3da3ab8998d73e31ad682930161e20e3991178",
"size": "12644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/media_player/test_sonos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ENDI_FEED = 'http://elnuevodia.feedsportal.com/c/34275/f/623466/index.rss'
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
PROJECT_PATH = os.sep.join(os.path.realpath(os.path.dirname(__file__)).split('/'))
PROJECT_NAME = PROJECT_PATH.split('/')[-1]
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# 'NAME': '%s/core/dev.sqlite3' % PROJECT_PATH, # Or path to database file if using sqlite3.
# 'USER': '', # Not used with sqlite3.
# 'PASSWORD': '', # Not used with sqlite3.
# 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
# 'PORT': '', # Set to empty string for default. Not used with sqlite3.
# }
# }
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Puerto_Rico'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 's8pmbma9sipcc00hg=pvap(_-djgq64ynkx7a#wz$owihz(ggd'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'angrytards.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "026c75a8b486d50f6225236adcc55a14",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 124,
"avg_line_length": 35.026490066225165,
"alnum_prop": 0.6825486859519758,
"repo_name": "gcollazo/angryt",
"id": "f1502d209f81998b720515b6a25443d8b48a89fd",
"size": "5331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angrytards/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "36112"
},
{
"name": "Python",
"bytes": "12184"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
import os
import shutil
import tempfile
import yaml
class FilemakerBase(object): # pragma: nocover
"""Override marked methods to do something useful. Base class serves as
a dry-run step generator.
"""
def __init__(self, root, fdef):
self.fdef = fdef
self.goto_root(root)
self._makefiles(fdef)
def goto_root(self, dirname):
"""override
"""
print "pushd", dirname
def makedir(self, dirname, content):
"""override, but call self.make_list(content)
"""
print "mkdir " + dirname
print "pushd " + dirname
self.make_list(content)
print "popd"
def make_file(self, filename, content):
"""override
"""
print "create file: %s %r" % (filename, content)
def make_empty_file(self, fname):
"""override
"""
print "touch", fname
def _make_empty_file(self, fname):
if fname != 'empty':
self.make_empty_file(fname)
def make_list(self, lst):
for item in lst:
self._makefiles(item)
def _makefiles(self, f):
if isinstance(f, dict):
for k, v in f.items():
if isinstance(v, list):
self.makedir(dirname=k, content=v)
elif isinstance(v, basestring):
self.make_file(filename=k, content=v)
else: # pragma: nocover
raise ValueError("Unexpected:", k, v)
elif isinstance(f, basestring):
self._make_empty_file(f)
elif isinstance(f, list):
self.make_list(f)
else: # pragma: nocover
raise ValueError("Unknown type:", f)
class Filemaker(FilemakerBase):
def goto_root(self, dirname):
os.chdir(dirname)
def makedir(self, dirname, content):
cwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
self.make_list(content)
os.chdir(cwd)
def make_file(self, filename, content):
open(filename, 'w').write(content)
def make_empty_file(self, fname):
open(fname, 'w').close()
@contextmanager
def create_files(filedef, cleanup=True):
fdef = yaml.load(filedef)
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
Filemaker(tmpdir, fdef)
if not cleanup:
print "TMPDIR =", tmpdir
yield tmpdir
finally:
os.chdir(cwd)
if cleanup:
shutil.rmtree(tmpdir, ignore_errors=True)
| {
"content_hash": "86cc3080ab8c19caefe9eb0c96bf7d11",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 76,
"avg_line_length": 26.391752577319586,
"alnum_prop": 0.559375,
"repo_name": "thebjorn/pydeps",
"id": "30362ab625e22c789c0c5ba0dc5a01077fb9db85",
"size": "2584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/filemaker.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "192"
},
{
"name": "Python",
"bytes": "316728"
}
],
"symlink_target": ""
} |
import time
import json
from websocket import create_connection
from behave import given, when, then, step
from assertpy import assert_that
from websocketservice import WebSocketService
@given( 'the websocket connection is open' )
def step_impl( context ):
context.ws = create_connection( context.websockurl )
@given( 'the websocket service is listening' )
def step_impl( context ):
context.events = []
def onMessage( ws, message ):
event = json.loads( message )
event['size'] = len( message )
event['recv'] = time.time() * 1000 * 1000
event['id'] = event['tick']
context.events.append( event )
context.service = WebSocketService( context.websockurl, onMessage )
time.sleep( 0.1 )
@when( 'sending a ping with payload size {size:d}' )
def step_impl( context, size ):
context.ws.ping( bytearray( [0xFE]*size ) )
@then( 'a pong is received with payload size {size:d}' )
def step_impl( context, size ):
( opcode, payload ) = context.ws.recv_data( True )
assert_that( opcode ).is_equal_to ( 0xA )
assert_that( payload ).is_length( size )
if size > 0:
assert_that( payload ).contains_only( 0xFE )
@then( 'the websocket connection can be closed' )
def step_impl( context ):
context.ws.close()
@then( 'the websocket service can be closed' )
def step_impl( context ):
context.service.stop()
| {
"content_hash": "686d83eec956046f2d86a8c63773da70",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 71,
"avg_line_length": 30.434782608695652,
"alnum_prop": 0.6635714285714286,
"repo_name": "Eelco81/server-test-project",
"id": "e1ae116a252092a708ccc36a9f82275b2c68b515",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test/steps/websockets-steps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "346824"
},
{
"name": "CSS",
"bytes": "116"
},
{
"name": "Gherkin",
"bytes": "7972"
},
{
"name": "HTML",
"bytes": "170"
},
{
"name": "JavaScript",
"bytes": "22080"
},
{
"name": "Makefile",
"bytes": "8760"
},
{
"name": "Python",
"bytes": "15678"
}
],
"symlink_target": ""
} |
"""Recipe r"""
import os
from mako.template import Template
from birdhousebuilder.recipe import conda
r_install_script = Template(
"""
% for pkg in pkg_list:
install.packages("${pkg}", dependencies = TRUE, repo="${repo}")
% endfor
"""
)
def install_pkgs(pkgs, repo, prefix):
from subprocess import check_call
from tempfile import NamedTemporaryFile
pkg_list = conda.split_args(pkgs)
if len(pkg_list) > 0:
result = r_install_script.render(
pkg_list=pkg_list,
repo=repo
)
fp = NamedTemporaryFile(suffix='.R', prefix='install', delete=False)
fp.write(result)
fp.close()
cmd = '%s/bin/R --no-save < %s' % (prefix, fp.name)
check_call(cmd, shell=True)
try:
os.remove(fp.name)
except OSError:
pass
return pkg_list
class Recipe(object):
"""This recipe is used by zc.buildout"""
def __init__(self, buildout, name, options):
self.buildout, self.name, self.options = buildout, name, options
b_options = buildout['buildout']
self.prefix = b_options.get('anaconda-home', conda.anaconda_home())
self.repo = options.get('repo', "http://ftp5.gwdg.de/pub/misc/cran")
self.pkgs = options.get('pkgs', '')
self.on_update = conda.as_bool(options.get('on-update', 'false'))
def install(self):
self.execute()
return tuple()
def update(self):
if self.on_update:
self.execute()
return tuple()
def execute(self):
#self.install_r()
self.install_pkgs()
def install_r(self):
script = conda.Recipe(
self.buildout,
self.name,
{'pkgs': 'r'})
return script.install()
def install_pkgs(self):
return install_pkgs(self.pkgs, self.repo, self.prefix)
def uninstall(name, options):
pass
| {
"content_hash": "50a83f1bbd9c2c9d5497d7e01b317d69",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 76,
"avg_line_length": 24.468354430379748,
"alnum_prop": 0.5773409208484221,
"repo_name": "bird-house/birdhousebuilder.recipe.r",
"id": "f7a86e77300d211e155e2c98e815ded96e4099e9",
"size": "1988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "birdhousebuilder/recipe/r/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16816"
}
],
"symlink_target": ""
} |
from _thread import start_new_thread
from .socket_server import Server
def init():
start_new_thread(Server.start, ())
| {
"content_hash": "31af43e7cedc99867aa1468d17b6505f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 38,
"avg_line_length": 17.857142857142858,
"alnum_prop": 0.72,
"repo_name": "JanlizWorldlet/FeelUOwn",
"id": "345f9079f8abfe00bf451caafaa6d9557e951d45",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "history_research/Cli/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3719"
},
{
"name": "HTML",
"bytes": "14979"
},
{
"name": "JavaScript",
"bytes": "4940"
},
{
"name": "Makefile",
"bytes": "7426"
},
{
"name": "Python",
"bytes": "166531"
},
{
"name": "Shell",
"bytes": "6603"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import time
from pysolar.util import get_sunrise_sunset
START_DATE = datetime.strptime('2013-01-31', "%Y-%m-%d")
LAT, LON = 52.518611111111, 13.408055555556
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
def get_operating_hours():
today = datetime.now()
operating_hours = 0
for single_date in daterange(START_DATE, today):
day = datetime.fromtimestamp(time.mktime(single_date.timetuple()))
sun_rise_set = get_sunrise_sunset(LAT, LON, day)
daily_operating_hours = sun_rise_set[1] - sun_rise_set[0]
operating_hours += divmod(daily_operating_hours.total_seconds(), 3600)[0]
return 1.05 * operating_hours # adding 5% overhead
def get_operating_days():
today = datetime.now()
delta = today - START_DATE
return delta.days
| {
"content_hash": "afa7d0c73d28548e924b25ab6eb329ce",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 81,
"avg_line_length": 32.5,
"alnum_prop": 0.6802197802197802,
"repo_name": "morreene/tradenews",
"id": "1f5b5c57328840576c1cda010ef63b42dc203ce5",
"size": "910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tradenews/public/helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1572055"
},
{
"name": "HTML",
"bytes": "464815"
},
{
"name": "JavaScript",
"bytes": "5197624"
},
{
"name": "PHP",
"bytes": "8415"
},
{
"name": "Python",
"bytes": "50512"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
class Consumer(object):
"""
A class for consuming resources
"""
API_KEY = 'abcdefghijklmnopqustuvwxyz'
def process_resource(self, resource):
"""
This method processes Resource objects and returns a printable string
"""
response = resource.make_a_network_heavy_call(self.API_KEY)
return '\'{0}\' is the data'.format(response)
| {
"content_hash": "fff5e485981b9186c305734f0df46ba9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.6347150259067358,
"repo_name": "ambitioninc/ambition-py-tests-guide",
"id": "b775cf367079ab7633f06005d2651ec50d31d12f",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests_guide/consumer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12018"
}
],
"symlink_target": ""
} |
import sys, os, re, json
import itertools
from collections import Counter
import time
from numpy import *
import pandas as pd
def invert_dict(d):
return {v:k for k,v in d.iteritems()}
def flatten1(lst):
return list(itertools.chain.from_iterable(lst))
def load_wv_pandas(fname):
return pd.read_hdf(fname, 'data')
def extract_wv(df):
num_to_word = dict(enumerate(df.index))
word_to_num = invert_dict(num_to_word)
wv = df.as_matrix()
return wv, word_to_num, num_to_word
def canonicalize_digits(word):
if any([c.isalpha() for c in word]): return word
word = re.sub("\d", "DG", word)
if word.startswith("DG"):
word = word.replace(",", "") # remove thousands separator
return word
def canonicalize_word(word, wordset=None, digits=True):
word = word.lower()
if digits:
if (wordset != None) and (word in wordset): return word
word = canonicalize_digits(word) # try to canonicalize numbers
if (wordset == None) or (word in wordset): return word
else: return "UUUNKKK" # unknown token
##
# Utility functions used to create dataset
##
def augment_wv(df, extra=["UUUNKKK"]):
for e in extra:
df.loc[e] = zeros(len(df.columns))
def prune_wv(df, vocab, extra=["UUUNKKK"]):
"""Prune word vectors to vocabulary."""
items = set(vocab).union(set(extra))
return df.filter(items=items, axis='index')
def load_wv_raw(fname):
return pd.read_table(fname, sep="\s+",
header=None,
index_col=0,
quoting=3)
def load_dataset(fname):
docs = []
with open(fname) as fd:
cur = []
for line in fd:
# new sentence on -DOCSTART- or blank line
if re.match(r"-DOCSTART-.+", line) or (len(line.strip()) == 0):
if len(cur) > 0:
docs.append(cur)
cur = []
else: # read in tokens
cur.append(line.strip().split("\t",1))
# flush running buffer
docs.append(cur)
return docs
def extract_tag_set(docs):
tags = set(flatten1([[t[1].split("|")[0] for t in d] for d in docs]))
return tags
def extract_word_set(docs):
words = set(flatten1([[t[0] for t in d] for d in docs]))
return words
def pad_sequence(seq, left=1, right=1):
return left*[("<s>", "")] + seq + right*[("</s>", "")]
##
# For window models
def seq_to_windows(words, tags, word_to_num, tag_to_num, left=1, right=1):
ns = len(words)
X = []
y = []
for i in range(ns):
if words[i] == "<s>" or words[i] == "</s>":
continue # skip sentence delimiters
tagn = tag_to_num[tags[i]]
idxs = [word_to_num[words[ii]]
for ii in range(i - left, i + right + 1)]
X.append(idxs)
y.append(tagn)
return array(X), array(y)
def docs_to_windows(docs, word_to_num, tag_to_num, wsize=3):
pad = (wsize - 1)/2
docs = flatten1([pad_sequence(seq, left=pad, right=pad) for seq in docs])
words, tags = zip(*docs)
words = [canonicalize_word(w, word_to_num) for w in words]
tags = [t.split("|")[0] for t in tags]
return seq_to_windows(words, tags, word_to_num, tag_to_num, pad, pad)
def window_to_vec(window, L):
"""Concatenate word vectors for a given window."""
return concatenate([L[i] for i in window])
##
# For fixed-window LM:
# each row of X is a list of word indices
# each entry of y is the word index to predict
def seq_to_lm_windows(words, word_to_num, ngram=2):
ns = len(words)
X = []
y = []
for i in range(ns):
if words[i] == "<s>":
continue # skip sentence begin, but do predict end
idxs = [word_to_num[words[ii]]
for ii in range(i - ngram + 1, i + 1)]
X.append(idxs[:-1])
y.append(idxs[-1])
return array(X), array(y)
def docs_to_lm_windows(docs, word_to_num, ngram=2):
docs = flatten1([pad_sequence(seq, left=(ngram-1), right=1)
for seq in docs])
words = [canonicalize_word(wt[0], word_to_num) for wt in docs]
return seq_to_lm_windows(words, word_to_num, ngram)
##
# For RNN LM
# just convert each sentence to a list of indices
# after padding each with <s> ... </s> tokens
def seq_to_indices(words, word_to_num):
return array([word_to_num[w] for w in words])
def docs_to_indices(docs, word_to_num):
docs = [pad_sequence(seq, left=1, right=1) for seq in docs]
ret = []
for seq in docs:
words = [canonicalize_word(wt[0], word_to_num) for wt in seq]
ret.append(seq_to_indices(words, word_to_num))
# return as numpy array for fancier slicing
return array(ret, dtype=object)
def offset_seq(seq):
return seq[:-1], seq[1:]
def seqs_to_lmXY(seqs):
X, Y = zip(*[offset_seq(s) for s in seqs])
return array(X, dtype=object), array(Y, dtype=object)
##
# For RNN tagger
# return X, Y as lists
# where X[i] is indices, Y[i] is tags for a sequence
# NOTE: this does not use padding tokens!
# (RNN should natively handle begin/end)
def docs_to_tag_sequence(docs, word_to_num, tag_to_num):
# docs = [pad_sequence(seq, left=1, right=1) for seq in docs]
X = []
Y = []
for seq in docs:
if len(seq) < 1: continue
words, tags = zip(*seq)
words = [canonicalize_word(w, word_to_num) for w in words]
x = seq_to_indices(words, word_to_num)
X.append(x)
tags = [t.split("|")[0] for t in tags]
y = seq_to_indices(tags, tag_to_num)
Y.append(y)
# return as numpy array for fancier slicing
return array(X, dtype=object), array(Y, dtype=object)
def idxs_to_matrix(idxs, L):
"""Return a matrix X with each row
as a word vector for the corresponding
index in idxs."""
return vstack([L[i] for i in idxs]) | {
"content_hash": "fef9c90e45ffd8238296dbc405885730",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 77,
"avg_line_length": 30.427083333333332,
"alnum_prop": 0.5920917494008902,
"repo_name": "dmitrinesterenko/cs224d",
"id": "729cea4309a270434a19c66d1bfaf70a74078968",
"size": "5842",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "assignment2/data_utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135342"
},
{
"name": "Shell",
"bytes": "2598"
}
],
"symlink_target": ""
} |
import abc
from cmath import pi
import multiprocessing as mp
import os
import queue
import threading
class NoMoreJobsException(Exception):
"""An exception indicating that no more jobs are available."""
pass
def worker(job_queue, result_queue, shutdown_event, pinned_cpus_queue,
pinned_cpus_semaphore, process_job_fn):
"""A worker function to be run in a separate process.
This function is on every worker process. It iteratively retrieves jobs from
job_queue until the job queue is empty and shutdown_event was set by the
scheduler process. Results are enqueued in result_queue.
If pinned_cpus_queue is set, this function pins the executing process to a
CPU ID that is taken from the queue.
"""
if pinned_cpus_queue:
cpu_id = pinned_cpus_queue.get()
os.sched_setaffinity(0, {cpu_id})
pinned_cpus_semaphore.release()
while not (job_queue.empty() and shutdown_event.is_set()):
job = None
try:
# Block for up to 0.1s, then start a new iteration.
job = job_queue.get(True, 0.1)
except queue.Empty:
continue
result_queue.put(process_job_fn(job))
class JobScheduler(abc.ABC):
"""An abstract job scheduler.
A job scheduler manages a pool of worker processes and dispatches jobs to
them. The job scheduler cannot be accessed from worker processes.
Subclasses must provide two methods: get_next_job generates a job to be
dispatched to some available worker. When the job has been processed, the
process_result callback is invoked.
When creating a job scheduler, users must specify the number of processes
and a function for processing a job (that is executed on worker processes).
The scheduler keeps requesting and scheduling new jobs until get_next_job
throws a NoMoreJobsException or the shutdown method is called.
"""
def __init__(self, num_processes, process_job_fn, pinned_cpus=[]):
"""Initialize the job scheduler.
num_processes is the desired number of worker processes. These can
optionally be pinned to a CPU. In that case, the number of CPU IDs in
pinned_cpus must match num_processes. process_job_fn is a callback for
processing a job. It is executed in a worker process and has no access
to the job scheduler.
"""
assert len(pinned_cpus) == num_processes or len(
pinned_cpus) == 0, "invalid number of pinned_cpus"
self.job_queue = mp.Queue()
self.pinned_cpus = pinned_cpus
self.pinned_cpus_queue = mp.Queue() if len(pinned_cpus) > 0 else None
self.pinned_cpus_semaphore = mp.Semaphore()
self.result_processor_thread = threading.Thread(
target=self._result_processor)
self.result_queue = mp.Queue()
self.shutdown_event = mp.Event()
self.processes = [
mp.Process(
target=worker,
args=(self.job_queue, self.result_queue, self.shutdown_event,
self.pinned_cpus_queue, self.pinned_cpus_semaphore,
process_job_fn)) for i in range(num_processes)
]
self.num_jobs = 0
self.was_started = False
@abc.abstractmethod
def get_next_job(self):
"""Generate a new job."""
pass
@abc.abstractmethod
def process_result(self, result):
"""Process a job result."""
pass
def _enqueue_job(self):
"""Generate a new job and enqueue it."""
if self.shutdown_event.is_set():
raise NoMoreJobsException()
job = self.get_next_job()
self.num_jobs += 1
self.job_queue.put(job)
def _result_processor(self):
"""A function that is iteratively looking for results.
This function is run in the job scheduler process, but in a separate
thread. It takes results from the result queue and passes them to the
abstract process_result method. For every result, it tries to enqueue
a new job. When a result has been received for each job and no more jobs
are available, the worker processes are shutdown.
"""
while self.num_jobs > 0:
result = self.result_queue.get()
self.num_jobs -= 1
self.process_result(result)
try:
self._enqueue_job()
except NoMoreJobsException:
pass
self.shutdown_event.set()
def start(self):
"""Start the job scheduler.
This function starts the scheduler and returns immediately.
"""
assert not self.was_started, "cannot start the scheduler multiple times"
self.was_started = True
self.shutdown_event.clear()
# Enqueue pinned CPU IDs.
for cpu_id in self.pinned_cpus:
self.pinned_cpus_queue.put(cpu_id)
# Start processes.
for p in self.processes:
p.start()
# Wait until each process pinned itself to a CPU (if pinning is requested).
if len(self.pinned_cpus) > 0:
for i in range(len(self.processes)):
self.pinned_cpus_semaphore.acquire()
# Enqueue a few initial jobs. The number of jobs must be large enough to
# keep the workers busy. For every received result, a new job will be
# enqueued in _result_processor, so the overall number of jobs in the queue
# will stay more or less the same throughout the execution.
for i in range(2 * len(self.processes)):
try:
self._enqueue_job()
except NoMoreJobsException:
break
# Start processing results in a new thread (but in the same process).
self.result_processor_thread.start()
def join(self):
"""Block until all jobs have been processed and the workers have been
shut down.
"""
self.result_processor_thread.join()
for p in self.processes:
p.join()
def shutdown(self):
"""Stop enqueing new jobs."""
self.shutdown_event.set()
| {
"content_hash": "15c61096c5d7a0737e283ce3bfb773c4",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 33.84431137724551,
"alnum_prop": 0.6838287331917905,
"repo_name": "iree-org/iree-llvm-sandbox",
"id": "c0ab689133c9fccc5c893f96d15ed38c7e353453",
"size": "5676",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/tools/scheduler/scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3782"
},
{
"name": "C++",
"bytes": "245631"
},
{
"name": "CMake",
"bytes": "21081"
},
{
"name": "MLIR",
"bytes": "81484"
},
{
"name": "Python",
"bytes": "670324"
},
{
"name": "Shell",
"bytes": "101058"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import win32gui, win32ui, win32con, win32api
def grab_screen(region=None):
hwin = win32gui.GetDesktopWindow()
if region:
left,top,x2,y2 = region
width = x2 - left + 1
height = y2 - top + 1
else:
width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
hwindc = win32gui.GetWindowDC(hwin)
srcdc = win32ui.CreateDCFromHandle(hwindc)
memdc = srcdc.CreateCompatibleDC()
bmp = win32ui.CreateBitmap()
bmp.CreateCompatibleBitmap(srcdc, width, height)
memdc.SelectObject(bmp)
memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
signedIntsArray = bmp.GetBitmapBits(True)
img = np.fromstring(signedIntsArray, dtype='uint8')
img.shape = (height,width,4)
srcdc.DeleteDC()
memdc.DeleteDC()
win32gui.ReleaseDC(hwin, hwindc)
win32gui.DeleteObject(bmp.GetHandle())
return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
| {
"content_hash": "a36bc6bab24098041994a768acd4ed9d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 33.05555555555556,
"alnum_prop": 0.6890756302521008,
"repo_name": "Sentdex/pygta5",
"id": "b54a15b46f2f835909e44b46e9a8d1e80baf8ca1",
"size": "1212",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "original_project/vjoy-testing/grabscreen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "319220"
}
],
"symlink_target": ""
} |
from cno.core.base import CNOBase
from cno.core.results import BooleanResults
from cno.core.models import BooleanModels
from cno.misc.profiler import do_profile
from cno.io.reactions import Reaction
import pandas as pd
import numpy as np
import pylab
import time
import bottleneck as bn
from collections import defaultdict
import collections
from cno.boolean.steady import Steady
class Fuzzy(Steady):
"""Implementation of Fuzzy logic
Edge are of 2 types: either edge from a stimuli, or other edges.
In original version, parameters are encoded as discrete values [0,1,...8]
and those values are mapped onto a set of g,k,n parameters of the hill function.
Stimuli have only one parameter to tune: the gain. Assume a linear variation. Why ?
Others have same gain set to 1 but k and n changes. why ?
"""
def __init__(self, pknmodel, data, verbose=True):
super(Fuzzy, self).__init__(pknmodel, data, verbose)
def init(self, time):
super(Fuzzy, self).init(time)
def hill_tf(self, x, g, n, k):
return (g*(1+k**n)*x**n/(x**n+k**n))
#@do_profile()
def simulate(self, tick=1, parameters=[]):
"""
parameters will be a list of params on each edge.
"""
# pandas is very convenient but slower than numpy
# The dataFrame instanciation is costly as well.
# For small models, it has a non-negligeable cost.
# inhibitors will be changed if not ON
#self.tochange = [x for x in self.model.nodes() if x not in self.stimuli_names
# and x not in self.and_gates]
# what about a species that is both inhibited and measured
testVal = 1e-3
values = self.values.copy()
if self.debug:
self.debug_values = []
self.residuals = []
self.penalties = []
self.count = 0
self.nSp = len(values)
residual = 1.
frac = 1.2
# #FIXME +1 is to have same resrults as in CellnOptR
# It means that if due to the cycles, you may not end up with same results.
# this happends if you have cyvles with inhbititions
# and an odd number of edges.
if reactions is None:
reactions = self.model.buffer_reactions
self.number_edges = len(reactions)
# 10 % time here
#predecessors = self.reactions_to_predecessors(reactions)
predecessors = defaultdict(collections.deque)
for r in reactions:
k,v = self._reac2pred[r]
predecessors[k].extend(v)
# speed up
keys = self.values.keys()
length_predecessors = dict([(node, len(predecessors[node])) for node in keys])
#self._length_predecessors = length_predecessors
# if there is an inhibition/drug, the node is 0
values = self.values.copy()
for inh in self.inhibitors_names:
if length_predecessors[inh] == 0:
#values[inh] = np.array([np.nan for x in range(0,self.N)])
#values[inh] = np.array([0 for x in range(0,self.N)])
values[inh] = np.zeros(self.N)
while (self.count < self.nSp * frac +1.) and residual > testVal:
self.previous = values.copy()
#self.X0 = pd.DataFrame(self.values)
#self.X0 = self.values.copy()
# compute AND gates first. why
for node in self.and_gates:
# replace na by large number so that min is unchanged
# THere are always predecessors
if length_predecessors[node] != 0:
values[node] = bn.nanmin(np.array([values[x] for x in predecessors[node]]), axis=0)
else:
#assert 1==0, "%s %s" % (node, predecessors[node])
values[node] = self.previous[node]
for node in self.tochange:
# easy one, just the value of predecessors
#if len(self.predecessors[node]) == 1:
# self.values[node] = self.values[self.predecessors[node][0]].copy()
if length_predecessors[node] == 0:
pass # nothing to change
else:
# TODO: if only one input, no need for that, just propagate signal.
dummy = np.array([values[x] if (x,node) not in self.toflip
else 1 - values[x] for x in predecessors[node]])
values[node] = bn.nanmax(dummy, axis=0)
# take inhibitors into account
if node in self.inhibitors_names:
# if inhibitors is on (1), multiply by 0
# if inhibitors is not active, (0), does nothing.
values[node] *= 1 - self.inhibitors[node].values
# here NAs are set automatically to zero because of the int16 cast
# but it helps speeding up a bit the code by removig needs to take care
# of NAs. if we use sumna, na are ignored even when 1 is compared to NA
self.m1 = np.array([self.previous[k] for k in keys ], dtype=np.int16)
self.m2 = np.array([values[k] for k in keys ], dtype=np.int16)
#residual = bn.nansum(np.square(self.m1 - self.m2))
#residual = np.nansum(np.square(self.m1 - self.m2))
residual = np.nansum(np.square(self.m1 - self.m2))
# TODO stop criteria should account for the length of the species to the
# the node itself so count < nSp should be taken into account whatever is residual.
#
if self.debug:
self.debug_values.append(self.previous.copy())
self.residuals.append(residual)
self.count += 1
if self.debug is True:
# add the latest values simulated in the while loop
self.debug_values.append(values.copy())
# Need to set undefined values to NAs
self.simulated[self.time] = np.array([values[k]
for k in self.data.df.columns ], dtype=float)#.transpose()
self.prev = {}
self.prev[self.time] = np.array([self.previous[k]
for k in self.data.df.columns ], dtype=float)#.transpose()
mask = self.prev[self.time] != self.simulated[self.time]
self.simulated[self.time][mask] = np.nan
self.simulated[self.time] = self.simulated[self.time].transpose()
# set the non-resolved bits to NA
# TODO TODO TODO
#newInput[which(abs(outputPrev-newInput) > testVal)] <- NA
# loops are handle diffenty
#@do_profile()
def score(self, NAFac=1, sizeFac=1e-4):
# We need also to include NAFac, number of reactions in the model
# for the sizeFac
# time 1 only is taken into account
#self.diff = np.square(self.measures[self.time] - self.simulated[self.time])
diff = self.measures[self.time] - self.simulated[self.time]
diff *= diff
N = diff.shape[0] * diff.shape[1]
Nna = np.isnan(diff).sum()
N-= Nna
#nInTot = number of edges on in global model
#nInTot = len(self.model.reactions)
nInTot = self.nInputs # should be correct
nDataPts = diff.shape[0] * diff.shape[1]
nDataP = N # N points excluding the NA if any
#print(N)
#NAPen = NAFac * sum(self.simulated.isnull())
# nInTot: number of inputs of expanded miodel
# nInputs: number of inputs of cut model
# In CNO:
# nDataPts = number of points irrespective of NA
# nDataP sum(!is.na(CNOlist@signals[[timeIndex]]))
# nInputs = number of inputs of the cut model
# for now, ketassume it is the same as the number of reactions
# TODO AND gates should count for 1 edge
nInputs = self.number_edges
sizePen = nDataPts * sizeFac * nInputs / float(nInTot)
#self.debug("nDataPts=%s" % nDataPts)
#self.debug("nInputs=%s" % nInputs)
#self.debug("nInTot=%s" % nInTot)
#self.debug('sizePen=%s' %sizePen)
# TODO
deviationPen = bn.nansum(diff) / 2. # to be in agreement with CNO but wrong
self.diff = diff / 2.
#self.debug("deviationPen=%s"% deviationPen)
#self.debug("Nna=%s"% Nna)
#self.debug("nDataP=%s"% nDataP)
deviationPen /= float(nDataP)
#self.debug("deviationPen=%s"% deviationPen)
S = deviationPen + sizePen / nDataP
return S
def plot_errors(self, columns=None):
# What do we use here: self.values
print("Use only time 1..")
# use eval_func with debug one
debug = self.debug
self.debug = True
buffering = self.buffering
self.buffering = False
self.eval_func(self.ga.results['Best_bitString'][-1])
self.buffering = buffering
self.debug = debug
if columns is None:
columns = self.data.df.columns
X1 = pd.DataFrame(self.debug_values[-1])[columns].copy()
X1 = self.get_df()
N = X1.shape[0]
X1['time'] = [self.time] * N
X1['cell'] = [self.data.cellLine] * N
X1['experiment'] = self.data.experiments.index
X1.set_index(['cell', 'experiment', 'time'], inplace=True)
self.data.sim.ix[X1.index] = X1
self.data.plot(mode='mse')
print("MSE= %s(caspo/cno with only 1 time)" % self.score())
print("MSE= %s(cellnoptr with only 1 time)" % str(self.score()/2.))
#@do_profile()
def eval_func(self, chromosome, prior=[]):
"""
:param prior: a list of same length as chromosome made of 0/1/None
"""
# TODO limnit the buffering ?
for i, this in enumerate(prior):
if this is not None:
chromosome[i] = this
# using string or tuple takes about the same time but faster than a list
str_chrome = tuple(chromosome)
if self.buffering and len(self.buffer)<self.length_buffer and str_chrome in self.buffer.keys():
return self.buffer[str_chrome]
else:
# 110 times faster using numpy array instead of a list...
reactions = [x for c,x in zip(chromosome, self._np_reactions) if c==1]
self.simulate(reactions=reactions)
score = self.score()
if self.buffering is True and len(self.buffer)<self.length_buffer:
self.buffer[str_chrome] = score
self.counter +=1
return score
def optimise(self, verbose=False, maxgens=500, show=False, reltol=0.1,
maxtime=60, prior=[]):
"""Using the CellNOptR-like GA"""
from cno.optimisers import genetic_algo
ga = genetic_algo.GABinary(len(self.model.reactions), verbose=verbose,
maxgens=maxgens, maxtime=maxtime, reltol=reltol)
def eval_func_in(x):
return self.eval_func(x, prior=prior)
self.counter = 0
ga.getObj = eval_func_in
ga.run(show=show)
self.ga = ga
self._fill_results()
return ga
def _fill_results(self):
from easydev import AttrDict
res = AttrDict(**self.ga.results)
results = pd.DataFrame(self.ga.results)
columns_int = ['Generation', 'Stall_Generation']
columns_float = ['Best_score', 'Avg_Score_Gen', 'Best_Score_Gen', 'Iter_time']
results[columns_int] = results[columns_int].astype(int)
results[columns_float] = results[columns_float].astype(float)
results = {
'best_score': res.Best_score,
'best_bitstring': res.Best_bitString[-1],
'all_scores': self.ga.popTolScores,
'all_bitstrings': self.ga.popTol,
'reactions': self.model.reactions,
#'sim_results': self.session.sim_results, # contains mse and sim at t0,t1,
'results': results,
#'models': models,
#'stimuli': self.session.stimuli.copy(),
#'inhibitors': self.session.inhibitors.copy(),
#'species': self.session.species,
}
results['pkn'] = self.pknmodel
results['midas'] = self.data
#self.results.models = models
all_bs = self.ga.popTol
df = pd.DataFrame(all_bs, columns=self.model.reactions)
models = BooleanModels(df)
models.scores = results['all_scores']
self.results.results = results
self.results.models = models
| {
"content_hash": "cfae92fc44bdfcb80f870a670667dc55",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 103,
"avg_line_length": 37.055882352941175,
"alnum_prop": 0.5795698071275498,
"repo_name": "cellnopt/cellnopt",
"id": "1f0df7548bcdab595ea2e62a8f7616fbeb774953",
"size": "12599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cno/fuzzy/fuzzy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "11056"
},
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Jupyter Notebook",
"bytes": "3748599"
},
{
"name": "Python",
"bytes": "845977"
}
],
"symlink_target": ""
} |
"""Wrappers for protocol buffer enum types."""
import enum
class ClassificationType(enum.IntEnum):
"""
Type of the classification problem.
Attributes:
CLASSIFICATION_TYPE_UNSPECIFIED (int): Should not be used, an un-set enum has this value by default.
MULTICLASS (int): At most one label is allowed per example.
MULTILABEL (int): Multiple labels are allowed for one example.
"""
CLASSIFICATION_TYPE_UNSPECIFIED = 0
MULTICLASS = 1
MULTILABEL = 2
class Model(object):
class DeploymentState(enum.IntEnum):
"""
Deployment state of the model.
Attributes:
DEPLOYMENT_STATE_UNSPECIFIED (int): Should not be used, an un-set enum has this value by default.
DEPLOYED (int): Model is deployed.
UNDEPLOYED (int): Model is not deployed.
"""
DEPLOYMENT_STATE_UNSPECIFIED = 0
DEPLOYED = 1
UNDEPLOYED = 2
| {
"content_hash": "ac3996a8536ecf054126ef56be07379f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 107,
"avg_line_length": 27.470588235294116,
"alnum_prop": 0.6498929336188437,
"repo_name": "dhermes/gcloud-python",
"id": "b98cd0818972d907b6edc56739cb1f2a75bf3ef4",
"size": "1535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "automl/google/cloud/automl_v1beta1/gapic/enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
} |
"""
https://code.google.com/codejam/contest/6214486/dashboard
Max recursion depth exceeded on large input. This could be
avoided by visiting rooms in order of their assigned value,
largest value first.
"""
import itertools as it
FOUR_DOORS = ((1, 0), (-1, 0), (0, 1), (0, -1))
def memo(f):
"""memoization decorator, taken from Peter Norvig's Design of Computer
Programs course on Udacity.com"""
cache = {}
def _f(*args):
try:
return cache[args]
except KeyError:
result = cache[args] = f(*args)
return result
except TypeError: # unhashable argument
return f(*args)
return _f
def neighbors(maze, index): # runtime: O(1)
i, j = index
S = len(maze)
neighbor_coords = ((i+a, j+b) for a,b in FOUR_DOORS)
return ((a,b,maze[a][b]) for a,b in neighbor_coords if 0<=a<S and 0<=b<S)
@memo
def travel_distance(maze, room_index):
i, j = room_index
room_value = maze[i][j]
next_room_index = next(((i2,j2)
for i2,j2,v in neighbors(maze, room_index)
if v == room_value+1), None)
if next_room_index is None:
return 1
return 1 + travel_distance(maze, next_room_index)
def find_winner(maze):
S = len(maze)
maze_indices = it.product(xrange(S), xrange(S))
room_value = lambda i, j: maze[i][j]
winner_room_index = max(maze_indices,
key=lambda ii: (travel_distance(maze, ii), -room_value(*ii)))
return room_value(*winner_room_index), travel_distance(maze, winner_room_index)
def main():
T = int(raw_input().strip())
raw_input()
for t in xrange(1, T+1):
S = int(raw_input().strip())
maze = tuple(tuple(map(int, raw_input().split())) for _ in xrange(S))
r, d = find_winner(maze)
print "Case #{}: {} {}".format(t, r, d)
if __name__ == '__main__':
main()
| {
"content_hash": "4cfc9b5a37394b2d9aa44bca061743d4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 89,
"avg_line_length": 26.561643835616437,
"alnum_prop": 0.5729757607013924,
"repo_name": "py-in-the-sky/challenges",
"id": "b1773eb779865723ee13a77d86b08c9146e0f374",
"size": "1939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-code-jam/cube_iv_memoized.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "2960"
},
{
"name": "Java",
"bytes": "598"
},
{
"name": "Python",
"bytes": "216347"
},
{
"name": "Ruby",
"bytes": "10934"
}
],
"symlink_target": ""
} |
import random, sys, sqlite3, getopt
sys.path.insert(0, "modules/")
from collections import Counter
from monmods import GetNature
from monmods import Shiny
from monmods import RareRoll
class Nature(object):
conn = sqlite3.connect('PTA_ORAS.db')
ModUp = 2
ModDown = 2
def __init__(self,nature):
# I have to put strings that go into sqlite3 queries in () with a comma so we do that below;
natname = (nature,)
natdata = Nature.conn.execute('SELECT `raise`, lower FROM ORAS_nature where name=?' , natname)
self.name = nature
for row in natdata:
self.StatUp = row[0]
self.StatDown = row[1]
if self.StatUp == "None":
Nature.ModUp = 0
if self.StatDown == "None":
Nature.ModDown = 0
if self.StatUp == "hp":
Nature.ModUp = 1
if self.StatDown == "hp":
Nature.ModDown = 1
self.StatUpMod = {str(self.StatUp): int(Nature.ModUp)}
self.StatDownMod = {str(self.StatDown): int(Nature.ModDown)}
def __str__(self):
return "Nature: {}\n+{} {}\n-{} {}".format(self.name, Nature.ModUp, self.StatUp, Nature.ModDown, self.StatDown)
| {
"content_hash": "e37d391cb39942eea837792239720565",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 113,
"avg_line_length": 34.03225806451613,
"alnum_prop": 0.6834123222748815,
"repo_name": "Phixia/WildEncounter",
"id": "3b100ba692aac1ff9ecfddb88db4bc44ed8bb318",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/nature.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "42765"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
from django.db.models.functions import Length
def forwards_func(apps, schema_editor):
max_length = 63
Project = apps.get_model('projects', 'Project')
projects_invalid_slug = (
Project
.objects
.annotate(slug_length=Length('slug'))
.filter(slug_length__gt=max_length)
)
for project in projects_invalid_slug:
project.slug = project.slug[:max_length]
project.save()
projects_invalid_name = (
Project
.objects
.annotate(name_length=Length('name'))
.filter(name_length__gt=max_length)
)
for project in projects_invalid_name:
project.name = project.name[:max_length]
project.save()
class Migration(migrations.Migration):
dependencies = [
('projects', '0029_add_additional_languages'),
]
operations = [
migrations.RunPython(forwards_func),
migrations.AlterField(
model_name='project',
name='slug',
field=models.SlugField(max_length=63, unique=True, verbose_name='Slug'),
),
migrations.AlterField(
model_name='project',
name='name',
field=models.CharField(max_length=63, verbose_name='Name'),
),
]
| {
"content_hash": "1152989fff4a701fa4d434a3474eef02",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 84,
"avg_line_length": 27.70212765957447,
"alnum_prop": 0.5998463901689708,
"repo_name": "rtfd/readthedocs.org",
"id": "ee27e9602a9a00859c0f85390519b245797f3c2d",
"size": "1376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/projects/migrations/0030_change-max-length-project-slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.