text stringlengths 4 1.02M | meta dict |
|---|---|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
    """Style overrides applied to the *selected* points of a
    scatterternary trace (marker and text font)."""

    # class properties
    # --------------------
    _parent_path_str = "scatterternary"
    _path_str = "scatterternary.selected"
    _valid_props = {"marker", "textfont"}

    # marker
    # ------
    @property
    def marker(self):
        """
        Marker style for selected points.

        The 'marker' property is an instance of Marker
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scatterternary.selected.Marker`
          - A dict of string/value properties that will be passed
            to the Marker constructor

            Supported dict properties:

                color
                    Sets the marker color of selected points.
                opacity
                    Sets the marker opacity of selected points.
                size
                    Sets the marker size of selected points.

        Returns
        -------
        plotly.graph_objs.scatterternary.selected.Marker
        """
        return self["marker"]

    @marker.setter
    def marker(self, val):
        self["marker"] = val

    # textfont
    # --------
    @property
    def textfont(self):
        """
        Text font style for selected points.

        The 'textfont' property is an instance of Textfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scatterternary.selected.Textfont`
          - A dict of string/value properties that will be passed
            to the Textfont constructor

            Supported dict properties:

                color
                    Sets the text font color of selected points.

        Returns
        -------
        plotly.graph_objs.scatterternary.selected.Textfont
        """
        return self["textfont"]

    @textfont.setter
    def textfont(self, val):
        self["textfont"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        marker
            :class:`plotly.graph_objects.scatterternary.selected.Ma
            rker` instance or dict with compatible properties
        textfont
            :class:`plotly.graph_objects.scatterternary.selected.Te
            xtfont` instance or dict with compatible properties
        """

    def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
        """
        Construct a new Selected object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatterternary.Selected`
        marker
            :class:`plotly.graph_objects.scatterternary.selected.Ma
            rker` instance or dict with compatible properties
        textfont
            :class:`plotly.graph_objects.scatterternary.selected.Te
            xtfont` instance or dict with compatible properties

        Returns
        -------
        Selected
        """
        super(Selected, self).__init__("selected")

        # Fast path: internal construction that only attaches a parent.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a private dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterternary.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Selected`"""
            )

        # Validation behaviour flags consumed by the base class.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties: explicit keyword arguments take precedence
        # over values supplied inside `arg`.
        for prop, override in (("marker", marker), ("textfont", textfont)):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Remaining entries are handled (and validated) by the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid so later assignments validate normally.
        self._skip_invalid = False
| {
"content_hash": "9729b6bb1d66cec29e2479988c0b18a4",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 86,
"avg_line_length": 30.25170068027211,
"alnum_prop": 0.5439622217225095,
"repo_name": "plotly/plotly.py",
"id": "ea416db8aa6c8d72ba21c32a092fc1cbade59ed6",
"size": "4447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/scatterternary/_selected.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Add workouts table
Revision ID: 9cd42e48cd23
Revises:
Create Date: 2017-02-26 15:19:51.725179
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9cd42e48cd23'
# No parent: this is the first migration in the chain.
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``workouts`` table: integer PK, a date, and a JSON
    column holding the exercises performed."""
    op.create_table(
        'workouts',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('date', sa.DATE(), nullable=False),
        sa.Column('exercises', postgresql.JSON(astext_type=sa.Text()), nullable=False),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    """Reverse :func:`upgrade` by dropping the ``workouts`` table."""
    op.drop_table('workouts')
| {
"content_hash": "9ac2e4ddc84ab3d33bccb1ad99f162ee",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 87,
"avg_line_length": 22.75862068965517,
"alnum_prop": 0.693939393939394,
"repo_name": "vinntreus/training_stats",
"id": "00e4799706ea55ce2904f5a5799d607ac6c1a9b5",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/9cd42e48cd23_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "410"
},
{
"name": "HTML",
"bytes": "2371"
},
{
"name": "Makefile",
"bytes": "469"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "14715"
}
],
"symlink_target": ""
} |
from flask import Flask
from git import *
import shutil
import os
import stat
import time
app = Flask(__name__)
@app.route("/")
def pythonBlameHistory():
    """Clone the ghdata repository from GitHub and report the total number
    of blamed lines at HEAD.

    Returns an HTML string containing the HEAD commit hash, its author
    date, and the total line count summed over every blob in the tree.

    NOTE(review): the original comments referred to an outer loop over the
    commit history and to per-organization totals; neither exists in this
    code — only HEAD is analyzed.  Confirm whether history analysis was
    intended before extending.
    """
    # Hard-coded folder for the last download of ghdata.
    repo_path = './ghdata'
    # Remove any previous checkout.  Every file and directory is first made
    # writable (read-only .git objects block deletion on some platforms).
    # Based on: http://stackoverflow.com/questions/2853723/whats-the-python-way-for-recursively-setting-file-permissions
    if os.path.exists(repo_path):
        for root, directories, files in os.walk(repo_path):
            for directory in directories:
                os.chmod(os.path.join(root, directory), stat.S_IWRITE)
            for file in files:
                os.chmod(os.path.join(root, file), stat.S_IWRITE)
        os.chmod(repo_path, stat.S_IWRITE)
        # Delete the old ghdata checkout.
        shutil.rmtree(repo_path)
    # Fetch a fresh copy of ghdata from GitHub (network access required).
    repo = Repo.init('ghdata')
    origin = repo.create_remote('origin','https://github.com/OSSHealth/ghdata.git')
    origin.fetch()
    origin.pull(origin.refs[0].remote_head)
    # Running count of lines in the current file.
    total_lines_in_file = 0
    # Running count of lines across the whole repository.
    total_lines_in_repo = 0
    # Accumulated HTML returned to the client.
    outputString = ""
    total_lines_in_repo = 0
    # Walk the commit tree rather than the working directory: the tree is
    # the authoritative list of files belonging to HEAD.
    for file_in_repo in repo.head.commit.tree.traverse():
        # Reset the per-file counter for each new tree entry.
        total_lines_in_file = 0
        # Only blobs (files) have lines; skip tree (directory) entries.
        if file_in_repo.type == 'blob':
            for blame_commit, lines in repo.blame('HEAD', file_in_repo.path):
                # git blame yields hunks of one or more consecutive lines by
                # the same author; count individual lines so the total
                # matches the actual file length.
                for line in lines:
                    total_lines_in_file += 1
            # Fold non-empty files into the repository-wide total.
            if total_lines_in_file > 0:
                total_lines_in_repo += total_lines_in_file
    # Build the response: HEAD commit, author date, repo-wide line count.
    outputString = outputString + "REPO TOTALS FOR HEAD COMMIT: " + str(repo.head.commit) + " authored at " + time.strftime("%I:%M %p, %b %d, %Y", time.gmtime(repo.head.commit.authored_date)) + " <br>"
    outputString = outputString + "TOTAL REPO LINES: " + str(total_lines_in_repo) + "<br>"
    return outputString
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
| {
"content_hash": "00a8e2dea798e841d1c9f814d6a9f175",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 202,
"avg_line_length": 47.46666666666667,
"alnum_prop": 0.6491573033707865,
"repo_name": "Hackers-To-Engineers/ghdata-sprint1team-2",
"id": "086d2bc26751b14ff017846f30b6569a94a3f5c1",
"size": "3560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "busFactor/pythonBlameLinesInRepo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51262"
},
{
"name": "HTML",
"bytes": "7692"
},
{
"name": "JavaScript",
"bytes": "13109"
},
{
"name": "Makefile",
"bytes": "1358"
},
{
"name": "Python",
"bytes": "89972"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.contenttypes.models import ContentType
from reversion.models import Version, has_int_pk
class Migration(DataMigration):
    """Backfill ``Version.object_id_int`` from the string ``object_id`` for
    every model that uses an integer primary key."""

    def forwards(self, orm):
        """Populate ``object_id_int`` on existing Version rows; delete
        versions whose content type no longer exists."""
        for version in Version.objects.all().iterator():
            try:
                content_type = ContentType.objects.get_for_id(version.content_type_id)
            except AttributeError:
                version.delete()  # This version refers to a content type that doesn't exist any more.
                continue
            model = content_type.model_class()
            if has_int_pk(model):
                # Safe cast: has_int_pk guarantees the PK is an integer field.
                version.object_id_int = int(version.object_id)
                version.save()

    def backwards(self, orm):
        """No-op: the populated integer column is simply left in place."""

    # Frozen ORM model definitions captured by South when this migration
    # was generated.  Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'reversion.revision': {
            'Meta': {'object_name': 'Revision'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'reversion.version': {
            'Meta': {'object_name': 'Version'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'format': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.TextField', [], {}),
            'object_id_int': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'object_repr': ('django.db.models.fields.TextField', [], {}),
            'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reversion.Revision']"}),
            'serialized_data': ('django.db.models.fields.TextField', [], {}),
            'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True'})
        }
    }

    complete_apps = ['reversion']
| {
"content_hash": "a29b1f474b34a06caca48a051c355a9b",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 182,
"avg_line_length": 62.625,
"alnum_prop": 0.5590636908002178,
"repo_name": "vipins/ccccms",
"id": "51acdb156ba1ebe069fd4f2c98cd62c293d77335",
"size": "5529",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/Lib/site-packages/reversion/migrations/0004_populate_object_id_int.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "309380"
},
{
"name": "C++",
"bytes": "136422"
},
{
"name": "CSS",
"bytes": "250114"
},
{
"name": "JavaScript",
"bytes": "626303"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "11038514"
},
{
"name": "Shell",
"bytes": "889"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
import doctest
import unittest
import glob
import os
# Doctest options: stop at the first failing example per file, ignore
# whitespace differences, and allow "..." wildcards in expected output.
optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
               doctest.NORMALIZE_WHITESPACE |
               doctest.ELLIPSIS)
def list_doctests():
    """Return the paths of all ``*.txt`` doctest files that live next to
    this module.

    BUG FIX: removed a stray Python-2 ``print __file__`` debug statement —
    a leftover that also made the module a syntax error under Python 3.
    """
    pattern = os.path.join(os.path.dirname(__file__), '*.txt')
    return [filename for filename in glob.glob(pattern)]
def open_file(filename, mode='r'):
    """Open *filename* relative to this tests package's directory."""
    base_dir = os.path.dirname(__file__)
    return open(os.path.join(base_dir, filename), mode)
def setUp(test):
    """Inject the ``open_file`` helper into each doctest's namespace."""
    test.globs.update({'open_file': open_file})
def test_suite():
    """Build a unittest suite from every doctest file in this package."""
    suites = [
        doctest.DocFileSuite(
            os.path.basename(path),
            optionflags=optionflags,
            setUp=setUp,
        )
        for path in list_doctests()
    ]
    return unittest.TestSuite(suites)
# Allow running this module directly: execute the doctest suite verbosely.
if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=1)
    runner.run(test_suite())
| {
"content_hash": "8ad680f7140f2542c432f68dd708946c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 28.228571428571428,
"alnum_prop": 0.5910931174089069,
"repo_name": "31415us/trajectory",
"id": "25d5f7a1ea4293ea9c68897a35dfd45b2c303475",
"size": "988",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "py/env/lib/python2.7/site-packages/shapely/tests/test_doctests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "29845"
},
{
"name": "Python",
"bytes": "1518367"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
def ref_softmax(x, axis):
    """NumPy reference softmax along *axis*, max-subtracted for numerical
    stability."""
    shifted = x - x.max(axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis, keepdims=True)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2, -1, -2, -3])
@pytest.mark.parametrize("ctx, func_name", list_context('Softmax'))
def test_softmax_forward_backward(seed, axis, ctx, func_name):
    """Check nnabla Softmax forward/backward against the NumPy reference
    for every axis of a 3-D input (positive and negative indices)."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # One (2, 3, 4) input, scaled by 2 to exercise non-trivial exponents.
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.softmax, ref_softmax, inputs, func_args=[axis],
                    ctx=ctx, func_name=func_name)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2, -1, -2, -3])
@pytest.mark.parametrize("ctx, func_name", list_context('Softmax'))
def test_softmax_double_backward(seed, axis, ctx, func_name):
    """Check second-order gradients of Softmax via the numeric
    backward-function tester, for every axis of a 3-D input."""
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    backward_function_tester(rng, F.softmax, inputs, func_args=[axis], ctx=ctx)
| {
"content_hash": "255a4184315212054c953a00db1418dc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 38.54838709677419,
"alnum_prop": 0.6769874476987447,
"repo_name": "sony/nnabla",
"id": "69d3bb4b7c04e84cd4853d99acae339996dde6e7",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/function/test_softmax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
} |
__author__ = 'Gene'
import PIL
from PIL import Image
import random
def generate_random_seed(width, height, out_file_name):
    """Write a ``width`` x ``height`` PNG of uniformly random RGB pixels.

    BUG FIX: the original called ``putpixel((i, j))`` with ``i`` ranging
    over *height* and ``j`` over *width*, but Pillow's putpixel expects
    (x, y) with x < width — so any non-square image raised IndexError.
    """
    img = Image.new("RGB", (width, height), 0)
    for y in range(height):
        for x in range(width):
            img.putpixel(
                (x, y),
                (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
            )
    img.save(out_file_name, "PNG")
def generate_random_seed(width, height, out_file_name):
    """Write a ``width`` x ``height`` all-white PNG to *out_file_name*."""
    # NOTE(review): this redefines the generate_random_seed above, so the
    # random-pixel variant is unreachable dead code — only this blank-white
    # version is callable.  Confirm intent and rename one of the two.
    file = Image.new("RGB", (width, height), "white")
    file.save(out_file_name, "PNG")
"content_hash": "1ca19a40710c517244053ec1141ff72d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 105,
"avg_line_length": 33.4375,
"alnum_prop": 0.6261682242990654,
"repo_name": "gene-levitzky/Cartographer",
"id": "0c72caaf7661bfe2bf1fd7194828730a38e6acc2",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SeedGenerator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1911"
},
{
"name": "Shell",
"bytes": "24"
}
],
"symlink_target": ""
} |
from numpy import prod
from .. import check
from ..check import is_shape
class Memory(object):
    """Flat storage for model parameters and their gradients.

    Layout: one ``(2, size)`` array allocated via ``ops``; row 0 holds
    parameter values, row 1 the matching gradients.  ``_offsets`` maps a
    name to ``(offset, row, shape)`` and ``_i`` is the high-water mark of
    used columns.  Gradients share the offset of their parameter so the
    two rows stay aligned.

    Cleanup: removed a large block of commented-out dead code
    (``merge_params`` / ``replace_mem``) that trailed this class.
    """

    def __init__(self, ops, size=128):
        if size < 0:
            raise ValueError("TODO error re negative size %d" % size)
        self.ops = ops
        self._mem = self.ops.allocate((2, size))
        self._offsets = {}
        self._i = 0

    @property
    def weights(self):
        # All parameter values allocated so far, as one contiguous vector.
        return self._mem[0, :self._i]

    @property
    def gradient(self):
        # All gradient values allocated so far, aligned with `weights`.
        return self._mem[1, :self._i]

    def __contains__(self, name):
        return name in self._offsets

    def __getitem__(self, name):
        """Return the named array (parameter or gradient) as a view
        reshaped to its registered shape."""
        offset, col, shape = self._offsets[name]
        return self._mem[col, offset : offset + prod(shape)].reshape(shape)

    def get(self, name, default=None):
        """Like ``__getitem__`` but return *default* for unknown names."""
        return self[name] if name in self._offsets else default

    def set(self, value):
        """Overwrite every stored parameter value at once."""
        self._mem[0, :self._i] = value

    @check.arg(2, is_shape)
    def add(self, name, shape):
        """Reserve space for a new parameter and return a view onto it."""
        assert name not in self._offsets, "TODO error"
        self._offsets[name] = (self._i, 0, shape)
        blob = self._get_blob(prod(shape))
        return blob[0].reshape(shape)

    def add_gradient(self, grad_name, param_name):
        """Register a gradient slot aliased to *param_name*'s offset and
        return a view onto it."""
        assert grad_name not in self._offsets, "TODO error"
        offset, _, shape = self._offsets[param_name]
        self._offsets[grad_name] = (offset, 1, shape)
        return self._mem[1, offset : offset + prod(shape)].reshape(shape)

    def _get_blob(self, nr_req):
        # Reserve nr_req columns, growing geometrically when out of room.
        nr_avail = self._mem.shape[1] - (self._i + 1)
        if nr_avail < nr_req:
            self._realloc(max(self._mem.shape[1], nr_req) * 2)
        blob = self._mem[:, self._i : self._i + nr_req]
        self._i += nr_req
        return blob

    def _realloc(self, new_size):
        # Allocate a bigger buffer and copy the used prefix across.
        new_mem = self.ops.allocate((self._mem.shape[0], new_size))
        new_mem[:, :self._i + 1] = self._mem[:, :self._i + 1]
        self._mem = new_mem
| {
"content_hash": "d2ac17b5e8ebcd560df653a8246fb78b",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 30.732558139534884,
"alnum_prop": 0.555807794173288,
"repo_name": "ryfeus/lambda-packs",
"id": "13e6af3c56ffc2ae3ce97b3f9780718051322d82",
"size": "2643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Spacy/source2.7/thinc/neural/mem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validates the ``waterfall.increasing.marker.line.width`` property:
    a non-negative scalar number."""

    def __init__(
        self,
        plotly_name="width",
        parent_name="waterfall.increasing.marker.line",
        **kwargs
    ):
        # Schema defaults; any of them can be overridden through kwargs.
        defaults = dict(
            array_ok=kwargs.pop("array_ok", False),
            edit_type=kwargs.pop("edit_type", "style"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "style"),
        )
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs
        )
| {
"content_hash": "d8dec0bbe2fda91898cf7fccc20b7709",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 31.05263157894737,
"alnum_prop": 0.559322033898305,
"repo_name": "plotly/python-api",
"id": "80f8e4bf19368b237ddb50a80d1a052113098102",
"size": "590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/waterfall/increasing/marker/line/_width.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
import warnings
from nose.tools import assert_equals, assert_in, assert_true
from unittest import TestCase
from unittest.mock import patch
from test_botty.mocks import MockMessage, MockRequestGET
from botty_mcbotface.plugins.searches import google, youtube
# Short-circuit Message object that just returns results
mock_message = MockMessage()
class TestSearches(TestCase):
    """Tests for the google/youtube search plugins (requests.get mocked).

    Note: the original ``tearDown`` containing only ``del self`` was a
    no-op and has been removed.
    """

    def setUp(self):
        # lxml module throws warnings only relevant in production
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        warnings.filterwarnings("ignore", category=ResourceWarning)

    @patch('requests.get')
    def test_google_search(self, mock_get_html):
        """Test google search retrieves first search result from google"""
        # Path to mock link results data
        curr_dir = os.path.dirname(__file__)
        rel_path = '../../mocks/google_links.html'
        html_path = os.path.join(curr_dir, rel_path)
        mock_response = MockRequestGET(html_path)
        mock_get_html.return_value = mock_response
        # Call main method
        first_result = google(mock_message, 'testing')
        assert_equals('http://istqbexamcertification.com/what-is-software-testing/', first_result)

    @patch('requests.get')
    def test_youtube_search(self, mock_get_html):
        """Test youtube search retrieves first search result from youtube"""
        # Path to mock link results data
        curr_dir = os.path.dirname(__file__)
        rel_path = '../../mocks/youtube_links.html'
        html_path = os.path.join(curr_dir, rel_path)
        mock_get_html.return_value = MockRequestGET(html_path)
        # Call main method
        first_result = youtube(mock_message, 'testing')
        assert_in('https://www.youtube.com/watch?v=Bi-v6M4fGbA', first_result)

    @patch('requests.get')
    def test_google_search_handles_no_results(self, mock_get_html):
        """Test google search command can handle no search results"""
        mock_get_html.return_value.text = ''
        response = google(mock_message, 'testing')
        # BUG FIX: assert_true('No Google results', response) could never
        # fail — the non-empty string was the asserted expression and the
        # response was only the failure message.  Assert the substring.
        assert_in('No Google results', response)

    @patch('requests.get')
    def test_youtube_search_handles_no_results(self, mock_get_html):
        """Test youtube search command can handle no search results"""
        mock_get_html.return_value.text = ''
        response = youtube(mock_message, 'testing')
        # BUG FIX: same assert_true misuse as the google case above.
        assert_in('No YouTube results', response)
| {
"content_hash": "72e77cdf1212b70e18860f69c023ff81",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 98,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.6742971887550201,
"repo_name": "ColumbiaSC-Tech/botty_mcbotface",
"id": "d754841bf2fbb5efe09085a6da136c33f9ae034e",
"size": "2490",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test_botty/tests/test_plugins/test_searches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "721329"
},
{
"name": "Python",
"bytes": "57438"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension module names; this project uses none.
extensions = []

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'singfel'
copyright = u'2015, Chunhong Yoon'

# Version info, used for |version| and |release| substitutions.
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'

# The language for content autogenerated by Sphinx.
#language = None

# |today| substitution: set `today` directly, or `today_fmt` as a
# strftime format.
#today = ''
#today_fmt = '%B %d, %Y'

# Patterns, relative to source directory, ignored when looking for
# source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The builtin theme used for HTML and HTML Help pages.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Custom static files (style sheets, etc.).  Copied after the builtin
# static files, so a "default.css" here overrides the builtin one.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'singfeldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'singfel.tex', u'singfel Documentation',
u'Chunhong Yoon', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'singfel', u'singfel Documentation',
[u'Chunhong Yoon'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'singfel', u'singfel Documentation',
u'Chunhong Yoon', 'singfel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "28e023afb49158ade3ccdef8fc881c63",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 79,
"avg_line_length": 31.65983606557377,
"alnum_prop": 0.7051132686084143,
"repo_name": "eucall-software/singfel",
"id": "b613db793e402896508f205a9438ce0580120282",
"size": "8145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6703"
},
{
"name": "C++",
"bytes": "394227"
},
{
"name": "CMake",
"bytes": "49012"
},
{
"name": "Cuda",
"bytes": "15960"
},
{
"name": "GLSL",
"bytes": "745"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Matlab",
"bytes": "7517"
},
{
"name": "Python",
"bytes": "8145"
},
{
"name": "Shell",
"bytes": "642"
}
],
"symlink_target": ""
} |
"""Config Drive v2 helper."""
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _LW
from nova.openstack.common import fileutils
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)
# oslo.config options controlling how config drives are built.
configdrive_opts = [
    cfg.StrOpt('config_drive_format',
               default='iso9660',
               help='Config drive format. One of iso9660 (default) or vfat'),
    # force_config_drive is a string option, to allow for future behaviors
    # (e.g. use config_drive based on image properties)
    cfg.StrOpt('force_config_drive',
               help='Set to force injection to take place on a config drive '
                    '(if set, valid options are: always)'),
    cfg.StrOpt('mkisofs_cmd',
               default='genisoimage',
               help='Name and optionally path of the tool used for '
                    'ISO image creation')
]
# Register the options on the process-global oslo.config object.
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
class ConfigDriveBuilder(object):
    """Build config drives, optionally as a context manager."""
    def __init__(self, instance_md=None):
        """Create a builder, optionally pre-loading instance metadata.

        :param instance_md: object exposing metadata_for_config_drive(),
            whose (path, data) pairs are staged for inclusion in the drive.
        """
        # NOTE(review): imagefile is never assigned inside this class as
        # shown, so cleanup() is a no-op unless a caller sets it -- confirm.
        self.imagefile = None
        self.mdfiles = []
        if instance_md is not None:
            self.add_instance_metadata(instance_md)
    def __enter__(self):
        # No extra setup beyond __init__; used as "with ConfigDriveBuilder(...)".
        return self
    def __exit__(self, exctype, excval, exctb):
        if exctype is not None:
            # NOTE(mikal): this means we're being cleaned up because an
            # exception was thrown. All bets are off now, and we should not
            # swallow the exception
            return False
        # Normal exit: remove the recorded image file (if any).
        self.cleanup()
    def _add_file(self, basedir, path, data):
        # Write one staged metadata file under basedir, creating parents.
        filepath = os.path.join(basedir, path)
        dirname = os.path.dirname(filepath)
        fileutils.ensure_tree(dirname)
        with open(filepath, 'wb') as f:
            f.write(data)
    def add_instance_metadata(self, instance_md):
        """Stage every (path, data) pair produced by instance_md."""
        for (path, data) in instance_md.metadata_for_config_drive():
            self.mdfiles.append((path, data))
    def _write_md_files(self, basedir):
        # Materialize all staged (path, data) pairs under basedir.
        for data in self.mdfiles:
            self._add_file(basedir, data[0], data[1])
    def _make_iso9660(self, path, tmpdir):
        # Build an ISO9660 image at ``path`` from the tree in ``tmpdir``
        # with the external tool named by CONF.mkisofs_cmd (genisoimage).
        publisher = "%(product)s %(version)s" % {
            'product': version.product_string(),
            'version': version.version_string_with_package()
        }
        utils.execute(CONF.mkisofs_cmd,
                      '-o', path,
                      '-ldots',
                      '-allow-lowercase',
                      '-allow-multidot',
                      '-l',
                      '-publisher',
                      publisher,
                      '-quiet',
                      '-J',
                      '-r',
                      '-V', 'config-2',
                      tmpdir,
                      attempts=1,
                      run_as_root=False)
    def _make_vfat(self, path, tmpdir):
        # NOTE(mikal): This is a little horrible, but I couldn't find an
        # equivalent to genisoimage for vfat filesystems.
        # Strategy: create a fixed-size file, format it as vfat, loop-mount
        # it, copy the staged tree in, and always unmount on the way out.
        with open(path, 'wb') as f:
            f.truncate(CONFIGDRIVESIZE_BYTES)
        utils.mkfs('vfat', path, label='config-2')
        with utils.tempdir() as mountdir:
            mounted = False
            try:
                _, err = utils.trycmd(
                    'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(),
                                                           os.getgid()),
                    path,
                    mountdir,
                    run_as_root=True)
                if err:
                    raise exception.ConfigDriveMountFailed(operation='mount',
                                                           error=err)
                mounted = True
                # NOTE(mikal): I can't just use shutils.copytree here,
                # because the destination directory already
                # exists. This is annoying.
                for ent in os.listdir(tmpdir):
                    shutil.copytree(os.path.join(tmpdir, ent),
                                    os.path.join(mountdir, ent))
            finally:
                if mounted:
                    utils.execute('umount', mountdir, run_as_root=True)
    def make_drive(self, path):
        """Make the config drive.
        :param path: the path to place the config drive image at
        :raises ProcessExecuteError if a helper process has failed.
        """
        # Stage metadata files in a temp dir, then build the image in the
        # configured format (iso9660 or vfat); unknown formats raise.
        with utils.tempdir() as tmpdir:
            self._write_md_files(tmpdir)
            if CONF.config_drive_format == 'iso9660':
                self._make_iso9660(path, tmpdir)
            elif CONF.config_drive_format == 'vfat':
                self._make_vfat(path, tmpdir)
            else:
                raise exception.ConfigDriveUnknownFormat(
                    format=CONF.config_drive_format)
    def cleanup(self):
        """Delete the recorded image file; missing files are ignored."""
        if self.imagefile:
            fileutils.delete_if_exists(self.imagefile)
    def __repr__(self):
        return "<ConfigDriveBuilder: " + str(self.mdfiles) + ">"
def required_by(instance):
    """Return whether ``instance`` must be given a config drive.

    Truthy when the instance itself requested one, when
    CONF.force_config_drive is 'always' or parses as a true boolean
    string, or when the image property img_config_drive is 'mandatory'.
    """
    image_prop = utils.instance_sys_meta(instance).get(
        utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive', 'optional')
    if image_prop not in ['optional', 'mandatory']:
        # Invalid values are only logged; they fall through to the return
        # expression below, where they simply never match 'mandatory'.
        LOG.warning(_LW('Image config drive option %(image_prop)s is invalid '
                        'and will be ignored'),
                    {'image_prop': image_prop},
                    instance=instance)
    # NOTE(review): this returns the first truthy operand (e.g. the raw
    # instance.get() value), not necessarily a bool -- callers appear to
    # use it only as a truth value.
    return (instance.get('config_drive') or
            'always' == CONF.force_config_drive or
            strutils.bool_from_string(CONF.force_config_drive) or
            image_prop == 'mandatory'
    )
| {
"content_hash": "bac184fc85f4ddfddca2b9fe8d9cce61",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 78,
"avg_line_length": 34.424418604651166,
"alnum_prop": 0.5374092214153015,
"repo_name": "cloudbase/nova-virtualbox",
"id": "6e546a76aa752977d9c78a9fc91789ed95da36bc",
"size": "6568",
"binary": false,
"copies": "2",
"ref": "refs/heads/virtualbox_driver",
"path": "nova/virt/configdrive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16016453"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "497954"
}
],
"symlink_target": ""
} |
from __future__ import division
from matplotlib import pyplot as plt
from collections import Counter

# Classic bias-variance illustration: as model complexity grows, variance
# rises while squared bias falls; their sum is the total-error curve.
variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]
bias_squared = [256, 128, 64, 32, 16, 8, 4, 2, 1]
total_error = [v + b for v, b in zip(variance, bias_squared)]
xs = list(range(len(variance)))

plt.plot(xs, variance, 'g-', label='variance')
# plt.plot(xs, bias_squared, 'r-.', label='bias^2')
plt.plot(xs, total_error, 'b:', label='total error')
plt.legend(loc=9)  # loc=9 places the legend top-center
plt.xlabel('model complexity')
plt.title('The Bias-Variance Tradeoff')
plt.show()
| {
"content_hash": "0fcbba6f371cb01007819f2e67607e4e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 61,
"avg_line_length": 25.90909090909091,
"alnum_prop": 0.6543859649122807,
"repo_name": "addamh/data-science-from-scratch",
"id": "377f926eb40c47602563ff46d4844c96346ad176",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter-3/new-line-chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4368"
}
],
"symlink_target": ""
} |
from sys import stdin, stdout
from Bot import Planner
from Bot.Game.Game import Game
from Bot.Parser import Parser
class Bot:
    """Engine loop: reads protocol lines from stdin and answers 'action'
    requests with the planner's moves."""
    def __init__(self, strategy):
        # Shared game state, fed by the parser and read by the planner.
        self.game = Game()
        self._parser = Parser(self.game)
        self._planner = Planner.create(strategy, self.game)
    def run(self):
        """Consume lines until stdin closes or EOF is raised."""
        while not stdin.closed:
            try:
                message = stdin.readline().strip()
                if not message:
                    continue
                response = self.interpret(message)
                if response:
                    self.sendMoves(response)
            except EOFError:
                return
    def interpret(self, line):
        """Dispatch one engine line; returns moves for 'action', else None."""
        if not line.startswith('action'):
            self._parser.parse(line)
            return None
        return self._planner.makeMove()
    @staticmethod
    def sendMoves(moves):
        """Emit a comma-separated move list and flush immediately."""
        stdout.write(','.join(moves) + '\n')
        stdout.flush()
if __name__ == '__main__':
    Bot("random").run()
"content_hash": "86ef514389b82d5de2b346ef60162af3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 59,
"avg_line_length": 22.58139534883721,
"alnum_prop": 0.5242018537590113,
"repo_name": "gtagency/tetris-python",
"id": "c662af69bbaf90a2b49463438be7888c2228a155",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BotRun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9971"
}
],
"symlink_target": ""
} |
import socket
import sys
import time
import threading
import select
import traceback
class Server(threading.Thread):
    """Receiver thread: waits on the connected socket and prints messages."""
    def initialise(self,receive):
        # Socket to watch; must be set after construction, before start().
        self.receive=receive
    def run(self):
        # Block on select() over the single socket and print whatever arrives.
        lis=[]
        lis.append(self.receive)
        while 1:
            read,write,err=select.select(lis,[],[])
            for item in read:
                try:
                    s=item.recv(1024)
                    if s!='':
                        chunk=s
                        print str('')+':'+chunk
                    # NOTE(review): an empty recv() (peer closed) is ignored,
                    # so select() will keep returning immediately -- confirm
                    # whether the loop should exit when s == ''.
                except:
                    # NOTE(review): bare except; prints the traceback and
                    # breaks only the inner for-loop -- the while loop spins on.
                    traceback.print_exc(file=sys.stdout)
                    break
class Client(threading.Thread):
    """Sender thread: connects to a peer, starts a Server thread to receive,
    and forwards console input until the user types 'exit'. (Python 2 code:
    raw_input and print statements.)"""
    def connect(self,host,port):
        # Establish the TCP connection used for both sending and receiving.
        self.sock.connect((host,port))
    def client(self,host,port,msg):
        # Send one chat message.
        # NOTE(review): host/port parameters are unused here -- confirm they
        # are kept only for signature symmetry.
        sent=self.sock.send(msg)
        #print "Sent\n"
    def run(self):
        self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        # Disable Nagle so small chat messages are flushed immediately.
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        try:
            host=raw_input("Enter the hostname\n>>")
            port=int(raw_input("Enter the port\n>>"))
        except EOFError:
            print "Error"
            return 1
        print "Connecting\n"
        s=''
        self.connect(host,port)
        print "Connected\n"
        # Hand the connected socket to a daemon receiver thread so incoming
        # messages print while this thread blocks on raw_input below.
        receive=self.sock
        time.sleep(1)
        srv=Server()
        srv.initialise(receive)
        srv.daemon=True
        print "Starting service"
        srv.start()
        # Console loop: empty input is skipped, 'exit' quits.
        while 1:
            #print "Waiting for message\n"
            msg=raw_input('>>')
            if msg=='exit':
                break
            if msg=='':
                continue
            #print "Sending\n"
            self.client(host,port,msg)
        return(1)
if __name__=='__main__':
    print "Starting client"
    cli=Client()
    cli.start()
| {
"content_hash": "b3b155207cb6219d318a82a8bcdcff3d",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 71,
"avg_line_length": 29.2,
"alnum_prop": 0.4863013698630137,
"repo_name": "hehaichi/p2p-chat-python",
"id": "abd8dc5c93b00ad630d2e393e319966eb3e6ae0a",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Client.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5808"
}
],
"symlink_target": ""
} |
import networkx as nx
import os.path
import scipy.sparse
from numpy import log
import logging
from src.util.FileSystem import FileSystem
from cwsite.models.models import Octave
class AstNetwork(object):
    """Similarity network over the distinct ASTs submitted for one
    (homework, part) assignment.

    Backed by a precomputed pairwise edit-distance matrix (``matrixFile``:
    one whitespace-separated integer row per line; negative entries are
    skipped by the graph builders -- presumably "distance not computed",
    confirm) and a map from AST id to the submission ids sharing that AST.
    """
    def __init__(self, part, matrixFile, idMap):
        """
        :param part: (homework_id, part_id) pair identifying the assignment.
        :param matrixFile: open file handle on the AST distance matrix.
        :param idMap: dict mapping AST id -> list of submission ids.
        """
        self.matrixFile = matrixFile
        self.subIdMap = idMap
        self.part = part
        self.stats = {}
        # Log to <logDir>/astnetwork/log, creating the directory on demand.
        logDir = os.path.join(FileSystem.getLogDir(), 'astnetwork')
        if not os.path.exists(logDir):
            os.makedirs(logDir)
        logFileName = os.path.join(logDir, 'log')
        logging.basicConfig(filename=logFileName,
                            format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p',
                            level=logging.INFO)
        logging.info('AstNetwork Initialization: (hw,part): ' + str(self.part))
    def getHomeworkName(self):
        """Return the assignment id as the string '<hwId>_<partId>'."""
        hwId = str(self.part[0])
        partId = str(self.part[1])
        return hwId + '_' + partId
    def getPart(self):
        """Return the (homework_id, part_id) pair."""
        return self.part
    def getNumAsts(self):
        """Return the number of distinct ASTs."""
        # Fix: the original returned ``distanceMatrix.shape[0]`` but no
        # ``distanceMatrix`` name exists in this class, so the method always
        # raised NameError.  The AST count is the number of keys in the
        # AST -> submissions map (one matrix row per AST).
        return len(self.subIdMap)
    def getNumStudents(self):
        """Return the total number of submissions across all ASTs."""
        numStudents = 0
        for sub in self.subIdMap:
            numStudents += len(self.subIdMap[sub])
        return numStudents
    def getInverseSubIdMap(self):
        """Invert subIdMap: submission id -> AST id."""
        inverseMap = {}
        for key in self.subIdMap:
            values = self.subIdMap[key]
            for value in values:
                inverseMap[value] = key
        return inverseMap
    def getFullGraph(self):
        """Graph including all edges with edit distance <= 30."""
        return self.getGraph(30)
    def addStat(self, statName, value):
        """Record a named statistic for this network."""
        self.stats[statName] = value
    def getStatNames(self):
        """Return the names of all recorded statistics."""
        return self.stats.keys()
    def getStats(self):
        """Return the statistic-name -> value dict."""
        return self.stats
    def saveGephi(self, withUnitTests = False):
        """Export the distance-<=5 graph as a GML file for Gephi."""
        if withUnitTests:
            graph = self.getGraphWithUnitTests(5)
        else:
            graph = self.getGraph(5)
        outPath = self._getGephiFilePath()
        print('write gephi: ' + outPath)
        nx.write_gml(graph, outPath)
    def _getGephiFilePath(self):
        # e.g. <gephiDir>/gephi_<hw>_<part>.gml
        fileDir = FileSystem.getGephiDir()
        fileName = 'gephi_' + self.getHomeworkName() + '.gml'
        return os.path.join(fileDir, fileName)
    def _getNodeMap(self, components):
        # Assign consecutive integer ids to connected components.  Returns
        # (componentId -> [node, ...], node -> componentId).
        nodeMap = {}
        reverseMap = {}
        idCounter = 0
        for component in components:
            nodeList = []
            for node in component.nodes():
                nodeList.append(node)
                reverseMap[node] = idCounter
            nodeMap[idCounter] = nodeList
            idCounter += 1
        return nodeMap, reverseMap
    def streamedMatrixExample(self):
        """Skeleton showing how to stream the matrix file row by row."""
        row = 0
        self.matrixFile.seek(0)
        while True:
            line = self.matrixFile.readline()
            if not line: break
            rowValues = list(map(int, line.strip().split()))
            # Fix: the original called ``reducedGraph.add_node(row)`` here,
            # but no ``reducedGraph`` exists in this method (copy/paste from
            # getGraphOld), so this example always raised NameError.
            for col in range(row + 1, len(rowValues)):
                value = rowValues[col]
                # do something with the value
                # ... anything :)
            row += 1
    # outputs indexed by asts
    def getOutputMap(self):
        """Map ast_id -> (correct, output) from the Octave results table."""
        logging.info('AstNetwork.getOutputMap()')
        astList = Octave.objects.filter(
            homework_id=self.part[0],
            part_id=self.part[1]).values('ast_id', 'output', 'correct')
        outputs = {}
        for ast in astList:
            outputs[ast['ast_id']] = (ast['correct'], ast['output'])
        return outputs
    # asts indexed by output
    def getInverseOutputMap(self):
        """Map output string -> [(ast_id, correct), ...]."""
        # Fix: the original logged 'AstNetwork.getOutputMap()' here
        # (copy/paste), which made the log misleading.
        logging.info('AstNetwork.getInverseOutputMap()')
        astList = Octave.objects.filter(
            homework_id=self.part[0],
            part_id=self.part[1]).values('ast_id', 'output', 'correct')
        asts = {}
        for ast in astList:
            try:
                asts[ast['output']].append((ast['ast_id'], ast['correct']))
            except KeyError:
                asts[ast['output']] = [(ast['ast_id'], ast['correct'])]
        return asts
    # return list of correct ast_ids
    def getCorrectASTids(self):
        """List of ast_ids whose stored result is marked correct."""
        logging.info('AstNetwork.getCorrectASTids()')
        astList = Octave.objects.filter(
            homework_id=self.part[0],
            part_id=self.part[1]).values('ast_id', 'correct')
        corrects = [ast['ast_id'] for ast in astList if bool(ast['correct'])]
        return corrects
    # return list of incorrect ast_ids
    def getIncorrectASTids(self):
        """List of ast_ids whose stored result is marked incorrect."""
        logging.info('AstNetwork.getIncorrectASTids()')
        astList = Octave.objects.filter(
            homework_id=self.part[0],
            part_id=self.part[1]).values('ast_id', 'correct')
        incorrects = [ast['ast_id'] for ast in astList if not bool(ast['correct'])]
        return incorrects
    def getGraph(self, maxCuttoff):
        """Build the AST graph keeping edges with 0 <= distance <= maxCuttoff.

        Node weight = number of students sharing the AST; edge weight
        = 1 / (distance + 1)^2 so closer ASTs are pulled together.
        """
        graph = nx.Graph()
        for key in self.subIdMap:
            numStudents = len(self.subIdMap[key])
            graph.add_node(key, {'weight': numStudents})
        row = 0
        self.matrixFile.seek(0)
        while True:
            line = self.matrixFile.readline()
            if not line:
                break
            rowValues = list(map(int, line.strip().split()))
            for col in range(row + 1, len(rowValues)):
                value = rowValues[col]
                if 0 <= value <= maxCuttoff:
                    dissimilarity = value + 1
                    weight = 1.0 / (dissimilarity * dissimilarity)
                    graph.add_edge(row, col, {'weight': weight})
            row += 1
        return graph
    def getGraphWithUnitTests(self, maxCutoff):
        """Like getGraph, but annotate each node with its unit-test output
        and correctness (from the Octave table) plus a log-scaled weight."""
        logging.info('AstNetwork.getGraphWithUnitTests(' + str(maxCutoff) + ')')
        graph = nx.Graph()
        outputs = self.getOutputMap()
        for key in self.subIdMap:
            numStudents = len(self.subIdMap[key])
            graph.add_node(key, {'weight': numStudents,
                                 'logWeight': log(float(numStudents)),
                                 'output': outputs[key][1],
                                 'correct': outputs[key][0]})
            logging.info('\tastId ' + str(key) + ' of ' + str(len(self.subIdMap)))
        row = 0
        self.matrixFile.seek(0)
        logging.info('\treading matrix...')
        while True:
            line = self.matrixFile.readline()
            if not line:
                break
            rowValues = list(map(int, line.strip().split()))
            for col in range(row + 1, len(rowValues)):
                value = rowValues[col]
                if 0 <= value <= maxCutoff:
                    dissimilarity = value + 1
                    weight = 1.0 / (dissimilarity * dissimilarity)
                    graph.add_edge(row, col, {'weight': weight})
            row += 1
            logging.info('\trow ' + str(row) + ' of ' + str(len(self.subIdMap)))
        return graph
    def getGraphOld(self, minCuttoff, maxCuttoff):
        """Older variant: collapse zero-distance ASTs into cluster nodes."""
        # later I should allow non zero minCuttoffs. I will need to
        # update the update of clusteredGraph edges to be the max
        # of edges seen so far between ASTs in different clusteredNodes
        assert minCuttoff == 0
        print('create reduced graph')
        row = 0
        reducedGraph = nx.Graph()
        self.matrixFile.seek(0)
        while True:
            line = self.matrixFile.readline()
            if not line:
                break
            rowValues = list(map(int, line.strip().split()))
            reducedGraph.add_node(row)
            for col in range(row + 1, len(rowValues)):
                value = rowValues[col]
                if value == 0:
                    reducedGraph.add_edge(row, col)
            row += 1
        # Connected components of the zero-distance graph become clusters.
        components = nx.connected_component_subgraphs(reducedGraph)
        nodeMap, reverseMap = self._getNodeMap(components)
        print('created clustered graph nodes')
        clusteredGraph = nx.Graph()
        for nodeId in nodeMap:
            # Cluster weight = total submissions across its member ASTs.
            numStudents = 0
            for node in nodeMap[nodeId]:
                count = len(self.subIdMap[node])
                numStudents += count
            clusteredGraph.add_node(nodeId, {'weight': numStudents})
        self.matrixFile.seek(0)
        print('create clustered graph edges')
        row = 0
        while True:
            line = self.matrixFile.readline()
            if not line:
                break
            rowValues = list(map(int, line.strip().split()))
            for col in range(row + 1, len(rowValues)):
                value = rowValues[col]
                if 0 <= value <= maxCuttoff:
                    editDistance = value
                    node1 = reverseMap[row]
                    node2 = reverseMap[col]
                    # Zero-distance pairs land in the same cluster and are
                    # skipped here, which also avoids dividing by zero below.
                    if node1 == node2: continue
                    weight = 1.0 / (editDistance * editDistance)
                    clusteredGraph.add_edge(node1, node2, {'weight': weight})
            row += 1
        return clusteredGraph
    def _getGraphDist(self, node1, node2, nodeMap):
        # Placeholder cluster-to-cluster distance (always 1).
        return 1
| {
"content_hash": "33d7206a1435723bebb105ea4f9ebf7a",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 89,
"avg_line_length": 36.86991869918699,
"alnum_prop": 0.5429988974641676,
"repo_name": "tanonev/codewebs",
"id": "e3746319be85571f78b0b6f92867c145c8f0c217",
"size": "9070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/util/AstNetwork.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "790"
},
{
"name": "C++",
"bytes": "301221"
},
{
"name": "Java",
"bytes": "479184"
},
{
"name": "Makefile",
"bytes": "5459"
},
{
"name": "Matlab",
"bytes": "50455"
},
{
"name": "Python",
"bytes": "230306"
},
{
"name": "Shell",
"bytes": "13311"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.views import generic
from models import *
from django.http import HttpResponse, HttpResponseRedirect
from home.models import Languages, Menu, MenuInfo
from django.http import Http404
import json, simplejson
import time
class IndexViews(generic.View):
    """Render the "thanks" (credits) page localized to the visitor's language."""
    templates_file = 'thanksIndex.html'

    def get(self, request):
        """Build the menu, grouped thanks entries and video thanks, then render.

        Raises Http404 when the request language has no Languages row.
        """
        lang = request.LANGUAGE_CODE
        languages = list(Languages.objects.all())
        # Resolve the Languages row matching the request locale; the page
        # cannot be localized without it.
        dlangs = list(Languages.objects.filter(text=lang))
        if not dlangs:
            raise Http404
        dlang = dlangs.pop().id
        # Top navigation: one entry per Menu row, localized via MenuInfo.
        menu_list = []
        for p in Menu.objects.all().order_by('sort'):
            menuinfos = list(MenuInfo.objects.filter(language=dlang, menu=p.id))
            # Fix: the original also called menuinfos.pop() in the empty-case
            # branch, raising IndexError whenever a menu had no localized
            # MenuInfo; fall back to an empty name (mirrors the group
            # handling below).
            name = menuinfos.pop().name if menuinfos else ""
            menu_list.append({"id": p.id, "url": p.url, "name": name})
        # Thanks entries grouped by Group, each group localized via GroupInfo.
        group_list = []
        for g in Group.objects.all().order_by('sort'):
            ginfos = list(GroupInfo.objects.filter(language=dlang, group=g.id))
            groupname = ginfos.pop().name if ginfos else ""
            thanks_list = [{'name': t.name, 'url': t.url, 'img': t.img}
                           for t in Thanks.objects.filter(group=g.id)]
            group_list.append({'group': groupname, 'thanks_list': thanks_list})
        vthankss = list(VThanks.objects.all())
        context = {
            'group_list': group_list,
            'vthankss': vthankss,
            'languages': languages,
            'lang': lang,
            'menu_list': menu_list,
        }
        return render(request,
                      self.templates_file,
                      context)
| {
"content_hash": "02ec4d98a07c01cd8fd24a07cefd66c6",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 84,
"avg_line_length": 29.670731707317074,
"alnum_prop": 0.4928072338676531,
"repo_name": "CooloiStudio/Turanga.deskxd.com",
"id": "015339d8e9fd9baed8d7c2b9661e74ed625ad163",
"size": "2433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thanks/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "181940"
},
{
"name": "HTML",
"bytes": "48376"
},
{
"name": "JavaScript",
"bytes": "116957"
},
{
"name": "Python",
"bytes": "33713"
}
],
"symlink_target": ""
} |
""" Example demonstrating turning lines on and off - with JS only
"""
import numpy as np
from bokeh.io import output_file, show
from bokeh.layouts import row
from bokeh.palettes import Viridis3
from bokeh.plotting import figure
from bokeh.models import CheckboxGroup, CustomJS
output_file("line_on_off.html", title="line_on_off.py example")
p = figure()
props = dict(line_width=4, line_alpha=0.7)
x = np.linspace(0, 4 * np.pi, 100)
l0 = p.line(x, np.sin(x), color=Viridis3[0], legend="Line 0", **props)
l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props)
l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props)
checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"],
active=[0, 1, 2], width=100)
checkbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),
lang="coffeescript", code="""
l0.visible = 0 in checkbox.active;
l1.visible = 1 in checkbox.active;
l2.visible = 2 in checkbox.active;
""")
layout = row(checkbox, p)
show(layout)
| {
"content_hash": "8047c9e2ac13a2503245e412459081cb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 33.1875,
"alnum_prop": 0.6629001883239172,
"repo_name": "ptitjano/bokeh",
"id": "18891c00df6590008ecb6a42252263ed371f5c63",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plotting/file/line_on_off.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "406989"
},
{
"name": "CoffeeScript",
"bytes": "1073573"
},
{
"name": "HTML",
"bytes": "45510"
},
{
"name": "JavaScript",
"bytes": "12173"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "2083050"
},
{
"name": "Shell",
"bytes": "15584"
},
{
"name": "TypeScript",
"bytes": "25843"
}
],
"symlink_target": ""
} |
import socket
import sys

# Hard-coded remote test endpoint.
HOST, PORT = "ec2-54-227-4-154.compute-1.amazonaws.com", 3000

# The payload is everything passed on the command line.
payload = " ".join(sys.argv[1:])

# SOCK_STREAM means a TCP socket.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # Connect, send the payload (newline-terminated), then read one reply.
    conn.connect((HOST, PORT))
    conn.sendall(payload + "\n")
    reply = conn.recv(256*1024*1024*1024)
finally:
    # Always release the socket, even if connect/send/recv failed.
    conn.close()

print("Sent: {}".format(payload))
print("Received: {}".format(len(reply)))
| {
"content_hash": "2859e21b59d2ef83be37b328e7cd5e16",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 25.285714285714285,
"alnum_prop": 0.6741996233521658,
"repo_name": "ketanbj/eapps",
"id": "7d7eaae645e066dae37e6abec08967552eb8538e",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyephserver/testFamulous.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "152138"
},
{
"name": "C++",
"bytes": "331314"
},
{
"name": "Java",
"bytes": "2744268"
},
{
"name": "JavaScript",
"bytes": "10002"
},
{
"name": "Makefile",
"bytes": "33842"
},
{
"name": "Python",
"bytes": "13777"
},
{
"name": "Shell",
"bytes": "184"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. DK metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_DK = PhoneMetadata(id='DK', country_code=45, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:[2-7]\\d|8[126-9]|9[1-36-9])\\d{6}', example_number='32123456', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:[2-7]\\d|8[126-9]|9[1-36-9])\\d{6}', example_number='20123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='80\\d{6}', example_number='80123456', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='90\\d{6}', example_number='90123456', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')],
mobile_number_portable_region=True)
| {
"content_hash": "0f5e0c445abb278babd5c4c1a3e8f3af",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 145,
"avg_line_length": 89.36363636363636,
"alnum_prop": 0.6876907426246185,
"repo_name": "gencer/python-phonenumbers",
"id": "dcd434fe85e53bfc979a02ff699437e31d4c8c1d",
"size": "983",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "python/phonenumbers/data/region_DK.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23039485"
}
],
"symlink_target": ""
} |
import json
import urllib2
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from utils import get_blocked_videos
from utils import interpolated_prec_rec
from utils import segment_iou
class ANETdetection(object):
    """Evaluator for the ActivityNet temporal action detection task.

    Loads ground truth and predictions from JSON, then computes per-class
    average precision over a range of temporal-IoU thresholds.
    (Python 2 code: print statements and dict.iteritems.)
    """
    GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
    PREDICTION_FIELDS = ['results', 'version', 'external_data']
    def __init__(self, ground_truth_filename=None, prediction_filename=None,
                 ground_truth_fields=GROUND_TRUTH_FIELDS,
                 prediction_fields=PREDICTION_FIELDS,
                 tiou_thresholds=np.linspace(0.5, 0.95, 10),
                 subset='validation', verbose=False,
                 check_status=True):
        # Both filenames are required despite the keyword defaults.
        if not ground_truth_filename:
            raise IOError('Please input a valid ground truth file.')
        if not prediction_filename:
            raise IOError('Please input a valid prediction file.')
        self.subset = subset
        self.tiou_thresholds = tiou_thresholds
        self.verbose = verbose
        self.gt_fields = ground_truth_fields
        self.pred_fields = prediction_fields
        self.ap = None
        self.check_status = check_status
        # Retrieve blocked videos from server.
        if self.check_status:
            self.blocked_videos = get_blocked_videos()
        else:
            self.blocked_videos = list()
        # Import ground truth and predictions.
        self.ground_truth, self.activity_index = self._import_ground_truth(
            ground_truth_filename)
        self.prediction = self._import_prediction(prediction_filename)
        if self.verbose:
            print '[INIT] Loaded annotations from {} subset.'.format(subset)
            nr_gt = len(self.ground_truth)
            print '\tNumber of ground truth instances: {}'.format(nr_gt)
            nr_pred = len(self.prediction)
            print '\tNumber of predictions: {}'.format(nr_pred)
            print '\tFixed threshold for tiou score: {}'.format(self.tiou_thresholds)
    def _import_ground_truth(self, ground_truth_filename):
        """Reads ground truth file, checks if it is well formatted, and returns
        the ground truth instances and the activity classes.
        Parameters
        ----------
        ground_truth_filename : str
            Full path to the ground truth json file.
        Outputs
        -------
        ground_truth : df
            Data frame containing the ground truth instances.
        activity_index : dict
            Dictionary containing class index.
        """
        with open(ground_truth_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format
        if not all([field in data.keys() for field in self.gt_fields]):
            raise IOError('Please input a valid ground truth file.')
        # Read ground truth data.
        # Labels are assigned consecutive indices in first-seen order.
        activity_index, cidx = {}, 0
        video_lst, t_start_lst, t_end_lst, label_lst = [], [], [], []
        for videoid, v in data['database'].iteritems():
            # Keep only the requested subset and skip server-blocked videos.
            if self.subset != v['subset']:
                continue
            if videoid in self.blocked_videos:
                continue
            for ann in v['annotations']:
                if ann['label'] not in activity_index:
                    activity_index[ann['label']] = cidx
                    cidx += 1
                video_lst.append(videoid)
                t_start_lst.append(float(ann['segment'][0]))
                t_end_lst.append(float(ann['segment'][1]))
                label_lst.append(activity_index[ann['label']])
        ground_truth = pd.DataFrame({'video-id': video_lst,
                                     't-start': t_start_lst,
                                     't-end': t_end_lst,
                                     'label': label_lst})
        return ground_truth, activity_index
    def _import_prediction(self, prediction_filename):
        """Reads prediction file, checks if it is well formatted, and returns
        the prediction instances.
        Parameters
        ----------
        prediction_filename : str
            Full path to the prediction json file.
        Outputs
        -------
        prediction : df
            Data frame containing the prediction instances.
        """
        with open(prediction_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format...
        if not all([field in data.keys() for field in self.pred_fields]):
            raise IOError('Please input a valid prediction file.')
        # Read predictions.
        # Labels are mapped through the index built from the ground truth;
        # an unseen label here raises KeyError.
        video_lst, t_start_lst, t_end_lst = [], [], []
        label_lst, score_lst = [], []
        for videoid, v in data['results'].iteritems():
            if videoid in self.blocked_videos:
                continue
            for result in v:
                label = self.activity_index[result['label']]
                video_lst.append(videoid)
                t_start_lst.append(float(result['segment'][0]))
                t_end_lst.append(float(result['segment'][1]))
                label_lst.append(label)
                score_lst.append(result['score'])
        prediction = pd.DataFrame({'video-id': video_lst,
                                   't-start': t_start_lst,
                                   't-end': t_end_lst,
                                   'label': label_lst,
                                   'score': score_lst})
        return prediction
    def _get_predictions_with_label(self, prediction_by_label, label_name, cidx):
        """Get all predictions of the given label. Return empty DataFrame if
        there are no predictions with the given label.
        """
        try:
            return prediction_by_label.get_group(cidx).reset_index(drop=True)
        except:
            # NOTE(review): bare except -- any error (not just a missing
            # group) is reported as "no predictions"; confirm this is meant
            # to catch only KeyError from get_group.
            print 'Warning: No predictions of label \'%s\' were provdied.' % label_name
            return pd.DataFrame()
    def wrapper_compute_average_precision(self):
        """Computes average precision for each class in the subset.
        """
        # ap[t, c] = AP for tIoU threshold t and class c.
        ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
        # Adaptation to query faster
        ground_truth_by_label = self.ground_truth.groupby('label')
        prediction_by_label = self.prediction.groupby('label')
        # One joblib worker per class; each computes that class's AP curve.
        results = Parallel(n_jobs=len(self.activity_index))(
                    delayed(compute_average_precision_detection)(
                        ground_truth=ground_truth_by_label.get_group(cidx).reset_index(drop=True),
                        prediction=self._get_predictions_with_label(prediction_by_label, label_name, cidx),
                        tiou_thresholds=self.tiou_thresholds,
                    ) for label_name, cidx in self.activity_index.items())
        for i, cidx in enumerate(self.activity_index.values()):
            ap[:,cidx] = results[i]
        return ap
    def evaluate(self):
        """Evaluates a prediction file. For the detection task we measure the
        interpolated mean average precision to measure the performance of a
        method.
        """
        self.ap = self.wrapper_compute_average_precision()
        # mAP per tIoU threshold, then the mean over thresholds.
        self.mAP = self.ap.mean(axis=1)
        self.average_mAP = self.mAP.mean()
        if self.verbose:
            print '[RESULTS] Performance on ActivityNet detection task.'
            print '\tAverage-mAP: {}'.format(self.average_mAP)
def compute_average_precision_detection(ground_truth, prediction, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """Compute average precision (detection task) between ground truth and
    predictions data frames. If multiple predictions occur for the same
    predicted segment, only the one with highest score is matched as
    true positive. This code is greatly inspired by Pascal VOC devkit.

    Parameters
    ----------
    ground_truth : df
        Data frame containing the ground truth instances.
        Required fields: ['video-id', 't-start', 't-end']
    prediction : df
        Data frame containing the prediction instances.
        Required fields: ['video-id', 't-start', 't-end', 'score']
    tiou_thresholds : 1darray, optional
        Temporal intersection over union thresholds.

    Outputs
    -------
    ap : 1darray
        Average precision score for each tIoU threshold.
    """
    ap = np.zeros(len(tiou_thresholds))
    if prediction.empty:
        return ap
    npos = float(len(ground_truth))
    # lock_gt[t, g] holds the index of the prediction that claimed ground
    # truth g at threshold t, or -1 while g is still unmatched.
    lock_gt = np.ones((len(tiou_thresholds), len(ground_truth))) * -1
    # Sort predictions by decreasing score order.  argsort yields positional
    # indices, so iloc (not loc) is the correct accessor even if the incoming
    # frame does not carry a default 0..n-1 index.
    sort_idx = prediction['score'].values.argsort()[::-1]
    prediction = prediction.iloc[sort_idx].reset_index(drop=True)
    # Initialize true positive and false positive vectors.
    tp = np.zeros((len(tiou_thresholds), len(prediction)))
    fp = np.zeros((len(tiou_thresholds), len(prediction)))
    # Adaptation to query faster.
    ground_truth_gbvn = ground_truth.groupby('video-id')
    # Assigning true positive to truly ground truth instances.
    for idx, this_pred in prediction.iterrows():
        try:
            # Check if there is at least one ground truth in the video.
            ground_truth_videoid = ground_truth_gbvn.get_group(this_pred['video-id'])
        except KeyError:
            # No ground truth for this video: the prediction is a false
            # positive at every threshold.
            fp[:, idx] = 1
            continue
        this_gt = ground_truth_videoid.reset_index()
        tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
                               this_gt[['t-start', 't-end']].values)
        # We would like to retrieve the predictions with highest tiou score.
        tiou_sorted_idx = tiou_arr.argsort()[::-1]
        for tidx, tiou_thr in enumerate(tiou_thresholds):
            for jdx in tiou_sorted_idx:
                if tiou_arr[jdx] < tiou_thr:
                    # Candidates are visited in decreasing tIoU order, so no
                    # later candidate can pass this threshold either.
                    fp[tidx, idx] = 1
                    break
                if lock_gt[tidx, this_gt.loc[jdx]['index']] >= 0:
                    # Already claimed by a higher-scoring prediction.
                    continue
                # Assign as true positive after the filters above.
                tp[tidx, idx] = 1
                lock_gt[tidx, this_gt.loc[jdx]['index']] = idx
                break
            if fp[tidx, idx] == 0 and tp[tidx, idx] == 0:
                # Every overlapping ground truth was locked: false positive.
                fp[tidx, idx] = 1
    # Builtin float replaces np.float, which was removed from modern NumPy.
    tp_cumsum = np.cumsum(tp, axis=1).astype(float)
    fp_cumsum = np.cumsum(fp, axis=1).astype(float)
    recall_cumsum = tp_cumsum / npos
    precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
    for tidx in range(len(tiou_thresholds)):
        ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx, :], recall_cumsum[tidx, :])
    return ap
| {
"content_hash": "eb004ecfc6b752a588899febabda3f3e",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 110,
"avg_line_length": 40.426923076923075,
"alnum_prop": 0.58129578536771,
"repo_name": "activitynet/ActivityNet",
"id": "6c83c178bc39cbfa6da5e753dca1b72c7bb9c459",
"size": "10511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Evaluation/eval_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "22855212"
},
{
"name": "Python",
"bytes": "202438"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
} |
""" Course API module
This module offers a number of URL endpoints with which
you are able to GET, POST, PUT and DELETE all the content
of a course. There are other methods available in other
modules, but these are easier and simpler to use.
"""
from models import custom_modules
from modules.um_course_api import handlers
def register_module():
    """Register this module in the registry.

    Builds the list of globally routed URL handlers for the Course API,
    wraps them in a ``custom_modules.Module`` stored in the module-level
    ``custom_module`` global, and returns that module object.
    """
    course_handler = handlers.CoursesAPIHandler
    unit_handler = handlers.UnitsAPIHandler
    global_urls = [
        ('/api/courses', course_handler),
        ('/api/courses/(.*)', course_handler),
        ('/api/courses/(.*)/units', unit_handler),
        ('/api/courses/(.*)/units/(.*)', unit_handler),
    ]
    global custom_module
    custom_module = custom_modules.Module(
        'Course API',
        'A set of URL endpoints to access all the course data.',
        global_urls,
        [])  # This module registers no course-scoped URLs.
    return custom_module
| {
"content_hash": "b43c1f4cd2100ad8685a01bfa2e604af",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 29.774193548387096,
"alnum_prop": 0.6576381365113759,
"repo_name": "UniMOOC/AAClassroom",
"id": "cd20f9c1bad93d17dcaf767d513a144291a3a247",
"size": "1518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/um_course_api/um_course_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "72417"
},
{
"name": "CSS",
"bytes": "145725"
},
{
"name": "HTML",
"bytes": "275155"
},
{
"name": "JavaScript",
"bytes": "529343"
},
{
"name": "Python",
"bytes": "3267609"
},
{
"name": "Shell",
"bytes": "18536"
}
],
"symlink_target": ""
} |
"""
A driver for XenServer or Xen Cloud Platform.
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import math
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import units
import six.moves.urllib.parse as urlparse
from nova.i18n import _, _LE, _LW
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import driver
from nova.virt.xenapi.client import session
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# Configuration options for connecting to and tuning the XenAPI driver.
# All of them are registered under the 'xenserver' option group below.
xenapi_opts = [
    cfg.StrOpt('connection_url',
               help='URL for connection to XenServer/Xen Cloud Platform. '
                    'A special value of unix://local can be used to connect '
                    'to the local unix socket. '
                    'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('connection_password',
               help='Password for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver',
               secret=True),
    cfg.FloatOpt('vhd_coalesce_poll_interval',
                 default=5.0,
                 help='The interval used for polling of coalescing vhds. '
                      'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('check_host',
                default=True,
                help='Ensure compute service is running on host XenAPI '
                     'connects to.'),
    cfg.IntOpt('vhd_coalesce_max_attempts',
               default=20,
               help='Max number of times to poll for VHD to coalesce. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('sr_base_path',
               default='/var/run/sr-mount',
               help='Base path to the storage repository'),
    cfg.StrOpt('target_host',
               help='The iSCSI Target Host'),
    cfg.StrOpt('target_port',
               default='3260',
               help='The iSCSI Target Port, default is port 3260'),
    cfg.StrOpt('iqn_prefix',
               default='iqn.2010-10.org.openstack',
               help='IQN Prefix'),
    # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
    # when we pull support for it, we should remove this
    cfg.BoolOpt('remap_vbd_dev',
                default=False,
                help='Used to enable the remapping of VBD dev '
                     '(Works around an issue in Ubuntu Maverick)'),
    cfg.StrOpt('remap_vbd_dev_prefix',
               default='sd',
               help='Specify prefix to remap VBD dev to '
                    '(ex. /dev/xvdb -> /dev/sdb)'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts, 'xenserver')
CONF.import_opt('host', 'nova.netconf')
# Coefficients of the linear model used by
# XenAPIDriver.estimate_instance_overhead() to predict per-VM memory
# overhead in MB: base + per-MB-of-RAM term + per-vCPU term.
OVERHEAD_BASE = 3
OVERHEAD_PER_MB = 0.00781
OVERHEAD_PER_VCPU = 1.5
class XenAPIDriver(driver.ComputeDriver):
    """A connection to XenServer or Xen Cloud Platform.

    Thin facade over the xenapi sub-modules: nearly every compute-driver
    operation is delegated to ``vmops`` (VM lifecycle), ``volumeops``
    (block storage), ``host`` (host management) or ``pool`` (resource
    pools).  State shared between calls is limited to the XenAPI session
    and a few lazily cached host attributes.
    """
    def __init__(self, virtapi, read_only=False):
        # ``read_only`` is accepted for interface compatibility; it is not
        # referenced anywhere in this constructor.
        super(XenAPIDriver, self).__init__(virtapi)
        url = CONF.xenserver.connection_url
        username = CONF.xenserver.connection_username
        password = CONF.xenserver.connection_password
        # connection_username has a default ('root'), so only the URL and
        # password are actually validated here.
        if not url or password is None:
            raise Exception(_('Must specify connection_url, '
                              'connection_username (optionally), and '
                              'connection_password to use '
                              'compute_driver=xenapi.XenAPIDriver'))
        self._session = session.XenAPISession(url, username, password)
        self._volumeops = volumeops.VolumeOps(self._session)
        # Built lazily by the host_state property.
        self._host_state = None
        self._host = host.Host(self._session, self.virtapi)
        self._vmops = vmops.VMOps(self._session, self.virtapi)
        # iSCSI initiator IQN and hostname, cached by get_volume_connector.
        self._initiator = None
        self._hypervisor_hostname = None
        self._pool = pool.ResourcePool(self._session, self.virtapi)
    @property
    def host_state(self):
        """Lazily create and cache the HostState for this session."""
        if not self._host_state:
            self._host_state = host.HostState(self._session)
        return self._host_state
    def init_host(self, host):
        # Optionally verify this compute service runs on the host the
        # XenAPI session points at, then clean up leftover VDI attachments.
        if CONF.xenserver.check_host:
            vm_utils.ensure_correct_host(self._session)
        try:
            vm_utils.cleanup_attached_vdis(self._session)
        except Exception:
            # Best-effort cleanup: log and continue starting the service.
            LOG.exception(_LE('Failure while cleaning up attached VDIs'))
    def instance_exists(self, instance):
        """Checks existence of an instance on the host.
        :param instance: The instance to lookup
        Returns True if supplied instance exists on the host, False otherwise.
        NOTE(belliott): This is an override of the base method for
        efficiency.
        """
        return self._vmops.instance_exists(instance.name)
    def estimate_instance_overhead(self, instance_info):
        """Get virtualization overhead required to build an instance of the
        given flavor.
        :param instance_info: Instance/flavor to calculate overhead for.
        :returns: Overhead memory in MB.
        """
        # XenServer memory overhead is proportional to the size of the
        # VM. Larger flavor VMs become more efficient with respect to
        # overhead.
        # interpolated formula to predict overhead required per vm.
        # based on data from:
        # https://wiki.openstack.org/wiki/XenServer/Overhead
        # Some padding is done to each value to fit all available VM data
        memory_mb = instance_info['memory_mb']
        vcpus = instance_info.get('vcpus', 1)
        overhead = ((memory_mb * OVERHEAD_PER_MB) + (vcpus * OVERHEAD_PER_VCPU)
                    + OVERHEAD_BASE)
        # Round up so the reported overhead never understates the need.
        overhead = math.ceil(overhead)
        return {'memory_mb': overhead}
    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()
    def list_instance_uuids(self):
        """Get the list of nova instance uuids for VMs found on the
        hypervisor.
        """
        return self._vmops.list_instance_uuids()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None,
              flavor=None):
        """Create VM instance."""
        # NOTE: the ``flavor`` argument is not forwarded to vmops.
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        # TODO(Vek): Need to pass context in for access to auth_token
        self._vmops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a resize."""
        # NOTE(vish): Xen currently does not use network info.
        self._vmops.finish_revert_migration(context, instance,
                                            block_device_info,
                                            power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance."""
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta, resize_instance,
                                     block_device_info, power_on)
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, image_id, update_task_state)
    def post_interrupted_snapshot_cleanup(self, context, instance):
        """Cleans up any resources left after a failed snapshot."""
        self._vmops.post_interrupted_snapshot_cleanup(context, instance)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot VM instance."""
        self._vmops.reboot(instance, reboot_type,
                           bad_volumes_callback=bad_volumes_callback)
    def set_admin_password(self, instance, new_pass):
        """Set the root/admin password on the VM instance."""
        self._vmops.set_admin_password(instance, new_pass)
    def inject_file(self, instance, b64_path, b64_contents):
        """Create a file on the VM instance. The file path and contents
        should be base64-encoded.
        """
        self._vmops.inject_file(instance, b64_path, b64_contents)
    def change_instance_metadata(self, context, instance, diff):
        """Apply a diff to the instance metadata."""
        self._vmops.change_instance_metadata(instance, diff)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Destroy VM instance."""
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        # Nothing to do here: destroy() already releases everything.
        pass
    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Transfers the VHD of a running instance to another host, then shuts
        off the instance copies over the COW disk
        """
        # NOTE(vish): Xen currently does not use network info.
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        return self._vmops.migrate_disk_and_power_off(context, instance,
                                                      dest, flavor, block_device_info)
    def suspend(self, instance):
        """suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, context, instance, network_info, block_device_info=None):
        """resume the specified instance."""
        self._vmops.resume(instance)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance."""
        self._vmops.rescue(context, instance, network_info, image_meta,
                           rescue_password)
    def set_bootable(self, instance, is_bootable):
        """Set the ability to power on/off an instance."""
        self._vmops.set_bootable(instance, is_bootable)
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance."""
        self._vmops.unrescue(instance)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        self._vmops.power_off(instance)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        self._vmops.power_on(instance)
    def soft_delete(self, instance):
        """Soft delete the specified instance."""
        self._vmops.soft_delete(instance)
    def restore(self, instance):
        """Restore the specified instance."""
        self._vmops.restore(instance)
    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances."""
        self._vmops.poll_rebooting_instances(timeout, instances)
    def reset_network(self, instance):
        """reset networking for specified instance."""
        self._vmops.reset_network(instance)
    def inject_network_info(self, instance, nw_info):
        """inject network info for specified instance."""
        self._vmops.inject_network_info(instance, nw_info)
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        self._vmops.plug_vifs(instance, network_info)
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        self._vmops.unplug_vifs(instance, network_info)
    def get_info(self, instance):
        """Return data about VM instance."""
        return self._vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_diagnostics(instance)
    def get_instance_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_instance_diagnostics(instance)
    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
        running VM.
        """
        # we only care about VMs that correspond to a nova-managed
        # instance:
        imap = {inst['name']: inst['uuid'] for inst in instances}
        bwcounters = []
        # get a dictionary of instance names.  values are dictionaries
        # of mac addresses with values that are the bw counters:
        # e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
        all_counters = self._vmops.get_all_bw_counters()
        # NOTE: iteritems() is Python-2-only; this file predates Python 3.
        for instance_name, counters in all_counters.iteritems():
            if instance_name in imap:
                # yes these are stats for a nova-managed vm
                # correlate the stats with the nova instance uuid:
                for vif_counter in counters.values():
                    vif_counter['uuid'] = imap[instance_name]
                    bwcounters.append(vif_counter)
        return bwcounters
    def get_console_output(self, context, instance):
        """Return snapshot of console."""
        return self._vmops.get_console_output(instance)
    def get_vnc_console(self, context, instance):
        """Return link to instance's VNC console."""
        return self._vmops.get_vnc_console(instance)
    def get_volume_connector(self, instance):
        """Return volume connector information."""
        # The initiator IQN and hostname are looked up once and cached on
        # the driver; a refresh is attempted whenever either is missing.
        if not self._initiator or not self._hypervisor_hostname:
            stats = self.host_state.get_host_stats(refresh=True)
            try:
                self._initiator = stats['host_other-config']['iscsi_iqn']
                self._hypervisor_hostname = stats['host_hostname']
            except (TypeError, KeyError) as err:
                LOG.warning(_LW('Could not determine key: %s'), err,
                            instance=instance)
                self._initiator = None
        return {
            'ip': self._get_block_storage_ip(),
            'initiator': self._initiator,
            'host': self._hypervisor_hostname
        }
    def _get_block_storage_ip(self):
        # If CONF.my_block_storage_ip is set, use it.
        if CONF.my_block_storage_ip != CONF.my_ip:
            return CONF.my_block_storage_ip
        return self.get_host_ip_addr()
    @staticmethod
    def get_host_ip_addr():
        # The host address is the network location of the configured
        # XenAPI connection URL.
        xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
        return xs_url.netloc
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach volume storage to VM instance."""
        self._volumeops.attach_volume(connection_info,
                                      instance['name'],
                                      mountpoint)
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach volume storage from VM instance."""
        self._volumeops.detach_volume(connection_info,
                                      instance['name'],
                                      mountpoint)
    def get_console_pool_info(self, console_type):
        # Console access reuses the XenAPI connection credentials.
        xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
        return {'address': xs_url.netloc,
                'username': CONF.xenserver.connection_username,
                'password': CONF.xenserver.connection_password}
    def get_available_resource(self, nodename):
        """Retrieve resource information.
        This method is called when nova-compute launches, and
        as part of a periodic task that records the results in the DB.
        :param nodename: ignored in this driver
        :returns: dictionary describing resources
        """
        host_stats = self.host_state.get_host_stats(refresh=True)
        # Updating host information
        total_ram_mb = host_stats['host_memory_total'] / units.Mi
        # NOTE(belliott) memory-free-computed is a value provided by XenServer
        # for gauging free memory more conservatively than memory-free.
        free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi
        total_disk_gb = host_stats['disk_total'] / units.Gi
        used_disk_gb = host_stats['disk_used'] / units.Gi
        allocated_disk_gb = host_stats['disk_allocated'] / units.Gi
        hyper_ver = utils.convert_version_to_int(self._session.product_version)
        dic = {'vcpus': host_stats['host_cpu_info']['cpu_count'],
               'memory_mb': total_ram_mb,
               'local_gb': total_disk_gb,
               'vcpus_used': host_stats['vcpus_used'],
               'memory_mb_used': total_ram_mb - free_ram_mb,
               'local_gb_used': used_disk_gb,
               'hypervisor_type': 'xen',
               'hypervisor_version': hyper_ver,
               'hypervisor_hostname': host_stats['host_hostname'],
               # Todo(bobba) cpu_info may be in a format not supported by
               # arch_filter.py - see libvirt/driver.py get_cpu_info
               'cpu_info': jsonutils.dumps(host_stats['host_cpu_info']),
               'disk_available_least': total_disk_gb - allocated_disk_gb,
               'supported_instances': jsonutils.dumps(
                   host_stats['supported_instances']),
               'pci_passthrough_devices': jsonutils.dumps(
                   host_stats['pci_passthrough_devices']),
               'numa_topology': None}
        return dic
    def ensure_filtering_rules_for_instance(self, instance, network_info):
        # NOTE(salvatore-orlando): it enforces security groups on
        # host initialization and live migration.
        # In XenAPI we do not assume instances running upon host initialization
        return
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False, disk_over_commit=False):
        """Check if it is possible to execute live migration.
        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance object
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        """
        return self._vmops.check_can_live_migrate_destination(context,
                                                              instance,
                                                              block_migration,
                                                              disk_over_commit)
    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls
        :param context: security context
        :param disk_over_commit: if true, allow disk over commit
        """
        # No destination-side cleanup is needed for XenAPI.
        pass
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data, block_device_info=None):
        """Check if it is possible to execute live migration.
        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.
        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
            includes the block_migration flag
        :param block_device_info: result of _get_instance_block_device_info
        """
        return self._vmops.check_can_live_migrate_source(context, instance,
                                                         dest_check_data)
    def get_instance_disk_info(self, instance,
                               block_device_info=None):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us.
        """
        pass
    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Performs the live migration of the specified instance.
        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param post_method:
            post operation method.
            expected nova.compute.manager._post_live_migration.
        :param recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager._rollback_live_migration.
        :param block_migration: if true, migrate VM disk.
        :param migrate_data: implementation specific params
        """
        self._vmops.live_migrate(context, instance, dest, post_method,
                                 recover_method, block_migration, migrate_data)
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        # NOTE(johngarbutt) Destroying the VM is not appropriate here
        # and in the cases where it might make sense,
        # XenServer has already done it.
        # TODO(johngarbutt) investigate if any cleanup is required here
        pass
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        """Preparation live migration.
        :param block_device_info:
            It must be the result of _get_instance_volume_bdms()
            at compute manager.
        """
        # TODO(JohnGarbutt) look again when boot-from-volume hits trunk
        pre_live_migration_result = {}
        pre_live_migration_result['sr_uuid_map'] = \
            self._vmops.connect_block_device_volumes(block_device_info)
        return pre_live_migration_result
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Post operation of live migration at source host.
        :param context: security context
        :instance: instance object that was migrated
        :block_device_info: instance block device information
        :param migrate_data: if not None, it is a dict which has data
        """
        self._vmops.post_live_migration(context, instance, migrate_data)
    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Post operation of live migration at destination host.
        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        """
        # NOTE(review): ``block_device_info`` is passed twice below while
        # ``block_migration`` is never forwarded -- this looks like the 4th
        # positional argument should be ``block_migration``; confirm against
        # the signature of vmops.post_live_migration_at_destination.
        self._vmops.post_live_migration_at_destination(context, instance,
                network_info, block_device_info, block_device_info)
    def unfilter_instance(self, instance, network_info):
        """Removes security groups configured for an instance."""
        return self._vmops.unfilter_instance(instance, network_info)
    def refresh_security_group_rules(self, security_group_id):
        """Updates security group rules for all instances associated with a
        given security group.
        Invoked when security group rules are updated.
        """
        return self._vmops.refresh_security_group_rules(security_group_id)
    def refresh_security_group_members(self, security_group_id):
        """Updates security group rules for all instances associated with a
        given security group.
        Invoked when instances are added/removed to a security group.
        """
        return self._vmops.refresh_security_group_members(security_group_id)
    def refresh_instance_security_rules(self, instance):
        """Updates security group rules for specified instance.
        Invoked when instances are added/removed to a security group
        or when a rule is added/removed to a security group.
        """
        return self._vmops.refresh_instance_security_rules(instance)
    def refresh_provider_fw_rules(self):
        return self._vmops.refresh_provider_fw_rules()
    def get_available_nodes(self, refresh=False):
        # A XenAPI driver manages exactly one hypervisor node.
        stats = self.host_state.get_host_stats(refresh=refresh)
        return [stats["hypervisor_hostname"]]
    def host_power_action(self, action):
        """The only valid values for 'action' on XenServer are 'reboot' or
        'shutdown', even though the API also accepts 'startup'. As this is
        not technically possible on XenServer, since the host is the same
        physical machine as the hypervisor, if this is requested, we need to
        raise an exception.
        """
        if action in ("reboot", "shutdown"):
            return self._host.host_power_action(action)
        else:
            msg = _("Host startup on XenServer is not supported.")
            raise NotImplementedError(msg)
    def set_host_enabled(self, enabled):
        """Sets the compute host's ability to accept new instances."""
        return self._host.set_host_enabled(enabled)
    def get_host_uptime(self, host):
        """Returns the result of calling "uptime" on the target host."""
        return self._host.get_host_uptime(host)
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        return self._host.host_maintenance_mode(host, mode)
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        return self._pool.remove_from_aggregate(context,
                                                aggregate, host, **kwargs)
    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error=True):
        """Undo aggregate operation when pool error raised."""
        return self._pool.undo_aggregate_operation(context, op,
                                                   aggregate, host, set_error)
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted."""
        self._vmops.power_on(instance)
    def get_per_instance_usage(self):
        """Get information about instance resource usage.
        :returns: dict of nova uuid => dict of usage info
        """
        return self._vmops.get_per_instance_usage()
| {
"content_hash": "3c0861fc6db35a0b400e5571c7d65a8e",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 79,
"avg_line_length": 42.72645739910314,
"alnum_prop": 0.6049538203190596,
"repo_name": "projectcalico/calico-nova",
"id": "fd56bc15debca92c534692176d80067d8adcf960",
"size": "29239",
"binary": false,
"copies": "2",
"ref": "refs/heads/calico-readme",
"path": "nova/virt/xenapi/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15232446"
},
{
"name": "Shell",
"bytes": "20717"
},
{
"name": "Smarty",
"bytes": "489680"
}
],
"symlink_target": ""
} |
import re
from django import forms
from django.forms.util import ErrorList
from django.forms.widgets import HiddenInput, SelectMultiple
from crits.core import form_consts
from crits.core.handlers import get_source_names, get_item_names, ui_themes
from crits.core.user_role import UserRole
from crits.core.user_tools import get_user_organization
from crits.config.config import CRITsConfig
from crits import settings
def add_bucketlist_to_form(input_form):
    """
    Add a bucket_list field to a form.

    The form is mutated in place: an optional CharField is stored under the
    bucket-list variable name from :mod:`crits.core.form_consts`.

    :param input_form: The form to add to.
    :type input_form: :class:`django.forms.Form`
    :returns: :class:`django.forms.Form`
    """
    bucket_field = forms.CharField(
        widget=forms.TextInput,
        required=False,
        label=form_consts.Common.BUCKET_LIST,
        help_text="Use comma separated values.")
    input_form.fields[form_consts.Common.BUCKET_LIST_VARIABLE_NAME] = bucket_field
def add_ticket_to_form(input_form):
    """
    Add a tickets field to a form.

    The form is mutated in place: an optional CharField is stored under the
    ticket variable name from :mod:`crits.core.form_consts`.

    :param input_form: The form to add to.
    :type input_form: :class:`django.forms.Form`
    :returns: :class:`django.forms.Form`
    """
    ticket_field = forms.CharField(
        widget=forms.TextInput,
        required=False,
        label=form_consts.Common.TICKET,
        help_text="Use comma separated values.")
    input_form.fields[form_consts.Common.TICKET_VARIABLE_NAME] = ticket_field
class AddSourceForm(forms.Form):
    """
    Django form for adding a new source to CRITs.
    """
    # CSS classes Django applies when rendering errored/required fields.
    error_css_class = 'error'
    required_css_class = 'required'
    # Name of the new source; free-form text, always required.
    source = forms.CharField(widget=forms.TextInput, required=True)
class AddReleasabilityForm(forms.Form):
    """
    Django form for adding a new releasability instance to a top-level object.
    """
    # CSS classes Django applies when rendering errored/required fields.
    error_css_class = 'error'
    required_css_class = 'required'
    # Choices are filled per-user in __init__ below.
    source = forms.ChoiceField(required=True, widget=forms.Select)
    def __init__(self, username, *args, **kwargs):
        """
        Limit the source choices to the sources visible to ``username``.
        """
        super(AddReleasabilityForm, self).__init__(*args, **kwargs)
        self.fields['source'].choices = [(c.name,
                                          c.name) for c in get_source_names(True,
                                                                            True,
                                                                            username)]
class NavMenuForm(forms.Form):
    """
    Django form for the user preferences navigation menu.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    # Default color codes for the top menu.  DEFAULT_TEXT_COLOR fixes the
    # misspelled DEFAULT_TExT_COLOR name so it matches its sibling
    # constants; the old name is kept as an alias for backward
    # compatibility with any external references.
    DEFAULT_TEXT_COLOR = "#FFF"
    DEFAULT_TExT_COLOR = DEFAULT_TEXT_COLOR
    DEFAULT_BACKGROUND_COLOR = '#464646'
    DEFAULT_HOVER_TEXT_COLOR = '#39F'
    DEFAULT_HOVER_BACKGROUND_COLOR = '#6F6F6F'
    nav_menu = forms.ChoiceField(widget=forms.RadioSelect(), initial="default",
                                 help_text="Colors currently only work with topmenu. \
                                 Examples of valid color codes: #39F or #9AAED8.")
    text_color = forms.CharField(label="Text Color", initial=DEFAULT_TEXT_COLOR,
                                 help_text="Default: " + DEFAULT_TEXT_COLOR)
    background_color = forms.CharField(label="Background Color", initial=DEFAULT_BACKGROUND_COLOR,
                                       help_text="Default: " + DEFAULT_BACKGROUND_COLOR)
    hover_text_color = forms.CharField(label="Hover Text Color", initial=DEFAULT_HOVER_TEXT_COLOR,
                                       help_text="Default: " + DEFAULT_HOVER_TEXT_COLOR)
    hover_background_color = forms.CharField(label="Hover Background Color", initial=DEFAULT_HOVER_BACKGROUND_COLOR,
                                             help_text="Default: " + DEFAULT_HOVER_BACKGROUND_COLOR)
    def __init__(self, request, *args, **kwargs):
        """
        Seed the field initials from the requesting user's saved nav prefs.
        """
        super(NavMenuForm, self).__init__(*args, **kwargs)
        prefs = request.user.prefs
        for k in prefs.nav:
            if k in self.fields:
                self.fields[k].initial = prefs.nav[k]
        self.fields['nav_menu'].choices = [('default','default'),
                                           ('topmenu','topmenu')]
    def clean(self):
        """
        Validate that every color field contains a valid hex color code.
        """
        cleaned_data = super(NavMenuForm, self).clean()
        # Closure over self; the original passed self explicitly to a nested
        # function, which worked but obscured that this is instance-bound.
        def check_hex_color(color_code, field_name):
            # Accept 3- or 6-digit hex codes, e.g. #39F or #9AAED8.
            if not re.match('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_code):
                self._errors.setdefault(field_name, ErrorList())
                self._errors[field_name].append("This is not a valid color code. Valid examples: #39F or #9AAED8")
        check_hex_color(cleaned_data.get('text_color'), 'text_color')
        check_hex_color(cleaned_data.get('background_color'), 'background_color')
        check_hex_color(cleaned_data.get('hover_text_color'), 'hover_text_color')
        check_hex_color(cleaned_data.get('hover_background_color'), 'hover_background_color')
        return cleaned_data
class PrefUIForm(forms.Form):
    """
    Django form for the user preferences interface.

    Lets the user pick a UI theme and a table page size; initial values are
    seeded from ``request.user.prefs.ui``.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    theme = forms.ChoiceField(required=True, widget=forms.Select,
                              initial="default")
    # layeredthemes = forms.MultipleChoiceField(required=True,
    #                                           label="Layer Themes",
    #                                           help_text="Pick Themes to use",
    #                                           widget=forms.SelectMultiple)
    table_page_size = forms.IntegerField(required=True, min_value=2,
                                         max_value=10000, initial=25)

    def __init__(self, request, *args, **kwargs):
        super(PrefUIForm, self).__init__(*args, **kwargs)
        # Seed field defaults from the user's stored UI preferences.
        prefs = request.user.prefs
        for name in prefs.ui:
            if name in self.fields:
                self.fields[name].initial = prefs.ui[name]
        # self.fields['layeredthemes'].choices = self.fields['theme'].choices
        self.fields['theme'].choices = [(theme, theme) for theme in ui_themes()]
class ToastNotificationConfigForm(forms.Form):
    """
    Django form for the user toast notifications.

    Initial values are seeded from ``request.user.prefs.toast_notifications``
    when that preference group exists.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    enabled = forms.BooleanField(initial=True, required=False)
    max_visible_notifications = forms.IntegerField(min_value=1,
                                                   max_value=10,
                                                   initial=5,
                                                   required=False,
                                                   label="Max Visible Notifications")
    acknowledgement_type = forms.ChoiceField(widget=forms.Select,
                                             initial="sticky",
                                             required=False,
                                             label="Acknowledgement Type")
    notification_anchor_location = forms.ChoiceField(widget=forms.Select,
                                                     initial="bottom_right",
                                                     required=False,
                                                     label="Anchor Location")
    newer_notifications_location = forms.ChoiceField(widget=forms.Select,
                                                     initial="top",
                                                     required=False,
                                                     label="Newer Notifications Located")
    initial_notifications_display = forms.ChoiceField(widget=forms.Select,
                                                      initial="show",
                                                      required=False,
                                                      label="On New Notifications")
    timeout = forms.IntegerField(min_value=5,
                                 max_value=3600,
                                 initial=30,
                                 required=False,
                                 label="Timeout (in seconds)",
                                 help_text="Used only if Acknowledgement Type is set to 'timeout'")

    def __init__(self, request, *args, **kwargs):
        super(ToastNotificationConfigForm, self).__init__(*args, **kwargs)
        prefs = request.user.prefs
        if hasattr(prefs, 'toast_notifications'):
            for name in prefs.toast_notifications:
                if name in self.fields:
                    self.fields[name].initial = prefs.toast_notifications[name]
        # Each choice field offers a fixed pair of options; the label of each
        # option is the option value itself.
        option_table = {
            'acknowledgement_type': ("sticky", "timeout"),
            'notification_anchor_location': ("top_right", "bottom_right"),
            'newer_notifications_location': ("top", "bottom"),
            'initial_notifications_display': ("show", "hide"),
        }
        for field_name, options in option_table.items():
            self.fields[field_name].choices = [(opt, opt) for opt in options]
class AddUserRoleForm(forms.Form):
    """
    Django form for adding a new user role.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    # Name of the role to create.
    role = forms.CharField(widget=forms.TextInput, required=True)
class DownloadFileForm(forms.Form):
    """
    Django form for downloading a top-level object.

    The three traversal limits (depth / total objects / relationships) are
    capped by the CRITs configuration, falling back to the settings module,
    and the caps are substituted into each field's "Max: %i" help text.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    obj_type = forms.CharField(widget=HiddenInput)
    obj_id = forms.CharField(widget=HiddenInput)
    objects = forms.MultipleChoiceField(required=True, label="Objects",
                                        help_text="Objects to collect",
                                        widget=forms.SelectMultiple)
    depth_limit = forms.CharField(widget=forms.TextInput, required=False,
                                  label="Depth",
                                  initial=0,
                                  help_text="Depth levels to traverse.<br />" +
                                            "0 for this object only. Max: %i")
    total_limit = forms.CharField(widget=forms.TextInput, required=False,
                                  label="Maximum",
                                  help_text="Total objects to return. Max: %i")
    rel_limit = forms.CharField(widget=forms.TextInput, required=False,
                                label="Relationships",
                                help_text="If an object has more relationships<br />" +
                                          "than this, ignore it. Max: %i")
    rst_fmt = forms.ChoiceField(choices=[("zip", "zip"),
                                         ("stix", "STIX"),
                                         ("stix_no_bin", "STIX (no binaries)")],
                                label="Result format")
    bin_fmt = forms.ChoiceField(choices=[("raw", "raw"),
                                         ("base64", "base64"),
                                         ("zlib", "zlib")],
                                label="Binary format")

    def __init__(self, *args, **kwargs):
        config = CRITsConfig.objects().first()
        # Per-field maxima, keyed by the field they apply to.
        maxima = {
            'depth_limit': getattr(config, 'depth_max', settings.DEPTH_MAX),
            'total_limit': getattr(config, 'total_max', settings.TOTAL_MAX),
            'rel_limit': getattr(config, 'rel_max', settings.REL_MAX),
        }
        super(DownloadFileForm, self).__init__(*args, **kwargs)
        self.fields['objects'].choices = [('Actor', 'Actors'),
                                          ('Certificate', 'Certificates'),
                                          ('Domain', 'Domains'),
                                          ('Email', 'Emails'),
                                          ('Indicator', 'Indicators'),
                                          ('PCAP', 'PCAPs'),
                                          ('RawData', 'Raw Data'),
                                          ('Sample', 'Samples')]
        # depth_limit keeps its class-level initial of 0; only these two
        # default to their configured maxima.
        self.fields['total_limit'].initial = maxima['total_limit']
        self.fields['rel_limit'].initial = maxima['rel_limit']
        for field_name, maximum in maxima.items():
            self.fields[field_name].help_text = self.fields[field_name].help_text % maximum
class TLDUpdateForm(forms.Form):
    """
    Django form to update the TLD list.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    # Uploaded file containing the replacement TLD list.
    filedata = forms.FileField()
class SourceAccessForm(forms.Form):
    """
    Django form for updating a user's profile and source access.

    Source, role and organization choices are populated at construction time
    from the database (``get_source_names`` / ``get_item_names``).
    """
    error_css_class = 'error'
    required_css_class = 'required'
    username = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                               required=True)
    first_name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                 required=True)
    last_name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                required=True)
    email = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                            required=True)
    sources = forms.MultipleChoiceField(required=True,
                                        widget=SelectMultiple(attrs={'class': 'multiselect',
                                                                     'style': 'height: auto;'}))
    organization = forms.ChoiceField(required=True, widget=forms.Select)
    role = forms.ChoiceField(required=True, widget=forms.Select)
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'textbox'}),
                               required=False)
    totp = forms.BooleanField(initial=False, required=False)
    secret = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                             required=False)
    subscriptions = forms.CharField(required=False, widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        super(SourceAccessForm, self).__init__(*args, **kwargs)
        all_sources = get_source_names(False, False, None)
        self.fields['sources'].choices = [(src.name, src.name) for src in all_sources]
        roles = get_item_names(UserRole, True)
        self.fields['role'].choices = [(item.name, item.name) for item in roles]
        organizations = get_source_names(True, False, None)
        self.fields['organization'].choices = [(org.name, org.name) for org in organizations]
class SourceForm(forms.Form):
    """
    Django form to add source information to a top-level object.

    The source name choices are limited to the sources ``username`` may use,
    defaulting to the user's own organization.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    name = forms.ChoiceField(required=True, widget=forms.Select)
    date = forms.CharField(widget=HiddenInput(attrs={'readonly': 'readonly',
                                                     'id': 'source_added_date'}),
                           required=False)
    method = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),
                             required=False)
    reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),
                                required=False)
    analyst = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}))

    def __init__(self, username, *args, **kwargs):
        super(SourceForm, self).__init__(*args, **kwargs)
        usable_sources = get_source_names(True, True, username)
        self.fields['name'].choices = [(source.name, source.name)
                                       for source in usable_sources]
        self.fields['name'].initial = get_user_organization(username)
class TicketForm(forms.Form):
    """
    Django form to add a ticket to a top-level object.
    """
    error_css_class = 'error'
    required_css_class = 'required'
    ticket_number = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                    required=True)
    # Hidden field; the element id suggests it is populated client-side with
    # the ticket date -- confirm against the indicator template JavaScript.
    date = forms.CharField( widget=forms.HiddenInput(attrs={'size': '50',
                                                            'readonly':'readonly',
                                                            'id':'id_indicator_ticket_date'}))
| {
"content_hash": "c4a26e126bb8359ed928ff32d7dd1900",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 116,
"avg_line_length": 46.459016393442624,
"alnum_prop": 0.5004116678428605,
"repo_name": "davidhdz/crits",
"id": "e7f962d1c11450b5fb654b4558a96d0daab9f6ca",
"size": "17004",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "crits/core/forms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8694"
},
{
"name": "CSS",
"bytes": "360810"
},
{
"name": "HTML",
"bytes": "452517"
},
{
"name": "JavaScript",
"bytes": "2022328"
},
{
"name": "Perl",
"bytes": "916"
},
{
"name": "Prolog",
"bytes": "948"
},
{
"name": "Python",
"bytes": "1928789"
},
{
"name": "Shell",
"bytes": "10551"
}
],
"symlink_target": ""
} |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import matplotlib.ticker as mtick
import numpy as np
import datetime

# Join MALLET topic assignments back onto the original ORLANDO tweets, then
# export the tweets belonging to one topic.
# ORLANDO_Full.csv: column 3 = url, column 4 = text.
dforlando = pd.read_csv("ORLANDO_Full.csv", usecols=[3, 4], header=0,
                        names=['url', 'text'], nrows=89999, parse_dates=True)
# mallet15.txt: column 1 = document file name, column 2 = dominant topic.
dfmallet = pd.read_csv("mallet15.txt", delim_whitespace=True, header=0,
                       usecols=[1, 2], dtype='object', names=['File', 'Topic'],
                       nrows=89999)
# removed pulling in the file #, can add it again if you want but it'll
# probably be another set of code.
# Strip the non-digit parts of the file name so it can serve as a numeric row
# index into the ORLANDO frame.  Raw string + regex=True fixes the invalid
# '\D' escape warning and pandas' future default of literal replacement.
dfmalletseries = dfmallet['File'].str.replace(r'\D+', ' ', regex=True)
dfmallettrue = dfmallet.merge(dfmalletseries.to_frame(), left_index=True, right_index=True)
df2 = pd.to_numeric(dfmallettrue['File_y'])
dfmalletorder = dfmallettrue.merge(df2.to_frame(), left_index=True, right_index=True)
df3 = dfmalletorder.sort_values(['File_y_y'])
df_final = df3.join(dforlando, on=['File_y_y'])

# One frame per topic; topic labels in the MALLET output are the strings
# '0' .. '14'.  Replaces fifteen copy-pasted filter statements.
topics = {n: df_final.loc[df_final['Topic'] == str(n)] for n in range(15)}

# Change the key here to the topic you want to output to csv.
topics[7].to_csv(r'exemplars7.csv')
| {
"content_hash": "b095164768f14a355eb105765b338c7c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 141,
"avg_line_length": 44.125,
"alnum_prop": 0.684985835694051,
"repo_name": "Mattio89/visualization",
"id": "1c165d42cefc559f414bbb4ebc73bd326d85981a",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "find_tweets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7223"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
from cryptol import cryptoltypes
from cryptol.quoting import to_cryptol_str_customf
from .utils import deprecated
from dataclasses import dataclass
import dataclasses
import re
from typing import Any, Dict, List, Tuple, Optional, Set, Union, overload
from typing_extensions import Literal
import inspect
import uuid
from .llvm_type import *
from .jvm_type import *
# Loose alias for values serializable to JSON for the SAW server protocol.
# NOTE(review): Dict/Tuple/List are unparameterized, so this is a
# documentation aid for type checkers rather than a precise schema.
JSON = Union[None, bool, int, float, str, Dict, Tuple, List]
class SetupVal(metaclass=ABCMeta):
    """Represent a ``SetupValue`` in SawScript, which "corresponds to
    values that can occur during symbolic execution, which includes both 'Term'
    values, pointers, and composite types consisting of either of these
    (both structures and arrays)."
    """

    @abstractmethod
    def to_json(self) -> JSON:
        """JSON representation for this ``SetupVal`` (i.e., how it is represented in expressions, etc).
        N.B., should be a JSON object with a ``'setup value'`` field with a unique tag which the
        server will dispatch on to then interpret the rest of the JSON object.``"""
        pass

    # The overloads only refine the static return type: integer keys produce
    # element accesses, string keys produce field accesses.
    @overload
    def __getitem__(self, key : int) -> 'ElemVal':
        pass

    @overload
    def __getitem__(self, key : str) -> 'FieldVal':
        pass

    def __getitem__(self, key : Union[int, str]) -> 'SetupVal':
        """``SetupVal`` element indexing and field access.

        :param key: If ``key`` is an integer, a ``SetupVal`` corresponding to accessing the element
                    at that index is returned. If ``key`` is a string, a ``SetupVal`` corresponding
                    to accessing a field with that name is returned.

        :raises ValueError: if ``key`` is neither an ``int`` nor a ``str``.
        """
        if isinstance(key, int):
            return elem(self, key)
        elif isinstance(key, str):
            return field(self, key)
        else:
            raise ValueError(f'{key!r} is not a valid element index or field name.')
class NamedSetupVal(SetupVal):
    """Represents those ``SetupVal``s which are a named reference to some value, e.g., a variable
    or reference to allocated memory."""

    @abstractmethod
    def to_init_json(self) -> JSON:
        """JSON representation with the information for those ``SetupVal``s which require additional
        information to initialize/allocate them vs that which is required later to reference them.
        I.e., ``.to_json()`` will be used to refer to such ``SetupVal``s in expressions, and
        ``.to_init_json() is used to initialize/allocate them.``
        """
        pass
class CryptolTerm(SetupVal):
    """A ``SetupVal`` wrapping a Cryptol expression.

    Plain strings are treated as literal Cryptol source; any other argument
    is assumed to already implement the ``CryptolJSON`` protocol.  Calling a
    ``CryptolTerm`` applies it (as a Cryptol function) to its arguments.
    """
    expression : cryptoltypes.CryptolJSON

    def __init__(self, code : Union[str, cryptoltypes.CryptolJSON]):
        self.expression = cryptoltypes.CryptolLiteral(code) if isinstance(code, str) else code

    def __call__(self, *args : cryptoltypes.CryptolJSON) -> 'CryptolTerm':
        # Application builds a brand-new term; this term is not mutated.
        applied = cryptoltypes.CryptolApplication(self.expression, *args)
        return CryptolTerm(applied)

    def __repr__(self) -> str:
        return f"CryptolTerm({self.expression!r})"

    def to_json(self) -> JSON:
        return {'setup value': 'Cryptol',
                'expression': cryptoltypes.to_cryptol(self.expression)}

    def __to_cryptol__(self) -> cryptoltypes.JSON:
        return self.expression.__to_cryptol__()

    def __to_cryptol_str__(self) -> str:
        return self.expression.__to_cryptol_str__()
class FreshVar(NamedSetupVal):
    """A fresh symbolic variable belonging to a ``Contract``.

    The variable's name is resolved lazily: if no ``suggested_name`` was
    supplied, one is drawn from the owning contract the first time ``name()``
    is called.
    """
    # Cached name; None until first requested via name().
    __name : Optional[str]

    def __init__(self, spec : 'Contract', type : Union['LLVMType', 'JVMType'], suggested_name : Optional[str] = None) -> None:
        self.__name = suggested_name
        self.spec = spec
        self.type = type

    def __to_cryptol__(self) -> cryptoltypes.JSON:
        return cryptoltypes.CryptolLiteral(self.name()).__to_cryptol__()

    def __to_cryptol_str__(self) -> str:
        return cryptoltypes.CryptolLiteral(self.name()).__to_cryptol_str__()

    def to_init_json(self) -> JSON:
        #FIXME it seems we don't actually use two names ever... just the one...do we actually need both?
        name = self.name()
        return {"server name": name,
                "name": name,
                "type": self.type.to_json()}

    def name(self) -> str:
        """Return this variable's name, drawing a fresh one from the owning
        contract on first use."""
        if self.__name is None:
            self.__name = self.spec.get_fresh_name()
        return self.__name

    def to_json(self) -> JSON:
        return {'setup value': 'named', 'name': self.name()}
class Allocated(NamedSetupVal):
    """A ``SetupVal`` naming a region of memory allocated via ``Contract.alloc``.

    The server name is assigned lazily, the first time either serialization
    method needs it, so names are only drawn from the contract's pool for
    values that are actually used.
    """
    # Server name; None until first serialization.
    name : Optional[str]

    def __init__(self, spec : 'Contract', type : Union['LLVMType', 'JVMType'], *,
                 mutable : bool = True, alignment : Optional[int] = None) -> None:
        self.name = None
        self.spec = spec
        self.type = type
        self.mutable = mutable
        self.alignment = alignment

    def _ensure_name(self) -> str:
        """Assign (once) and return this allocation's server name.

        Consolidates the lazy-initialization logic previously duplicated in
        ``to_init_json`` and ``to_json``.
        """
        if self.name is None:
            self.name = self.spec.get_fresh_name()
        return self.name

    def to_init_json(self) -> JSON:
        return {"server name": self._ensure_name(),
                "type": self.type.to_json(),
                "mutable": self.mutable,
                "alignment": self.alignment}

    def to_json(self) -> JSON:
        return {'setup value': 'named', 'name': self._ensure_name()}
class StructVal(SetupVal):
    """A ``SetupVal`` for a structure built from the given field values."""
    # Field values, in declaration order.
    fields : List[SetupVal]

    def __init__(self, fields : List[SetupVal]) -> None:
        self.fields = fields

    def to_json(self) -> JSON:
        # NOTE(review): serialized with the 'tuple' tag rather than a
        # dedicated struct tag -- presumably the server models structs as
        # tuples of their fields; confirm against the SAW server protocol.
        return {'setup value': 'tuple', 'elements': [fld.to_json() for fld in self.fields]}
class ElemVal(SetupVal):
    """Reference to the element at ``index`` within the composite value ``base``."""
    base : SetupVal
    index : int

    def __init__(self, base : SetupVal, index : int) -> None:
        self.base = base
        self.index = index

    def to_json(self) -> JSON:
        return {
            'setup value': 'element lvalue',
            'base': self.base.to_json(),
            'index': self.index,
        }
class FieldVal(SetupVal):
    """Reference to the named field of the struct value ``base``."""
    base : SetupVal
    field_name : str

    def __init__(self, base : SetupVal, field_name : str) -> None:
        self.base = base
        self.field_name = field_name

    def to_json(self) -> JSON:
        return {
            'setup value': 'field',
            'base': self.base.to_json(),
            'field': self.field_name,
        }
class GlobalInitializerVal(SetupVal):
    """A ``SetupVal`` referring to the static initializer of the named global."""
    name : str

    def __init__(self, name : str) -> None:
        self.name = name

    def to_json(self) -> JSON:
        payload = {'setup value': 'global initializer', 'name': self.name}
        return payload
class GlobalVarVal(SetupVal):
    """A ``SetupVal`` referring to the named global variable itself (an lvalue)."""
    name : str

    def __init__(self, name : str) -> None:
        self.name = name

    def to_json(self) -> JSON:
        payload = {'setup value': 'global lvalue', 'name': self.name}
        return payload
class NullVal(SetupVal):
    """The null pointer value."""
    def to_json(self) -> JSON:
        return {'setup value': 'null'}
class ArrayVal(SetupVal):
    """A ``SetupVal`` for an array built from the given element values."""
    elements : List[SetupVal]

    def __init__(self, elements : List[SetupVal]) -> None:
        self.elements = elements

    def to_json(self) -> JSON:
        serialized = [member.to_json() for member in self.elements]
        return {'setup value': 'array', 'elements': serialized}
# Splits an identifier into an optional prefix and an optional trailing run
# of digits, e.g. 'x12' -> ('x', '12'), 'abc' -> ('abc', None),
# '123' -> (None, '123').
name_regexp = re.compile('^(?P<prefix>.*[^0-9])?(?P<number>[0-9]+)?$')


def next_name(x : str) -> str:
    """Return the "successor" of identifier *x*: bump its trailing number, or
    append ``0`` if it has none.  A missing prefix defaults to ``'x'``."""
    parsed = name_regexp.match(x)
    if parsed is None:
        return 'x'
    stem = parsed.group('prefix')
    digits = parsed.group('number')
    if stem is None:
        stem = 'x'
    bumped = 0 if digits is None else int(digits) + 1
    return f'{stem}{bumped}'
def uniquify(x : str, used : Set[str]) -> str:
    """Return *x* itself if it is not in *used*, otherwise the first
    successor of *x* (per ``next_name``) that is not in *used*."""
    candidate = x
    while candidate in used:
        candidate = next_name(candidate)
    return candidate
class PointerType:
    """A trivial class indicating that PointsTo should check ``target``'s type
    against the type that ``pointer``'s type points to.
    """
    # Marker only -- carries no data; instances are interchangeable.
    pass
class Condition:
    """A boolean Cryptol term used as a pre- or postcondition of a contract."""
    def __init__(self, condition : CryptolTerm) -> None:
        self.cryptol_term = condition

    def to_json(self) -> JSON:
        return cryptoltypes.to_cryptol(self.cryptol_term)
class PointsTo:
    """The workhorse for ``points_to``.

    Records that ``pointer`` points at ``target``, optionally guarded by
    ``condition``, with configurable checking of ``target``'s type:

    * ``PointerType()`` (the default) -- check against the pointee type of
      ``pointer``;
    * an ``LLVMType`` -- check against that (casted) type;
    * ``None`` -- skip the type check.
    """
    def __init__(self, pointer : SetupVal, target : SetupVal, *,
                 check_target_type : Union[PointerType, 'LLVMType', 'JVMType', None] = PointerType(),
                 condition : Optional[Condition] = None) -> None:
        self.pointer = pointer
        self.target = target
        self.check_target_type = check_target_type
        self.condition = condition

    def to_json(self) -> JSON:
        check_target_type_json: Optional[Dict[str, Any]]
        if self.check_target_type is None:
            check_target_type_json = None
        elif isinstance(self.check_target_type, PointerType):
            check_target_type_json = { "check against": "pointer type" }
        elif isinstance(self.check_target_type, LLVMType):
            check_target_type_json = { "check against": "casted type"
                                     , "type": self.check_target_type.to_json() }
        else:
            # BUG FIX: previously this chain had no else branch, so an
            # unsupported value (e.g. a JVMType, which the annotation admits)
            # left check_target_type_json unbound and crashed with
            # UnboundLocalError below.  Fail with a clear error instead.
            raise ValueError(f'Unsupported check_target_type: {self.check_target_type!r}')
        return {"pointer": self.pointer.to_json(),
                "points to": self.target.to_json(),
                "check points to type": check_target_type_json,
                "condition": self.condition.to_json() if self.condition is not None else self.condition}
class PointsToBitfield:
    """The workhorse for ``points_to_bitfield``.

    Records that the bitfield member ``field_name`` of the struct pointed to
    by ``pointer`` contains ``target``.  LLVM-only (see
    ``Contract.points_to_bitfield``).
    """
    def __init__(self, pointer : SetupVal, field_name : str,
                 target : SetupVal) -> None:
        self.pointer = pointer
        self.field_name = field_name
        self.target = target

    # Return type tightened from Any to JSON for consistency with every
    # other serializable class in this module (behavior unchanged).
    def to_json(self) -> JSON:
        return {"pointer": self.pointer.to_json(),
                "field name": self.field_name,
                "points to": self.target.to_json()}
@dataclass
class GhostVariable:
    """A declared ghost state variable."""
    # User-facing variable name.
    name: str
    # Name by which the SAW server identifies this variable.
    server_name: str
class GhostValue:
    """A class containing the statement that a given ghost variable should have the
    value given by a Cryptol expression.
    """
    def __init__(self, name: str, value: cryptoltypes.CryptolJSON) -> None:
        self.name = name
        self.value = value

    def to_json(self) -> JSON:
        # NOTE(review): ``name`` is serialized under "server name", yet
        # Contract.ghost_value passes GhostVariable.name rather than
        # GhostVariable.server_name -- confirm which one the server expects.
        return {"server name": self.name,
                "value": cryptoltypes.to_cryptol(self.value)}
@dataclass
class State:
    """One half of a contract: everything declared either before or after
    ``execute_func`` (the pre-state or the post-state)."""
    contract : 'Contract'
    # Fresh symbolic variables declared in this phase.
    fresh : List[FreshVar] = dataclasses.field(default_factory=list)
    # Boolean pre-/postconditions.
    conditions : List[Condition] = dataclasses.field(default_factory=list)
    # Memory allocations.
    allocated : List[Allocated] = dataclasses.field(default_factory=list)
    # points-to assertions.
    points_to : List[PointsTo] = dataclasses.field(default_factory=list)
    # Bitfield points-to assertions (LLVM only).
    points_to_bitfield : List[PointsToBitfield] = dataclasses.field(default_factory=list)
    # Ghost variable assignments.
    ghost_values : List[GhostValue] = dataclasses.field(default_factory=list)

    def to_json(self) -> JSON:
        return {'variables': [v.to_init_json() for v in self.fresh],
                'conditions': [c.to_json() for c in self.conditions],
                'allocated': [a.to_init_json() for a in self.allocated],
                'points to': [p.to_json() for p in self.points_to],
                'points to bitfield': [p.to_json() for p in self.points_to_bitfield],
                'ghost values': [g.to_json() for g in self.ghost_values]
                }
# Lifecycle phases of a Contract: 'pre' while preconditions are collected,
# 'post' after execute_func, and 'done' once serialized by to_json.
ContractState = \
    Union[Literal['pre'],
          Literal['post'],
          Literal['done']]
@dataclass
class Void:
    """Marker for a function with no return value; serializes to JSON null."""
    def to_json(self) -> JSON:
        return None


# Shared singleton; pass this to Contract.returns instead of constructing Void().
void = Void()
@dataclass
class VerifyResult:
    """The result of verifying a contract, addressable by its lemma name."""
    contract : 'Contract'
    lemma_name : str


# Module-level registry of lemma names handed out so far (read by
# Contract.lemma_name via uniquify).
# Lemma names are generated deterministically with respect to a
# particular Python execution trace. This means that re-running the
# same script will be fast when using caching, but REPL-style usage
# will be slow, invalidating the cache at each step. We should be
# smarter about this.
used_lemma_names = set([]) # type: Set[str]
class Contract:
    """Base class for function specifications ("contracts").

    Subclasses override ``specification``, which (1) declares fresh
    variables, allocations and preconditions, (2) calls ``execute_func``,
    and (3) declares postconditions and the return value.  ``__state``
    tracks this lifecycle: 'pre' -> 'post' -> 'done'.
    """
    __used_names : Set[str]            # every name handed out by get_fresh_name

    __state : ContractState = 'pre'    # current lifecycle phase

    __pre_state : State                # declarations made before execute_func
    __post_state : State               # declarations made after execute_func

    __returns : Optional[Union[SetupVal, Void]]

    __arguments : Optional[List[SetupVal]]

    __definition_lineno : Optional[int]      # where the subclass was defined
    __definition_filename : Optional[str]
    __unique_id : uuid.UUID
    __cached_json : JSON               # memoized result of to_json

    def __init__(self) -> None:
        self.__pre_state = State(self)
        self.__post_state = State(self)
        self.__used_names = set()
        self.__arguments = None
        self.__returns = None
        self.__unique_id = uuid.uuid4()
        self.__cached_json = None
        # Record where the contract was defined, for diagnostics.
        frame = inspect.currentframe()
        if frame is not None and frame.f_back is not None:
            self.__definition_lineno = frame.f_back.f_lineno
            self.__definition_filename = frame.f_back.f_code.co_filename
        else:
            self.__definition_lineno = None
            self.__definition_filename = None

    # To be overridden by users
    def specification(self) -> None:
        pass

    def execute_func(self, *args : SetupVal) -> None:
        """Denotes the end of the precondition specification portion of this ``Contract``, records that
        the function is executed with arguments ``args``, and denotes the beginning of the postcondition
        portion of this ``Contract``."""
        if self.__arguments is not None:
            raise ValueError("The function has already been called once during the specification.")
        elif self.__state != 'pre':
            # BUG FIX: this message was previously a plain (non-f) string, so
            # the {self.__state!r} placeholder was never interpolated.
            raise ValueError(f"Contract state expected to be 'pre', but found {self.__state!r} (has `execute_func` already been called for this contract?).")
        else:
            self.__arguments = [arg for arg in args]
            self.__state = 'post'

    def get_fresh_name(self, hint : str = 'x') -> str:
        """Reserve and return a name, based on ``hint``, unused so far in this contract."""
        new_name = uniquify(hint, self.__used_names)
        self.__used_names.add(new_name)
        return new_name

    def fresh_var(self, type : Union['LLVMType','JVMType'], suggested_name : Optional[str] = None) -> FreshVar:
        """Declares a fresh variable of type ``type`` (with name ``suggested_name`` if provided and available)."""
        # BUG FIX: the suggested name was previously passed through
        # get_fresh_name twice (nested), which registered it and then
        # uniquified the result again -- so a free suggested name was never
        # actually used as-is.
        fresh_name = self.get_fresh_name('x' if suggested_name is None else suggested_name)
        v = FreshVar(self, type, fresh_name)
        if self.__state == 'pre':
            self.__pre_state.fresh.append(v)
        elif self.__state == 'post':
            self.__post_state.fresh.append(v)
        else:
            raise Exception("wrong state")
        return v

    def alloc(self, type : Union['LLVMType', 'JVMType'], *, read_only : bool = False,
              alignment : Optional[int] = None,
              points_to : Optional[SetupVal] = None) -> SetupVal:
        """Allocates a pointer of type ``type``.

        If ``read_only == True`` then the allocated memory is immutable.

        If ``alignment != None``, then the start of the allocated region of
        memory will be aligned to a multiple of the specified number of bytes
        (which must be a power of 2).

        If ``points_to != None``, it will also be asserted that the allocated memory contains the
        value specified by ``points_to``.

        :returns A pointer of the proper type to the allocated region."""
        a = Allocated(self, type, mutable = not read_only, alignment = alignment)
        if self.__state == 'pre':
            self.__pre_state.allocated.append(a)
        elif self.__state == 'post':
            self.__post_state.allocated.append(a)
        else:
            raise Exception("wrong state")
        if points_to is not None:
            self.points_to(a, points_to)
        return a

    def points_to(self, pointer : SetupVal, target : SetupVal, *,
                  check_target_type : Union[PointerType, 'LLVMType', 'JVMType', None] = PointerType(),
                  condition : Optional[Condition] = None) -> None:
        """Declare that the memory location indicated by the ``pointer``
        contains the ``target``.

        If ``check_target_type == PointerType()``, then this will check that
        ``target``'s type matches the type that ``pointer``'s type points to.
        If ``check_target_type`` is an ``LLVMType``, then this will check that
        ``target``'s type matches that type.
        If ``check_target_type == None``, then this will not check ``target``'s
        type at all.

        If ``condition != None``, then this will only declare that ``pointer``
        points to ``target`` is the ``condition`` holds.
        """
        pt = PointsTo(pointer, target, check_target_type = check_target_type, condition = condition)
        if self.__state == 'pre':
            self.__pre_state.points_to.append(pt)
        elif self.__state == 'post':
            self.__post_state.points_to.append(pt)
        else:
            raise Exception("wrong state")

    def points_to_bitfield(self, pointer : SetupVal, field_name : str,
                           target : SetupVal) -> None:
        """Declare that the memory location indicated by the ``pointer``
        is a bitfield whose field, indicated by the ``field_name``,
        contains the ``target``.

        Currently, this function only supports LLVM verification. Attempting to
        use this function for JVM verification will result in an error.
        """
        pt = PointsToBitfield(pointer, field_name, target)
        if self.__state == 'pre':
            self.__pre_state.points_to_bitfield.append(pt)
        elif self.__state == 'post':
            self.__post_state.points_to_bitfield.append(pt)
        else:
            raise Exception("wrong state")

    def ghost_value(self, var: GhostVariable, value: cryptoltypes.CryptolJSON) -> None:
        """Declare that the given ghost variable should have a value specified by the given Cryptol expression.
        Usable either before or after `execute_func`.
        """
        gv = GhostValue(var.name, value)
        if self.__state == 'pre':
            self.__pre_state.ghost_values.append(gv)
        elif self.__state == 'post':
            self.__post_state.ghost_values.append(gv)
        else:
            raise Exception("wrong state")

    @deprecated
    def proclaim(self, proposition : Union[str, CryptolTerm, cryptoltypes.CryptolJSON]) -> None:
        """DEPRECATED: Use ``precondition`` or ``postcondition`` instead. This method will
        eventually be removed."""
        if not isinstance(proposition, CryptolTerm):
            condition = Condition(CryptolTerm(proposition))
        else:
            condition = Condition(proposition)
        if self.__state == 'pre':
            self.__pre_state.conditions.append(condition)
        elif self.__state == 'post':
            self.__post_state.conditions.append(condition)
        else:
            raise Exception("wrong state")

    def precondition(self, proposition : Union[str, CryptolTerm, cryptoltypes.CryptolJSON]) -> None:
        """Establishes ``proposition`` as a precondition for the function ```Contract```
        being specified.

        Preconditions must be specified before ``execute_func`` is called in the contract specification."""
        if not isinstance(proposition, CryptolTerm):
            condition = Condition(CryptolTerm(proposition))
        else:
            condition = Condition(proposition)
        if self.__state == 'pre':
            self.__pre_state.conditions.append(condition)
        else:
            raise Exception("preconditions must be specified before execute_func is called in the contract")

    def precondition_f(self, s : str) -> None:
        """Declares a precondition using a ``cry_f``-style format string, i.e.
        ``precondition_f(...)`` is equivalent to ``precondition(cry_f(...))``"""
        # frames=1: interpolate {...} expressions in the caller's scope.
        expression = to_cryptol_str_customf(s, frames=1, filename="<precondition_f>")
        return self.precondition(expression)

    def postcondition(self, proposition : Union[str, CryptolTerm, cryptoltypes.CryptolJSON]) -> None:
        """Establishes ``proposition`` as a postcondition for the function ```Contract```
        being specified.

        Postconditions must be specified after ``execute_func`` is called in the contract specification."""
        if not isinstance(proposition, CryptolTerm):
            condition = Condition(CryptolTerm(proposition))
        else:
            condition = Condition(proposition)
        if self.__state == 'post':
            self.__post_state.conditions.append(condition)
        else:
            raise Exception("postconditions must be specified after execute_func is called in the contract")

    def postcondition_f(self, s : str) -> None:
        """Declares a postcondition using a ``cry_f``-style format string, i.e.
        ``postcondition_f(...)`` is equivalent to ``postcondition(cry_f(...))``"""
        expression = to_cryptol_str_customf(s, frames=1, filename="<postcondition_f>")
        return self.postcondition(expression)

    def returns(self, val : Union[Void, SetupVal]) -> None:
        """Declare the function's return value (use ``void`` for none).
        Must be called exactly once, after ``execute_func``."""
        if self.__state == 'post':
            if self.__returns is None:
                self.__returns = val
            else:
                raise ValueError("Return value already specified")
        else:
            raise ValueError("Not in postcondition")

    def returns_f(self, s : str) -> None:
        """Declares a return value using a ``cry_f``-style format string, i.e.
        ``returns_f(...)`` is equivalent to ``returns(cry_f(...))``"""
        expression = to_cryptol_str_customf(s, frames=1, filename="<returns_f>")
        return self.returns(CryptolTerm(expression))

    def lemma_name(self, hint : Optional[str] = None) -> str:
        """Reserve and return a globally unique lemma name (default hint: the
        subclass name)."""
        if hint is None:
            hint = self.__class__.__name__
        name = uniquify('lemma_' + hint, used_lemma_names)
        used_lemma_names.add(name)
        return name

    def definition_lineno(self) -> Optional[int]:
        return self.__definition_lineno

    def definition_filename(self) -> Optional[str]:
        return self.__definition_filename

    def to_json(self) -> JSON:
        """Run ``specification`` (once; the result is memoized) and serialize
        the collected pre/post states for the SAW server."""
        if self.__cached_json is not None:
            return self.__cached_json
        else:
            if self.__state != 'pre':
                raise Exception(f'Internal error: wrong contract state -- expected \'pre\', but got: {self.__state!r}')
            self.specification()
            if self.__state != 'post':
                raise Exception(f'Internal error: wrong contract state -- expected \'post\', but got: {self.__state!r}')
            self.__state = 'done'
            if self.__returns is None:
                raise Exception("forgot return")
            self.__cached_json = \
                {'pre vars': [v.to_init_json() for v in self.__pre_state.fresh],
                 'pre conds': [c.to_json() for c in self.__pre_state.conditions],
                 'pre allocated': [a.to_init_json() for a in self.__pre_state.allocated],
                 'pre ghost values': [g.to_json() for g in self.__pre_state.ghost_values],
                 'pre points tos': [pt.to_json() for pt in self.__pre_state.points_to],
                 'pre points to bitfields': [pt.to_json() for pt in self.__pre_state.points_to_bitfield],
                 'argument vals': [a.to_json() for a in self.__arguments] if self.__arguments is not None else [],
                 'post vars': [v.to_init_json() for v in self.__post_state.fresh],
                 'post conds': [c.to_json() for c in self.__post_state.conditions],
                 'post allocated': [a.to_init_json() for a in self.__post_state.allocated],
                 'post ghost values': [g.to_json() for g in self.__post_state.ghost_values],
                 'post points tos': [pt.to_json() for pt in self.__post_state.points_to],
                 'post points to bitfields': [pt.to_json() for pt in self.__post_state.points_to_bitfield],
                 'return val': self.__returns.to_json()}
            return self.__cached_json
##################################################
# Helpers for value construction
##################################################
# It's tempting to name this `global` to mirror SAWScript's `llvm_global`,
# but that would clash with the Python keyword `global`.
def global_var(name: str) -> SetupVal:
    """Return a ``GlobalVarVal``: a pointer to the global named ``name``.

    (Named ``global_var`` rather than mirroring SAWScript's ``llvm_global``
    because ``global`` is a Python keyword.)
    """
    return GlobalVarVal(name)
def cry(s : str) -> CryptolTerm:
    """Wrap a literal string of Cryptol syntax as a ``CryptolTerm`` (which is
    also a ``SetupVal``).  For f-string-style interpolation, see ``cry_f``."""
    return CryptolTerm(s)
def cry_f(s : str) -> CryptolTerm:
    """Embed a string of Cryptol syntax as a ``CryptolTerm`` (which is also a
    ``SetupVal``), where the given string is parsed as an f-string, and the
    values within brackets are converted to Cryptol syntax using
    ``to_cryptol_str`` from the Cryptol Python library.

    Like f-strings, values in brackets (``{``, ``}``) are parsed as python
    expressions in the caller's context of local and global variables, and
    to include a literal bracket in the final string, it must be doubled
    (i.e. ``{{`` or ``}}``). The latter is needed when using explicit type
    application or record syntax. For example, if ``x = [0,1]`` then
    ``cry_f('length `{{2}} {x}')`` is equal to ``cry('length `{2} [0,1]')``
    and ``cry_f('{{ x = {x} }}')`` is equal to ``cry('{ x = [0,1] }')``.

    When formatting Cryptol, it is recomended to use this function rather
    than any of Python's built-in methods of string formatting (e.g.
    f-strings, ``str.format``) as the latter will not always produce valid
    Cryptol syntax. Specifically, this function differs from these methods
    in the cases of ``BV``s, string literals, function application (this
    function will add parentheses as needed), and dicts. For example,
    ``cry_f('{ {"x": 5, "y": 4} }')`` equals ``cry('{x = 5, y = 4}')``
    but ``f'{ {"x": 5, "y": 4} }'`` equals ``'{"x": 5, "y": 4}'``. Only
    the former is valid Cryptol syntax for a record.

    Note that any conversion or format specifier will always result in the
    argument being rendered as a Cryptol string literal with the conversion
    and/or formating applied. For example, `cry('f {5}')` is equal to
    ``cry('f 5')`` but ``cry_f('f {5!s}')`` is equal to ``cry(`f "5"`)``
    and ``cry_f('f {5:+.2%}')`` is equal to ``cry('f "+500.00%"')``.

    :example:

    >>> x = BV(size=7, value=1)
    >>> y = cry_f('fun1 {x}')
    >>> cry_f('fun2 {y}')
    'fun2 (fun1 (1 : [7]))'
    """
    # frames=1 resolves the {...} expressions in the *caller's* scope -- do
    # not wrap this call in another helper without adjusting the frame count.
    return CryptolTerm(to_cryptol_str_customf(s, frames=1))
def array(*elements: SetupVal) -> SetupVal:
"""Returns an array with the provided ``elements`` (i.e., an ``ArrayVal``).
N.B., one or more ``elements`` must be provided.""" # FIXME why? document this here when we figure it out.
if len(elements) == 0:
raise ValueError('An array must be constructed with one or more elements')
for e in elements:
if not isinstance(e, SetupVal):
raise ValueError('array expected a SetupVal, but got {e!r}')
return ArrayVal(list(elements))
def elem(base: SetupVal, index: int) -> SetupVal:
    """Returns the value of the array element at position ``index`` in ``base`` (i.e., an ``ElemVal``).

    Can also be created by using an ``int`` indexing key on a ``SetupVal``: ``base[index]``.

    :raises ValueError: if ``base`` is not a ``SetupVal`` or ``index`` is not an ``int``.
    """
    if not isinstance(base, SetupVal):
        # Bug fix: messages previously lacked the f-prefix and printed the
        # placeholder text verbatim.
        raise ValueError(f'elem expected a SetupVal, but got {base!r}')
    if not isinstance(index, int):
        raise ValueError(f'elem expected an int, but got {index!r}')
    return ElemVal(base, index)
def field(base : SetupVal, field_name : str) -> SetupVal:
    """Returns the value of struct ``base``'s field ``field_name`` (i.e., a ``FieldVal``).

    Can also be created by using a ``str`` indexing key on a ``SetupVal``: ``base[field_name]``.

    :raises ValueError: if ``base`` is not a ``SetupVal`` or ``field_name`` is not a ``str``.
    """
    if not isinstance(base, SetupVal):
        # Bug fix: messages previously lacked the f-prefix and printed the
        # placeholder text verbatim.
        raise ValueError(f'field expected a SetupVal, but got {base!r}')
    if not isinstance(field_name, str):
        raise ValueError(f'field expected a str, but got {field_name!r}')
    return FieldVal(base, field_name)
def global_initializer(name: str) -> SetupVal:
"""Returns the initializer value of a named global ``name`` (i.e., a ``GlobalInitializerVal``)."""
if not isinstance(name, str):
raise ValueError('global_initializer expected a str naming a global value, but got {name!r}')
return GlobalInitializerVal(name)
def null() -> SetupVal:
    """Construct a ``NullVal``, i.e. the null pointer value."""
    return NullVal()
def struct(*fields : SetupVal) -> SetupVal:
    """Returns an LLVM structure value with the given ``fields`` (i.e., a ``StructVal``).

    :raises ValueError: if any field is not a ``SetupVal``.
    """
    # Loop variable renamed from `field`, which shadowed the module-level
    # `field` helper function.
    for field_val in fields:
        if not isinstance(field_val, SetupVal):
            # Bug fix: the message previously lacked the f-prefix and printed
            # the placeholder text verbatim.
            raise ValueError(f'struct expected a SetupVal, but got {field_val!r}')
    return StructVal(list(fields))
| {
"content_hash": "e665e19d5c9852e0f472cffd302509c6",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 156,
"avg_line_length": 40.21648044692738,
"alnum_prop": 0.6039590206633096,
"repo_name": "GaloisInc/saw-script",
"id": "5d5ff39a0c2feecb305b81ac60a9e0f7e935cc0a",
"size": "28795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saw-remote-api/python/saw_client/crucible.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1142"
},
{
"name": "C",
"bytes": "75732"
},
{
"name": "Coq",
"bytes": "247572"
},
{
"name": "Dockerfile",
"bytes": "8660"
},
{
"name": "Emacs Lisp",
"bytes": "31441"
},
{
"name": "Haskell",
"bytes": "4688303"
},
{
"name": "Java",
"bytes": "8893"
},
{
"name": "LLVM",
"bytes": "165350"
},
{
"name": "Lex",
"bytes": "14870"
},
{
"name": "Makefile",
"bytes": "13802"
},
{
"name": "Python",
"bytes": "181486"
},
{
"name": "Rust",
"bytes": "48024"
},
{
"name": "Shell",
"bytes": "17209"
},
{
"name": "TeX",
"bytes": "4076"
},
{
"name": "Vim Script",
"bytes": "6159"
},
{
"name": "XSLT",
"bytes": "703"
},
{
"name": "Yacc",
"bytes": "39562"
}
],
"symlink_target": ""
} |
"""Tests for the log2timeline CLI tool."""
import collections
import os
import platform
import unittest
from plaso.containers import events
from plaso.cli import log2timeline_tool
from plaso.lib import definitions
from plaso.lib import errors
from plaso.storage.sqlite import sqlite_file
from tests import test_lib as shared_test_lib
from tests.cli import test_lib
class Log2TimelineToolTest(test_lib.CLIToolTestCase):
  """Tests for the log2timeline CLI tool."""

  # pylint: disable=protected-access

  _BDE_PASSWORD = 'bde-TEST'

  _OUTPUT_ENCODING = 'utf-8'

  def _CheckOutput(self, output, expected_output):
    """Compares the output against the expected output.

    The actual processing time is ignored, since it can vary.

    Args:
      output (str): tool output.
      expected_output (list[str]): expected tool output.
    """
    output = output.split('\n')

    self.assertEqual(output[:3], expected_output[:3])
    self.assertTrue(output[3].startswith('Processing time\t\t: '))
    self.assertEqual(output[4:], expected_output[4:])

  def _CreateExtractionOptions(self, source_path, password=None):
    """Create options for testing extraction.

    Args:
      source_path (str): path of the source (test) data.
      password (Optional[str]): password to unlock test data.

    Returns:
      TestOptions: options for testing extraction.
    """
    options = test_lib.TestOptions()
    options.artifact_definitions_path = self._GetTestFilePath(['artifacts'])
    options.data_location = shared_test_lib.DATA_PATH
    options.quiet = True
    options.single_process = True
    options.status_view_mode = 'none'
    options.source = source_path

    if password:
      options.credentials = ['password:{0:s}'.format(password)]

    return options

  def _RunExtractionTest(self, options, expected_source_type, extra_output=None):
    """Runs ExtractEventsFromSources and checks the status view output.

    This helper centralizes the storage-file setup and output comparison
    that the individual extraction tests previously duplicated.

    Args:
      options (TestOptions): options for testing extraction.
      expected_source_type (str): source type expected in the status output,
          such as 'directory' or 'storage media image'.
      extra_output (Optional[list[str]]): additional lines expected in the
          status output after the processing summary, such as warning counts.
    """
    output_writer = test_lib.TestOutputWriter(encoding=self._OUTPUT_ENCODING)
    test_tool = log2timeline_tool.Log2TimelineTool(output_writer=output_writer)

    with shared_test_lib.TempDirectory() as temp_directory:
      options.storage_file = os.path.join(temp_directory, 'storage.plaso')
      options.storage_format = definitions.STORAGE_FORMAT_SQLITE
      options.task_storage_format = definitions.STORAGE_FORMAT_SQLITE

      test_tool.ParseOptions(options)

      test_tool.ExtractEventsFromSources()

      expected_output = [
          '',
          'Source path\t\t: {0:s}'.format(options.source),
          'Source type\t\t: {0:s}'.format(expected_source_type),
          'Processing time\t\t: 00:00:00',
          '',
          'Processing started.',
          'Processing completed.',
          '']

      if extra_output:
        expected_output.extend(extra_output)

      expected_output.append('')

      output = output_writer.ReadOutput()
      self._CheckOutput(output, expected_output)

  # TODO: add tests for _CheckStorageFile
  # TODO: add tests for _CreateProcessingConfiguration

  def testGetPluginData(self):
    """Tests the _GetPluginData function."""
    test_tool = log2timeline_tool.Log2TimelineTool()
    test_tool._data_location = self._GetTestFilePath([])

    plugin_info = test_tool._GetPluginData()

    self.assertIn('Hashers', plugin_info)

    available_hasher_names = [name for name, _ in plugin_info['Hashers']]
    self.assertIn('sha256', available_hasher_names)
    self.assertIn('sha1', available_hasher_names)

    self.assertIn('Parsers', plugin_info)
    self.assertIsNotNone(plugin_info['Parsers'])

    self.assertIn('Parser Plugins', plugin_info)
    self.assertIsNotNone(plugin_info['Parser Plugins'])

  def CheckEventCounters(self, storage_file, expected_event_counters):
    """Asserts that the number of events per data type matches.

    Args:
      storage_file (StorageFile): storage file.
      expected_event_counters (dict[str, int|list[int]]): expected event
          counters per event data type.
    """
    event_counters = collections.Counter()
    for event in storage_file.GetSortedEvents():
      event_data_identifier = event.GetEventDataIdentifier()
      event_data = storage_file.GetAttributeContainerByIdentifier(
          events.EventData.CONTAINER_TYPE, event_data_identifier)

      event_counters[event_data.data_type] += 1

    for data_type, expected_event_count in expected_event_counters.items():
      event_count = event_counters.pop(data_type, 0)
      if isinstance(expected_event_count, list):
        self.assertIn(event_count, expected_event_count)
      else:
        self.assertEqual(event_count, expected_event_count)

    # Ensure there are no events left unaccounted for.
    self.assertEqual(event_counters, collections.Counter())

  # TODO: add tests for _PrintProcessingSummary

  def testParseArguments(self):
    """Tests the ParseArguments function."""
    output_writer = test_lib.TestOutputWriter(encoding=self._OUTPUT_ENCODING)
    test_tool = log2timeline_tool.Log2TimelineTool(output_writer=output_writer)

    result = test_tool.ParseArguments([])
    self.assertFalse(result)

    # TODO: check output.
    # TODO: improve test coverage.

  def testParseOptions(self):
    """Tests the ParseOptions function."""
    test_artifacts_path = self._GetTestFilePath(['artifacts'])
    self._SkipIfPathNotExists(test_artifacts_path)

    test_file_path = self._GetTestFilePath(['testdir'])
    self._SkipIfPathNotExists(test_file_path)

    yara_rules_path = self._GetTestFilePath(['rules.yara'])
    self._SkipIfPathNotExists(yara_rules_path)

    options = test_lib.TestOptions()
    options.artifact_definitions_path = test_artifacts_path
    options.source = test_file_path
    options.storage_file = 'storage.plaso'
    options.storage_format = definitions.STORAGE_FORMAT_SQLITE
    options.task_storage_format = definitions.STORAGE_FORMAT_SQLITE
    options.yara_rules_path = yara_rules_path

    output_writer = test_lib.TestOutputWriter(encoding=self._OUTPUT_ENCODING)
    test_tool = log2timeline_tool.Log2TimelineTool(output_writer=output_writer)

    test_tool.ParseOptions(options)

    self.assertIsNotNone(test_tool._yara_rules_string)

    options = test_lib.TestOptions()
    options.artifact_definitions_path = test_artifacts_path

    # ParseOptions will raise if source is not set.
    with self.assertRaises(errors.BadConfigOption):
      test_tool.ParseOptions(options)

    options = test_lib.TestOptions()
    options.artifact_definitions_path = test_artifacts_path
    options.source = test_file_path

    with self.assertRaises(errors.BadConfigOption):
      test_tool.ParseOptions(options)

    # TODO: improve test coverage.

  def testExtractEventsFromSourcesOnDirectory(self):
    """Tests the ExtractEventsFromSources function on a directory."""
    test_file_path = self._GetTestFilePath(['testdir'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)

    self._RunExtractionTest(options, 'directory')

  def testExtractEventsFromSourcesOnAPFSImage(self):
    """Tests the ExtractEventsFromSources function on APFS image."""
    test_file_path = self._GetTestFilePath(['apfs.dmg'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)

    self._RunExtractionTest(options, 'storage media image')

  def testExtractEventsFromSourcesOnBDEImage(self):
    """Tests the ExtractEventsFromSources function on BDE image."""
    test_file_path = self._GetTestFilePath(['bdetogo.raw'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(
        test_file_path, password=self._BDE_PASSWORD)

    self._RunExtractionTest(options, 'storage media image')

  def testExtractEventsFromSourcesOnCompressedDMGImage(self):
    """Tests the ExtractEventsFromSources function on a compressed DMG image."""
    test_file_path = self._GetTestFilePath(['hfsplus_zlib.dmg'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)

    self._RunExtractionTest(options, 'storage media image')

  def testExtractEventsFromSourcesImage(self):
    """Tests the ExtractEventsFromSources function on single partition image."""
    test_file_path = self._GetTestFilePath(['ímynd.dd'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)

    self._RunExtractionTest(options, 'storage media image')

  def testExtractEventsFromSourcesPartitionedImage(self):
    """Tests the ExtractEventsFromSources function on multi partition image."""
    # Note that the source file is a RAW (VMDK flat) image.
    test_file_path = self._GetTestFilePath(['multi_partition_image.vmdk'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)
    options.partitions = 'all'

    self._RunExtractionTest(options, 'storage media image')

  def testExtractEventsFromSourcesOnVSSImage(self):
    """Tests the ExtractEventsFromSources function on VSS image."""
    test_file_path = self._GetTestFilePath(['vsstest.qcow2'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)
    options.unattended = True
    options.vss_stores = 'all'

    extra_output = [
        'Number of warnings generated while extracting events: 3.',
        '',
        'Use pinfo to inspect warnings in more detail.',
        '']

    self._RunExtractionTest(
        options, 'storage media image', extra_output=extra_output)

  def testExtractEventsFromSourcesOnFile(self):
    """Tests the ExtractEventsFromSources function on a file."""
    test_file_path = self._GetTestFilePath(['System.evtx'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)

    self._RunExtractionTest(options, 'single file')

  @unittest.skipIf(platform.system() == 'Windows', 'not supported on Windows')
  @unittest.skipIf(
      platform.release().endswith('Microsoft'),
      'not supported on Windows Subsystem for Linux')
  def testExtractEventsFromSourcesOnLinkToDirectory(self):
    """Tests the ExtractEventsFromSources function on a symlink to directory."""
    test_file_path = self._GetTestFilePath(['link_to_testdir'])
    self._SkipIfPathNotExists(test_file_path)

    options = self._CreateExtractionOptions(test_file_path)

    self._RunExtractionTest(options, 'directory')

  @unittest.skipIf(platform.system() == 'Windows', 'not supported on Windows')
  @unittest.skipIf(
      platform.release().endswith('Microsoft'),
      'not supported on Windows Subsystem for Linux')
  def testExtractEventsFromSourcesOnLinkToFile(self):
    """Tests the ExtractEventsFromSources function on a symlink to file."""
    source_path = self._GetTestFilePath(['link_to_System.evtx'])
    options = self._CreateExtractionOptions(source_path)

    self._RunExtractionTest(options, 'single file')

  def testExtractEventsFromSourcesWithFilestat(self):
    """Tests the ExtractEventsFromSources function with filestat parser."""
    output_writer = test_lib.TestOutputWriter(encoding=self._OUTPUT_ENCODING)
    test_tool = log2timeline_tool.Log2TimelineTool(output_writer=output_writer)

    source_path = self._GetTestFilePath(['test_pe.exe'])
    options = self._CreateExtractionOptions(source_path)
    options.parsers = 'filestat,pe'

    with shared_test_lib.TempDirectory() as temp_directory:
      options.storage_file = os.path.join(temp_directory, 'storage.plaso')
      options.storage_format = definitions.STORAGE_FORMAT_SQLITE
      options.task_storage_format = definitions.STORAGE_FORMAT_SQLITE

      test_tool.ParseOptions(options)

      test_tool.ExtractEventsFromSources()

      storage_file = sqlite_file.SQLiteStorageFile()
      try:
        storage_file.Open(path=options.storage_file, read_only=True)
      except IOError as exception:
        self.fail((
            'Unable to open storage file after processing with error: '
            '{0!s}.').format(exception))

      # There should be 3 filestat and 3 pe parser generated events.
      # Typically there are 3 filestat events, but there can be 4 on platforms
      # that support os.stat_result st_birthtime.
      expected_event_counters = {
          'fs:stat': [3, 4],
          'pe_coff:dll_import': 2,
          'pe_coff:file': 1}

      self.CheckEventCounters(storage_file, expected_event_counters)

  def testShowInfo(self):
    """Tests the output of the tool in info mode."""
    output_writer = test_lib.TestOutputWriter(encoding=self._OUTPUT_ENCODING)
    test_tool = log2timeline_tool.Log2TimelineTool(output_writer=output_writer)

    options = test_lib.TestOptions()
    options.artifact_definitions_path = self._GetTestFilePath(['artifacts'])
    options.show_info = True

    test_tool.ParseOptions(options)

    test_tool.ShowInfo()
    output = output_writer.ReadOutput()

    section_headings = [
        'Hashers', 'Parsers', 'Parser Plugins', 'Parser Presets',
        'Versions']
    for heading in section_headings:
      self.assertIn(heading, output)

    self.assertNotIn('<class', output)
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "6d13a2ad3b13e56ae8b2f36edb61758d",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 80,
"avg_line_length": 35.884413309982484,
"alnum_prop": 0.6838945827232796,
"repo_name": "joachimmetz/plaso",
"id": "fad359ceed1279bb9b0658ae9e512a373298c771",
"size": "20538",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/cli/log2timeline_tool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345755"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
} |
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from dateutil.relativedelta import MO, FR
from holidays.constants import SUN, JAN, MAY, JUN, OCT, DEC
from holidays.holiday_base import HolidayBase
class Kenya(HolidayBase):
    """
    https://en.wikipedia.org/wiki/Public_holidays_in_Kenya
    http://kenyaembassyberlin.de/Public-Holidays-in-Kenya.48.0.html
    https://www.officeholidays.com/holidays/kenya/moi-day
    """

    country = "KE"

    def _populate(self, year):
        super()._populate(year)

        # Fixed-date public holidays.
        fixed_holidays = (
            (JAN, 1, "New Year's Day"),
            (MAY, 1, "Labour Day"),
            (JUN, 1, "Madaraka Day"),
            (OCT, 10, "Huduma Day"),
            (OCT, 20, "Mashujaa Day"),
            (DEC, 12, "Jamhuri (Independence) Day"),
            (DEC, 25, "Christmas Day"),
            (DEC, 26, "Utamaduni Day"),
        )
        for month, day, name in fixed_holidays:
            self[date(year, month, day)] = name

        # Shift Sunday holidays to the following Monday when observed;
        # iterate over a snapshot since we add entries while looping.
        for hol_date, hol_name in list(self.items()):
            if self.observed and hol_date.weekday() == SUN:
                self[hol_date + rd(days=1)] = hol_name + " (Observed)"

        # Easter-based holidays are added after the observed pass, matching
        # the original insertion order (they never fall on a Sunday anyway).
        self[easter(year) - rd(weekday=FR(-1))] = "Good Friday"
        self[easter(year) + rd(weekday=MO(+1))] = "Easter Monday"
class KE(Kenya):
    # ISO 3166-1 alpha-2 alias for Kenya.
    pass
class KEN(Kenya):
    # ISO 3166-1 alpha-3 alias for Kenya.
    pass
| {
"content_hash": "64b885b6192509e000b49c7839894c0e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 30.288888888888888,
"alnum_prop": 0.6206896551724138,
"repo_name": "dr-prodigy/python-holidays",
"id": "c805957f0305a52c5cea0c2500fab2d93cd83bb4",
"size": "1840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holidays/countries/kenya.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1134423"
}
],
"symlink_target": ""
} |
from .development import *
| {
"content_hash": "e00a2294b12ebef08c6e06df2affe7b7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.7777777777777778,
"repo_name": "zwidny/djtemplate1_8",
"id": "c4acb52c5eb4abdeef847c5951a0a9ed1fc8e33f",
"size": "51",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/project_name/settings/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2031"
},
{
"name": "Python",
"bytes": "6420"
}
],
"symlink_target": ""
} |
from .cqlhandling import CqlParsingRuleSet, Hint
from cassandra.metadata import maybe_escape_name
from cassandra.metadata import escape_name
# Scalar (non-collection) CQL type names used for completion.
simple_cql_types = {
    'ascii', 'bigint', 'blob', 'boolean', 'counter', 'date', 'decimal',
    'double', 'float', 'inet', 'int', 'smallint', 'text', 'time',
    'timestamp', 'timeuuid', 'tinyint', 'uuid', 'varchar', 'varint'}
# Defensive: make sure collection type names never leak into the scalar set.
simple_cql_types.difference_update(('set', 'map', 'list'))
from . import helptopics

# Module-level singleton providing CQL help text for cqlsh's HELP command.
cqldocs = helptopics.CQL3HelpTopics()
class UnexpectedTableStructure(UserWarning):
    """Warning used when a table's layout does not match expectations and
    may therefore not translate correctly to CQL."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        prefix = 'Unexpected table structure; may not translate correctly to CQL. '
        return prefix + self.msg
# Keyspaces managed by Cassandra itself; completion treats these specially.
SYSTEM_KEYSPACES = ('system', 'system_schema', 'system_traces', 'system_auth', 'system_distributed')
# Keyspaces that must not be altered. NOTE(review): the name is misspelled
# ("NONALTERBALE"), but it is kept as-is since other modules may reference it.
NONALTERBALE_KEYSPACES = ('system', 'system_schema')
class Cql3ParsingRuleSet(CqlParsingRuleSet):
    """Parsing and completion rules specific to the CQL3 dialect."""

    # Reserved words recognized by the completion grammar.
    keywords = set((
        'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
        'limit', 'using', 'use', 'set',
        'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
        'function', 'aggregate', 'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
        'primary', 'into', 'values', 'date', 'time', 'timestamp', 'ttl', 'alter', 'add', 'type',
        'compact', 'storage', 'order', 'by', 'asc', 'desc', 'clustering',
        'token', 'writetime', 'map', 'list', 'to', 'custom', 'if', 'not',
        'materialized', 'view'
    ))

    # Keywords that may still be used as identifiers without quoting.
    unreserved_keywords = set((
        'key', 'clustering', 'ttl', 'compact', 'storage', 'type', 'values', 'custom', 'exists'
    ))

    # (CQL3 option name, schema_columnfamilies column name, or None if same)
    columnfamily_layout_options = (
        ('bloom_filter_fp_chance', None),
        ('comment', None),
        ('dclocal_read_repair_chance', 'local_read_repair_chance'),
        ('gc_grace_seconds', None),
        ('min_index_interval', None),
        ('max_index_interval', None),
        ('read_repair_chance', None),
        ('default_time_to_live', None),
        ('speculative_retry', None),
        ('memtable_flush_period_in_ms', None),
    )

    columnfamily_layout_map_options = (
        # (CQL3 option name, schema_columnfamilies column name (or None if same),
        # list of known map keys)
        ('compaction', 'compaction_strategy_options',
         ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction')),
        ('compression', 'compression_parameters',
         ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
        ('caching', None,
         ('rows_per_partition', 'keys')),
    )

    # No table options are treated as obsolete in this dialect.
    obsolete_cf_options = ()

    # Consistency level names offered for completion.
    # NOTE(review): LOCAL_ONE is absent from this list; confirm whether it
    # should be completed here for this version.
    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM',
        'SERIAL'
    )

    # Re-export the driver's identifier-quoting helpers as static methods so
    # instances expose a uniform interface.
    maybe_escape_name = staticmethod(maybe_escape_name)

    escape_name = staticmethod(escape_name)

    @classmethod
    def escape_value(cls, value):
        # Render a Python value as a CQL literal.
        if value is None:
            return 'NULL' # this totally won't work
        if isinstance(value, bool):
            # Booleans are lowercased and then fall through to the quoted
            # return below, yielding 'true'/'false' as quoted strings.
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        # Strings (and lowered booleans): single-quote, doubling embedded quotes.
        return "'%s'" % value.replace("'", "''")

    @staticmethod
    def dequote_name(name):
        # Strip double quotes from a quoted identifier, or lowercase an
        # unquoted one (CQL identifiers are case-insensitive when unquoted).
        name = name.strip()
        if name == '':
            return name
        if name[0] == '"' and name[-1] == '"':
            return name[1:-1].replace('""', '"')
        else:
            return name.lower()

    @staticmethod
    def dequote_value(cqlword):
        # Strip single quotes from a string literal, undoing '' escaping;
        # unlike identifiers, unquoted values keep their case.
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword
# Module-level singleton rule set used by all completers below.
CqlRuleSet = Cql3ParsingRuleSet()

# convenience for remainder of module
completer_for = CqlRuleSet.completer_for
explain_completion = CqlRuleSet.explain_completion
dequote_value = CqlRuleSet.dequote_value
dequote_name = CqlRuleSet.dequote_name
escape_value = CqlRuleSet.escape_value
maybe_escape_name = CqlRuleSet.maybe_escape_name
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
# Grammar (BNF-like, with regex terminals between slashes) consumed by the
# completion engine. The string content is runtime data and is unchanged.
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= <quotedStringLiteral>
| <pgStringLiteral> ;
<quotedStringLiteral> ::= /'([^']|'')*'/ ;
<pgStringLiteral> ::= /\$\$.*\$\$/;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<blobLiteral> ::= /0x[0-9a-f]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*].*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
| <blobLiteral>
| <collectionLiteral>
| <functionName> <functionArguments>
| "NULL"
;
<functionArguments> ::= "(" ( <term> ( "," <term> )* )? ")"
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <term>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<anyFunctionName> ::= ( ksname=<cfOrKsName> dot="." )? udfname=<cfOrKsName> ;
<userFunctionName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? udfname=<cfOrKsName> ;
<refUserFunctionName> ::= udfname=<cfOrKsName> ;
<userAggregateName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? udaname=<cfOrKsName> ;
<functionAggregateName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? functionname=<cfOrKsName> ;
<aggregateName> ::= <userAggregateName>
;
<functionName> ::= <functionAggregateName>
| "TOKEN"
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <createMaterializedViewStatement>
| <createUserTypeStatement>
| <createFunctionStatement>
| <createAggregateStatement>
| <createTriggerStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <dropMaterializedViewStatement>
| <dropUserTypeStatement>
| <dropFunctionStatement>
| <dropAggregateStatement>
| <dropTriggerStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
| <alterUserTypeStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
| <createRoleStatement>
| <alterRoleStatement>
| <dropRoleStatement>
| <listRolesStatement>
;
<authorizationStatement> ::= <grantStatement>
| <grantRoleStatement>
| <revokeStatement>
| <revokeRoleStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | <K_TIMESTAMP> ) ;
<userType> ::= utname=<cfOrKsName> ;
<storageType> ::= <simpleStorageType> | <collectionType> | <frozenCollectionType> | <userType> ;
# Note: autocomplete for frozen collection types does not handle nesting past depth 1 properly,
# but that's a lot of work to fix for little benefit.
<collectionType> ::= "map" "<" <simpleStorageType> "," ( <simpleStorageType> | <userType> ) ">"
| "list" "<" ( <simpleStorageType> | <userType> ) ">"
| "set" "<" ( <simpleStorageType> | <userType> ) ">"
;
<frozenCollectionType> ::= "frozen" "<" "map" "<" <storageType> "," <storageType> ">" ">"
| "frozen" "<" "list" "<" <storageType> ">" ">"
| "frozen" "<" "set" "<" <storageType> ">" ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<userTypeName> ::= ( ksname=<cfOrKsName> dot="." )? utname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( <K_KEY>
| <K_CLUSTERING>
| <K_TTL>
| <K_COMPACT>
| <K_STORAGE>
| <K_TYPE>
| <K_VALUES> )
;
<property> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
'''
def prop_equals_completer(ctxt, cass):
    """Suggest '=' after a property name, except for the bare table
    keywords COMPACT/CLUSTERING, which introduce clauses, not properties."""
    if not working_on_keyspace(ctxt):
        # "COMPACT STORAGE" and "CLUSTERING ORDER BY" look like property
        # names but never take an equals sign; there are no real table
        # properties by those names.
        latest = ctxt.get_binding('propname')[-1].upper()
        if latest in ('COMPACT', 'CLUSTERING'):
            return ()
    return ['=']
completer_for('property', 'propeq')(prop_equals_completer)
@completer_for('property', 'propname')
def prop_name_completer(ctxt, cass):
    """Dispatch property-name completion to the keyspace or table helper."""
    helper = ks_prop_name_completer if working_on_keyspace(ctxt) else cf_prop_name_completer
    return helper(ctxt, cass)
@completer_for('propertyValue', 'propsimpleval')
def prop_val_completer(ctxt, cass):
    """Dispatch simple property-value completion to the right helper."""
    helper = ks_prop_val_completer if working_on_keyspace(ctxt) else cf_prop_val_completer
    return helper(ctxt, cass)
@completer_for('propertyValue', 'propmapkey')
def prop_val_mapkey_completer(ctxt, cass):
    """Dispatch map-key completion inside a property value."""
    helper = ks_prop_val_mapkey_completer if working_on_keyspace(ctxt) else cf_prop_val_mapkey_completer
    return helper(ctxt, cass)
@completer_for('propertyValue', 'propmapval')
def prop_val_mapval_completer(ctxt, cass):
    """Dispatch map-value completion inside a property value."""
    helper = ks_prop_val_mapval_completer if working_on_keyspace(ctxt) else cf_prop_val_mapval_completer
    return helper(ctxt, cass)
@completer_for('propertyValue', 'ender')
def prop_val_mapender_completer(ctxt, cass):
    """Dispatch ','/'}' completion at the end of a property map entry."""
    helper = ks_prop_val_mapender_completer if working_on_keyspace(ctxt) else cf_prop_val_mapender_completer
    return helper(ctxt, cass)
def ks_prop_name_completer(ctxt, cass):
    """Offer keyspace property names; 'replication' first, since it is
    the one that must be present."""
    seen = ctxt.get_binding('propname', ())
    return ['durable_writes'] if 'replication' in seen else ['replication']
def ks_prop_val_completer(ctxt, cass):
    """Offer a value template for the keyspace property being set."""
    prop = ctxt.get_binding('propname')[-1]
    templates = {
        'durable_writes': ["'true'", "'false'"],
        'replication': ["{'class': '"],
    }
    return templates.get(prop, ())
def ks_prop_val_mapkey_completer(ctxt, cass):
    """Complete map keys inside a replication = {...} literal.

    Offers 'class' until one is given; afterwards offers the remaining
    strategy-specific option names (or a datacenter-name hint for NTS).
    """
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    else:
        return ["'class'"]
    if repclass in CqlRuleSet.replication_factor_strategies:
        opts = set(('replication_factor',))
    elif repclass == 'NetworkTopologyStrategy':
        return [Hint('<dc_name>')]
    else:
        # BUG FIX: with an unknown/custom strategy class neither branch
        # above ran, leaving 'opts' unbound and raising NameError on the
        # final line. There is nothing sensible to suggest, so offer ().
        return ()
    return map(escape_value, opts.difference(keysseen))
def ks_prop_val_mapval_completer(ctxt, cass):
    """Complete a map value inside replication = {...}: strategy class
    names for the 'class' key, otherwise a generic term hint."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return ()
    lastkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if lastkey == 'class':
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<term>')]
def ks_prop_val_mapender_completer(ctxt, cass):
    """Pick ',' while more replication options are expected, '}' once the
    map looks complete."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return [',']
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    repclass = None
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    if repclass is None:
        # No strategy chosen yet: the map cannot be complete.
        return [',']
    if repclass in CqlRuleSet.replication_factor_strategies:
        if 'replication_factor' not in keysseen:
            return [',']
    if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
        return [',']
    return ['}']
def cf_prop_name_completer(ctxt, cass):
    """All known table option names (simple and map-valued alike)."""
    all_options = (CqlRuleSet.columnfamily_layout_options
                   + CqlRuleSet.columnfamily_layout_map_options)
    return [opt[0] for opt in all_options]
def cf_prop_val_completer(ctxt, cass):
    """Suggest a value template or hint for the table option being set."""
    this_opt = ctxt.get_binding('propname')[-1]
    # Map-valued options open with their most useful first key.
    map_openers = {
        'compression': ["{'sstable_compression': '"],
        'compaction': ["{'class': '"],
        'caching': ["{'keys': '"],
    }
    if this_opt in map_openers:
        return map_openers[this_opt]
    if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
                    'dclocal_read_repair_chance'):
        return [Hint('<float_between_0_and_1>')]
    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
                    'gc_grace_seconds', 'min_index_interval', 'max_index_interval'):
        return [Hint('<integer>')]
    return [Hint('<option_value>')]
def cf_prop_val_mapkey_completer(ctxt, cass):
    """Complete sub-option keys inside a map-valued table option.

    For 'compaction', the offered keys grow with strategy-specific
    sub-options once a 'class' has been chosen.
    """
    optname = ctxt.get_binding('propname')[-1]
    subopts = None
    for cql3option, _, candidate_subopts in CqlRuleSet.columnfamily_layout_map_options:
        if optname == cql3option:
            subopts = candidate_subopts
            break
    if subopts is None:
        # Not a map-valued option we know about.
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    if optname in ('compression', 'caching'):
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'compaction':
        pairsseen = dict(zip(keysseen, valsseen))
        if 'class' not in pairsseen:
            # A strategy class must come first.
            return ["'class'"]
        strategy = pairsseen['class'].split('.')[-1]
        opts = set(subopts)
        strategy_extras = {
            'SizeTieredCompactionStrategy': ('min_sstable_size', 'min_threshold',
                                             'bucket_high', 'bucket_low'),
            'LeveledCompactionStrategy': ('sstable_size_in_mb',),
            'DateTieredCompactionStrategy': ('base_time_seconds', 'max_sstable_age_days',
                                             'timestamp_resolution', 'min_threshold'),
        }
        opts.update(strategy_extras.get(strategy, ()))
        return map(escape_value, opts)
    return ()
def cf_prop_val_mapval_completer(ctxt, cass):
    """Complete a sub-option value inside a map-valued table option."""
    opt = ctxt.get_binding('propname')[-1]
    key = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if opt == 'compaction':
        if key == 'class':
            return map(escape_value, CqlRuleSet.available_compaction_classes)
        return [Hint('<option_value>')]
    if opt == 'compression':
        if key == 'sstable_compression':
            return map(escape_value, CqlRuleSet.available_compression_classes)
        return [Hint('<option_value>')]
    if opt == 'caching':
        if key == 'rows_per_partition':
            return ["'ALL'", "'NONE'", Hint('#rows_per_partition')]
        if key == 'keys':
            return ["'ALL'", "'NONE'"]
    return ()
def cf_prop_val_mapender_completer(ctxt, cass):
    """Either continue the option map with ',' or close it with '}'."""
    return [',', '}']
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
    """The only way to start a token definition."""
    return ['token(']
@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
    """All simple (non-collection) CQL type names."""
    return simple_cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Every keyspace name known to the cluster."""
    return map(maybe_escape_name, cass.get_keyspace_names())
@completer_for('nonSystemKeyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Keyspace names, excluding the system keyspaces."""
    candidates = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
    return map(maybe_escape_name, candidates)
@completer_for('alterableKeyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Keyspace names that may be ALTERed.

    NOTE(review): NONALTERBALE_KEYSPACES is spelled that way at its
    definition site elsewhere in this module; keep the reference in sync.
    """
    candidates = [n for n in cass.get_keyspace_names() if n not in NONALTERBALE_KEYSPACES]
    return map(maybe_escape_name, candidates)
def cf_ks_name_completer(ctxt, cass):
    """Keyspace-qualified prefixes ('ks.') for table names."""
    return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
completer_for('columnFamilyName', 'ksname')(cf_ks_name_completer)
def cf_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator only after a real keyspace name."""
    if dequote_name(ctxt.get_binding('ksname')) in cass.get_keyspace_names():
        return ['.']
    return []
completer_for('columnFamilyName', 'dot')(cf_ks_dot_completer)
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Table names in the bound keyspace (or the current one)."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        cfnames = cass.get_columnfamily_names(ks)
    except Exception:
        # With no explicit keyspace there may simply be no current one;
        # offer nothing in that case rather than erroring out.
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, cfnames)
completer_for('userTypeName', 'ksname')(cf_ks_name_completer)
completer_for('userTypeName', 'dot')(cf_ks_dot_completer)
def ut_name_completer(ctxt, cass):
    """User-defined-type names in the bound keyspace (or current one)."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        utnames = cass.get_usertype_names(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, utnames)
completer_for('userTypeName', 'utname')(ut_name_completer)
completer_for('userType', 'utname')(ut_name_completer)
@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
    """Deliberately complete nothing: this production only allows certain
    keywords to be used as column names, CF names, property values, etc."""
    return ()
def get_table_meta(ctxt, cass):
    """Look up table metadata for the ksname/cfname bound in *ctxt*."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    cf = dequote_name(ctxt.get_binding('cfname'))
    return cass.get_table_meta(ks, cf)
def get_ut_layout(ctxt, cass):
    """Look up the field layout of the user type bound in *ctxt*."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    ut = dequote_name(ctxt.get_binding('utname'))
    return cass.get_usertype_layout(ks, ut)
def working_on_keyspace(ctxt):
    """True when the statement under completion targets a keyspace (the
    'wat' binding is KEYSPACE or SCHEMA) rather than a table."""
    return ctxt.get_binding('wat').upper() in ('KEYSPACE', 'SCHEMA')
# Grammar productions for USE and SELECT (the string is parsed by the
# cqlsh pylib grammar engine; completers for the named bindings follow).
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" ( "JSON" )? <selectClause>
"FROM" cf=<columnFamilyName>
( "WHERE" <whereClause> )?
( "ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
( "LIMIT" limit=<wholenumber> )?
( "ALLOW" "FILTERING" )?
;
<whereClause> ::= <relation> ( "AND" <relation> )*
;
<relation> ::= [rel_lhs]=<cident> ( "[" <term> "]" )? ( "=" | "<" | ">" | "<=" | ">=" | "CONTAINS" ( "KEY" )? ) <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= "DISTINCT"? <selector> ("AS" <cident>)? ("," <selector> ("AS" <cident>)?)*
| "*"
;
<udtSubfieldSelection> ::= <identifier> "." <identifier>
;
<selector> ::= [colname]=<cident>
| <udtSubfieldSelection>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| "COUNT" "(" star=( "*" | "1" ) ")"
| <functionName> <selectionFunctionArguments>
;
<selectionFunctionArguments> ::= "(" ( <selector> ( "," <selector> )* )? ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
'''
def _names_in_bound_keyspace(ctxt, fetch):
    """Fetch names via fetch(ks) for the keyspace bound in *ctxt*,
    swallowing lookup errors only when no keyspace was given explicitly."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        names = fetch(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, names)
def udf_name_completer(ctxt, cass):
    """User-defined function names in the bound keyspace."""
    return _names_in_bound_keyspace(ctxt, cass.get_userfunction_names)
def uda_name_completer(ctxt, cass):
    """User-defined aggregate names in the bound keyspace."""
    return _names_in_bound_keyspace(ctxt, cass.get_useraggregate_names)
def udf_uda_name_completer(ctxt, cass):
    """Function and aggregate names combined."""
    def fetch(ks):
        return cass.get_userfunction_names(ks) + cass.get_useraggregate_names(ks)
    return _names_in_bound_keyspace(ctxt, fetch)
def ref_udf_name_completer(ctxt, cass):
    """Function names usable as SFUNC/FINALFUNC references; best-effort,
    so any lookup failure yields no completions."""
    try:
        names = cass.get_userfunction_names(None)
    except Exception:
        return ()
    return map(maybe_escape_name, names)
# Register keyspace-prefix, dot, and name completers for every
# function/aggregate name production.
completer_for('functionAggregateName', 'ksname')(cf_ks_name_completer)
completer_for('functionAggregateName', 'dot')(cf_ks_dot_completer)
completer_for('functionAggregateName', 'functionname')(udf_uda_name_completer)
completer_for('anyFunctionName', 'ksname')(cf_ks_name_completer)
completer_for('anyFunctionName', 'dot')(cf_ks_dot_completer)
completer_for('anyFunctionName', 'udfname')(udf_name_completer)
completer_for('userFunctionName', 'ksname')(cf_ks_name_completer)
completer_for('userFunctionName', 'dot')(cf_ks_dot_completer)
completer_for('userFunctionName', 'udfname')(udf_name_completer)
completer_for('refUserFunctionName', 'udfname')(ref_udf_name_completer)
# BUG FIX: the 'ksname' position was registered with cf_ks_dot_completer,
# which completes the '.' separator, not keyspace names. Every sibling
# *Name production pairs 'ksname' with cf_ks_name_completer.
completer_for('userAggregateName', 'ksname')(cf_ks_name_completer)
completer_for('userAggregateName', 'dot')(cf_ks_dot_completer)
completer_for('userAggregateName', 'udaname')(uda_name_completer)
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
    """Offer the next clustering column eligible for ORDER BY, in order."""
    prev_order_cols = ctxt.get_binding('ordercol', ())
    keyname = ctxt.get_binding('keyname')
    if keyname is None:
        keyname = ctxt.get_binding('rel_lhs', ())
        if not keyname:
            return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
    layout = get_table_meta(ctxt, cass)
    order_by_candidates = [col.name for col in layout.clustering_key]
    if len(order_by_candidates) > len(prev_order_cols):
        return [maybe_escape_name(order_by_candidates[len(prev_order_cols)])]
    return [Hint('No more orderable columns here.')]
@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
    """Complete the TOKEN( opener in one step."""
    return ['TOKEN(']
@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
    """TOKEN() arguments are the partition-key columns."""
    layout = get_table_meta(ctxt, cass)
    return [key.name for key in layout.partition_key]
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Offer columns usable on the left of a WHERE relation: the next
    unfiltered partition/clustering key component plus indexed columns."""
    layout = get_table_meta(ctxt, cass)
    # BUG FIX: the original unconditionally seeded this set with
    # partition_key[0] and clustering_key[0]; a table with no clustering
    # column made the latter raise IndexError.
    filterable = set()
    if layout.partition_key:
        filterable.add(layout.partition_key[0].name)
    if layout.clustering_key:
        filterable.add(layout.clustering_key[0].name)
    already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs', ()))
    for num in range(1, len(layout.partition_key)):
        if layout.partition_key[num - 1].name in already_filtered_on:
            filterable.add(layout.partition_key[num].name)
        else:
            break
    for num in range(1, len(layout.clustering_key)):
        if layout.clustering_key[num - 1].name in already_filtered_on:
            filterable.add(layout.clustering_key[num].name)
        else:
            break
    for cd in layout.columns.values():
        if cd.index:
            filterable.add(cd.name)
    return map(maybe_escape_name, filterable)
explain_completion('selector', 'colname')
# Grammar productions for INSERT and its USING options.
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
( ( "(" [colname]=<cident> ( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<term> ( valcomma="," [newval]=<term> )* valcomma=")")
| ("JSON" <stringLiteral>))
( "IF" "NOT" "EXISTS")?
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
def regular_column_names(table_meta):
    """Return the non-primary-key column names of *table_meta*.

    Returns an empty list when the metadata is missing or has no columns.
    """
    if not table_meta or not table_meta.columns:
        return []
    key_names = ([key.name for key in table_meta.partition_key]
                 + [key.name for key in table_meta.clustering_key])
    return list(set(table_meta.columns.keys()).difference(key_names))
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
    """Offer primary-key columns first (in declaration order), then any
    remaining regular columns."""
    layout = get_table_meta(ctxt, cass)
    colnames = set(map(dequote_name, ctxt.get_binding('colname', ())))
    for key in layout.primary_key:
        if key.name not in colnames:
            return [maybe_escape_name(key.name)]
    remaining = set(regular_column_names(layout)) - colnames
    return map(maybe_escape_name, remaining)
@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
    """Suggest a collection opener, boolean literal, or typed hint for
    the value of the next INSERT column."""
    layout = get_table_meta(ctxt, cass)
    insertcols = map(dequote_name, ctxt.get_binding('colname'))
    valuesdone = ctxt.get_binding('newval', ())
    if len(valuesdone) >= len(insertcols):
        return []
    curcol = insertcols[len(valuesdone)]
    cqltype = layout.columns[curcol].data_type
    coltype = cqltype.typename
    openers = {'map': ['{'], 'set': ['{'], 'list': ['['],
               'boolean': ['true', 'false']}
    if coltype in openers:
        return openers[coltype]
    return [Hint('<value for %s (%s)>' % (maybe_escape_name(curcol),
                                          cqltype.cql_parameterized_type()))]
@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
    """',' while values remain to be supplied, otherwise ')'."""
    # Metadata lookup is kept for its validation side effect (it raises
    # if the table cannot be resolved), matching the other completers.
    layout = get_table_meta(ctxt, cass)
    numcols = len(ctxt.get_binding('colname', ()))
    numvals = len(ctxt.get_binding('newval', ()))
    return [','] if numcols > numvals else [')']
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """USING options (TIMESTAMP/TTL) not already present."""
    remaining = set(('TIMESTAMP', 'TTL'))
    for opt in ctxt.get_binding('insertopt', ()):
        remaining.discard(opt.split()[0])
    return remaining
# Grammar productions for UPDATE, SET assignments, and IF conditions.
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ))?
;
<assignment> ::= updatecol=<cident>
( "=" update_rhs=( <term> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )?
| indexbracket="[" <term> "]" "=" <term> )
;
<conditions> ::= <condition> ( "AND" <condition> )*
;
<condition> ::= <cident> ( "[" <term> "]" )? ( ( "=" | "<" | ">" | "<=" | ">=" | "!=" ) <term>
| "IN" "(" <term> ( "," <term> )* ")")
;
'''
@completer_for('updateStatement', 'updateopt')
def insert_option_completer(ctxt, cass):
    """USING options (TIMESTAMP/TTL) not already given on this UPDATE."""
    remaining = set(('TIMESTAMP', 'TTL'))
    for opt in ctxt.get_binding('updateopt', ()):
        remaining.discard(opt.split()[0])
    return remaining
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Only non-primary-key columns can appear in SET."""
    layout = get_table_meta(ctxt, cass)
    return map(maybe_escape_name, regular_column_names(layout))
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """RHS suggestion for SET: the column itself for counters, a
    collection opener for maps/sets/lists, otherwise a typed-term hint."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    cqltype = layout.columns[curcol].data_type
    coltype = cqltype.typename
    if coltype == 'counter':
        return [maybe_escape_name(curcol)]
    if coltype in ('map', 'set'):
        return ["{"]
    if coltype == 'list':
        return ["["]
    return [Hint('<term (%s)>' % cqltype.cql_parameterized_type())]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """'+'/'-' only make sense on counter columns."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return ['+', '-'] if layout.columns[curcol].data_type.typename == 'counter' else []
@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
    """A whole-number delta is expected only for counter columns."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return [Hint('<wholenumber>')] if layout.columns[curcol].data_type.typename == 'counter' else []
@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
    """Offer '+' for list-append syntax (col = [...] + col)."""
    return ['+'] if ctxt.get_binding('update_rhs').startswith('[') else []
@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
    """After 'col = [...] +', the only sensible operand is the column."""
    if ctxt.get_binding('update_rhs').startswith('['):
        colname = dequote_name(ctxt.get_binding('updatecol'))
        return [maybe_escape_name(colname)]
    return []
@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
    """'[' for element assignment on maps and lists only."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return ['['] if layout.columns[curcol].data_type.typename in ('map', 'list') else []
# Grammar productions for DELETE.
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> )?
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ) )?
;
<deleteSelector> ::= delcol=<cident> ( memberbracket="[" memberselector=<term> "]" )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    """The only USING option on DELETE is TIMESTAMP; offer it once."""
    remaining = set(('TIMESTAMP',))
    for opt in ctxt.get_binding('delopt', ()):
        remaining.discard(opt.split()[0])
    return remaining
@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
    """Only regular (non-key) columns can be deleted."""
    return map(maybe_escape_name, regular_column_names(get_table_meta(ctxt, cass)))
# Grammar productions for BEGIN ... APPLY BATCH.
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"?
( [batchstmt]=<batchStatementMember> ";"? )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    """Batch-level USING options not already supplied (TIMESTAMP only)."""
    remaining = set(('TIMESTAMP',))
    for opt in ctxt.get_binding('batchopt', ()):
        remaining.discard(opt.split()[0])
    return remaining
# Grammar productions for TRUNCATE and CREATE KEYSPACE.
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "NOT" "EXISTS")? ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''
@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
    """Prefer KEYSPACE over the legacy SCHEMA spelling; only surface
    SCHEMA once the user has started typing something."""
    nothing_typed = ctxt.get_binding('partial', '') == ''
    return ['KEYSPACE'] if nothing_typed else ['KEYSPACE', 'SCHEMA']
# Grammar productions for CREATE TABLE/COLUMNFAMILY, including single-
# and composite-primary-key column specs.
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "NOT" "EXISTS")?
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <storageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <storageType>
"," [newcolname]=<cident> <storageType> ( "static" )?
( "," [newcolname]=<cident> <storageType> ( "static" )? )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
    """Offer the columns declared so far for CLUSTERING ORDER BY.

    Not all of them are actually orderable, but telling which ones are
    isn't worth the effort here.
    """
    return map(dequote_name, ctxt.get_binding('newcolname', ()))
@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
    """Prefer TABLE over the legacy COLUMNFAMILY spelling; only surface
    the latter once the user has started typing something."""
    nothing_typed = ctxt.get_binding('partial', '') == ''
    return ['TABLE'] if nothing_typed else ['TABLE', 'COLUMNFAMILY']
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')
@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
    """Offer '.' only when the prefix names an existing keyspace."""
    if dequote_name(ctxt.get_binding('ks')) in cass.get_keyspace_names():
        return ['.']
    return []
@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
    """Offer the next declared column usable as a partition-key part."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = map(dequote_name, ctxt.get_binding('ptkey', ()))
    # BUG FIX: guard the list before indexing -- the original
    # 'while cols_declared[0] in ...' raised IndexError once every
    # declared column had already been consumed by the key.
    while cols_declared and cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        # At least one non-key column must remain.
        return ()
    return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
    """Offer the next declared column usable as a clustering-key part."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = map(dequote_name,
                         ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ()))
    # Same IndexError guard as in the pkDef completer above.
    while cols_declared and cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
    """After PRIMARY, complete the 'KEY (' opener in one step."""
    return ['KEY (']
@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
    """Opening parenthesis of the composite PRIMARY KEY definition."""
    return ['(']
@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
    """',' while there are declared columns left to add to the key."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('pkey', ())
    if len(pieces_already) >= len(cols_declared) - 1:
        return ()
    return [',']
# Grammar productions for CREATE INDEX / MATERIALIZED VIEW / TYPE /
# FUNCTION / AGGREGATE, plus hint-only completions for new names.
syntax_rules += r'''
<idxName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<createIndexStatement> ::= "CREATE" "CUSTOM"? "INDEX" ("IF" "NOT" "EXISTS")? indexname=<idxName>? "ON"
cf=<columnFamilyName> "(" (
col=<cident> |
"keys(" col=<cident> ")" |
"full(" col=<cident> ")"
) ")"
( "USING" <stringLiteral> ( "WITH" "OPTIONS" "=" <mapLiteral> )? )?
;
<createMaterializedViewStatement> ::= "CREATE" "MATERIALIZED" "VIEW" ("IF" "NOT" "EXISTS")? <columnFamilyName>?
"AS" <selectStatement>
"PRIMARY" "KEY" <pkDef>
;
<createUserTypeStatement> ::= "CREATE" "TYPE" ( ks=<nonSystemKeyspaceName> dot="." )? typename=<cfOrKsName> "(" newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )*
")"
;
<createFunctionStatement> ::= "CREATE" ("OR" "REPLACE")? "FUNCTION"
("IF" "NOT" "EXISTS")?
<userFunctionName>
( "(" ( newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )* )?
")" )?
("RETURNS" "NULL" | "CALLED") "ON" "NULL" "INPUT"
"RETURNS" <storageType>
"LANGUAGE" <cident> "AS" <stringLiteral>
;
<createAggregateStatement> ::= "CREATE" ("OR" "REPLACE")? "AGGREGATE"
("IF" "NOT" "EXISTS")?
<userAggregateName>
( "("
( <storageType> ( "," <storageType> )* )?
")" )?
"SFUNC" <refUserFunctionName>
"STYPE" <storageType>
( "FINALFUNC" <refUserFunctionName> )?
( "INITCOND" <term> )?
;
'''
# New names cannot be completed, only explained.
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
explain_completion('createUserTypeStatement', 'typename', '<new_type_name>')
explain_completion('createUserTypeStatement', 'newcol', '<new_field_name>')
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    """Columns of the target table that are not yet indexed."""
    layout = get_table_meta(ctxt, cass)
    unindexed = [cd.name for cd in layout.columns.values() if not cd.index]
    return map(maybe_escape_name, unindexed)
# Grammar productions for the DROP family of statements.
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ("IF" "EXISTS")? ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
;
<indexName> ::= ( ksname=<idxOrKsName> dot="." )? idxname=<idxOrKsName> ;
<idxOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<dropIndexStatement> ::= "DROP" "INDEX" ("IF" "EXISTS")? idx=<indexName>
;
<dropMaterializedViewStatement> ::= "DROP" "MATERIALIZED" "VIEW" ("IF" "EXISTS")? mv=<columnFamilyName>
;
<dropUserTypeStatement> ::= "DROP" "TYPE" ut=<userTypeName>
;
<dropFunctionStatement> ::= "DROP" "FUNCTION" ( "IF" "EXISTS" )? <userFunctionName>
;
<dropAggregateStatement> ::= "DROP" "AGGREGATE" ( "IF" "EXISTS" )? <userAggregateName>
;
'''
@completer_for('indexName', 'ksname')
def idx_ks_name_completer(ctxt, cass):
    """Keyspace-qualified prefixes ('ks.') for index names."""
    return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
@completer_for('indexName', 'dot')
def idx_ks_dot_completer(ctxt, cass):
    """Offer '.' only after an existing keyspace name."""
    if dequote_name(ctxt.get_binding('ksname')) in cass.get_keyspace_names():
        return ['.']
    return []
@completer_for('indexName', 'idxname')
def idx_ks_idx_name_completer(ctxt, cass):
    """Index names in the bound keyspace (or the current one)."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        idxnames = cass.get_index_names(ks)
    except Exception:
        # No explicit keyspace and no current one: just offer nothing.
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, idxnames)
# Grammar productions for ALTER TABLE and ALTER TYPE.
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType> ("static")?
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
<alterUserTypeStatement> ::= "ALTER" "TYPE" ut=<userTypeName>
<alterTypeInstructions>
;
<alterTypeInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType>
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
'''
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    """Existing column names of the table being altered."""
    layout = get_table_meta(ctxt, cass)
    return map(maybe_escape_name, [str(col) for col in layout.columns])
@completer_for('alterTypeInstructions', 'existcol')
def alter_type_field_completer(ctxt, cass):
    """Existing field names of the user type being altered."""
    layout = get_ut_layout(ctxt, cass)
    # Each layout entry is a (name, ...) pair; take the name.
    return map(maybe_escape_name, [entry[0] for entry in layout])
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
explain_completion('alterTypeInstructions', 'newcol', '<new_field_name>')
# Grammar productions for ALTER KEYSPACE.
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" wat=( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <property> ( "AND" <property> )*
;
'''
# Grammar productions for the legacy USER management statements.
syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" ( "IF" "NOT" "EXISTS" )? <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" ( "IF" "EXISTS" )? <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
# Grammar productions for ROLE management statements.
syntax_rules += r'''
<rolename> ::= <identifier>
| <quotedName>
| <unreservedKeyword>
;
<createRoleStatement> ::= "CREATE" "ROLE" <rolename>
( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
;
<alterRoleStatement> ::= "ALTER" "ROLE" <rolename>
( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
;
<roleProperty> ::= "PASSWORD" "=" <stringLiteral>
| "OPTIONS" "=" <mapLiteral>
| "SUPERUSER" "=" <boolean>
| "LOGIN" "=" <boolean>
;
<dropRoleStatement> ::= "DROP" "ROLE" <rolename>
;
<grantRoleStatement> ::= "GRANT" <rolename> "TO" <rolename>
;
<revokeRoleStatement> ::= "REVOKE" <rolename> "FROM" <rolename>
;
<listRolesStatement> ::= "LIST" "ROLES"
( "OF" <rolename> )? "NORECURSIVE"?
;
'''
# Grammar productions for GRANT/REVOKE/LIST permissions and the
# resource hierarchy they operate on.
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <rolename>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <rolename>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <rolename> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
| "DESCRIBE"
| "EXECUTE"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
| <roleResource>
| <functionResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
<roleResource> ::= ("ALL" "ROLES")
| ("ROLE" <rolename>)
;
<functionResource> ::= ( "ALL" "FUNCTIONS" ("IN KEYSPACE" <keyspaceName>)? )
| ( "FUNCTION" <functionAggregateName>
( "(" ( newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )* )?
")" )
)
;
'''
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
    """Existing user names, quoted when not valid bare identifiers."""
    def maybe_quote(name):
        return name if CqlRuleSet.is_valid_cql3_name(name) else "'%s'" % name
    # CREATE USER names a brand-new user: nothing to complete.
    if ctxt.matched[0][0] == 'K_CREATE':
        return [Hint('<username>')]
    session = cass.session
    return [maybe_quote(row.values()[0].replace("'", "''"))
            for row in session.execute("LIST USERS")]
@completer_for('rolename', 'role')
def rolename_completer(ctxt, cass):
    """Existing role names, quoted when not valid bare identifiers."""
    def maybe_quote(name):
        return name if CqlRuleSet.is_valid_cql3_name(name) else "'%s'" % name
    # CREATE ROLE names a brand-new role: nothing to complete.
    if ctxt.matched[0][0] == 'K_CREATE':
        return [Hint('<rolename>')]
    session = cass.session
    return [maybe_quote(row[0].replace("'", "''"))
            for row in session.execute("LIST ROLES")]
# Grammar productions for CREATE/DROP TRIGGER.
syntax_rules += r'''
<createTriggerStatement> ::= "CREATE" "TRIGGER" ( "IF" "NOT" "EXISTS" )? <cident>
"ON" cf=<columnFamilyName> "USING" class=<stringLiteral>
;
<dropTriggerStatement> ::= "DROP" "TRIGGER" ( "IF" "EXISTS" )? triggername=<cident>
"ON" cf=<columnFamilyName>
;
'''
explain_completion('createTriggerStatement', 'class', '\'fully qualified class name\'')
def get_trigger_names(ctxt, cass):
    """Trigger names in the bound keyspace (or the current one when no
    keyspace is bound)."""
    ks = ctxt.get_binding('ksname', None)
    return cass.get_trigger_names(dequote_name(ks) if ks is not None else None)
@completer_for('dropTriggerStatement', 'triggername')
def alter_type_field_completer(ctxt, cass):
    """Existing trigger names for DROP TRIGGER.

    NOTE(review): the function name shadows the earlier alter-type
    completer; only the registered reference matters, but a rename
    upstream would be clearer.
    """
    return map(maybe_escape_name, get_trigger_names(ctxt, cass))
# END SYNTAX/COMPLETION RULE DEFINITIONS
CqlRuleSet.append_rules(syntax_rules)
| {
"content_hash": "49177f7171d53d6848408e89b5aa0c61",
"timestamp": "",
"source": "github",
"line_count": 1377,
"max_line_length": 141,
"avg_line_length": 36.98983297022513,
"alnum_prop": 0.5439481692352999,
"repo_name": "mt0803/cassandra",
"id": "a46da9135d190e5a6727ebbb1914f47a3d87e063",
"size": "51720",
"binary": false,
"copies": "6",
"ref": "refs/heads/trunk",
"path": "pylib/cqlshlib/cql3handling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "801"
},
{
"name": "Batchfile",
"bytes": "23765"
},
{
"name": "GAP",
"bytes": "67383"
},
{
"name": "Java",
"bytes": "11980524"
},
{
"name": "PigLatin",
"bytes": "4654"
},
{
"name": "PowerShell",
"bytes": "40270"
},
{
"name": "Python",
"bytes": "401847"
},
{
"name": "Shell",
"bytes": "54982"
},
{
"name": "Thrift",
"bytes": "40282"
}
],
"symlink_target": ""
} |
import builtins

# Built-in functions whose repr must read "<built-in function NAME>".
funcs = [
    "abs", "all", "any", "ascii", "bin", "callable", "chr", "compile",
    "delattr", "dir", "divmod", "eval", "exec", "exit", "format", "getattr",
    "globals", "hasattr", "hash", "help", "hex", "id", "input", "isinstance",
    "issubclass", "iter", "len", "locals", "max", "min", "next", "oct",
    "open", "ord", "pow", "print", "quit", "repr", "round", "setattr",
    "sorted", "sum", "vars"
]
# Built-in types whose repr must read "<class 'NAME'>".
classes = [
    "bool", "bytearray", "bytes", "classmethod", "complex", "dict", "enumerate",
    "filter", "float", "frozenset", "int", "list", "map", "memoryview",
    "object", "property", "range", "reversed", "set", "slice", "staticmethod",
    "str", "super", "tuple", "type", "zip"
]
# Added by the site module with a different repr; skipped below.
special_cases = "exit", "quit", "help"
for func in funcs:
    if func in special_cases:
        continue
    # Look names up on the builtins module rather than __builtins__: the
    # latter is an implementation detail that is a dict (not a module)
    # whenever this file is imported rather than run as __main__.
    assert str(getattr(builtins, func)) == f"<built-in function {func}>"
for kl in classes:
    obj = getattr(builtins, kl)
    assert str(obj) == f"<class '{kl}'>", f"erreur pour {kl} : {obj}"
"content_hash": "986a275dd567c4c247b3d1fe38b3cab3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 38.76923076923077,
"alnum_prop": 0.5496031746031746,
"repo_name": "jonathanverner/brython",
"id": "aaecc4b9ce72ea3eed4a678d3dca4256e5f4cafd",
"size": "1008",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "www/tests/test_print.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17046"
},
{
"name": "HTML",
"bytes": "4989399"
},
{
"name": "JavaScript",
"bytes": "5841054"
},
{
"name": "Makefile",
"bytes": "61"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "14816501"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "387"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import (unicode_literals, absolute_import)
from django.db.models import CharField
from . import fields
class VATINField(CharField):
    """Model field storing a VIES VAT identification number."""

    description = "A VIES VAT field."

    def __init__(self, *args, **kwargs):
        # Force the column width; VATINs fit in 14 characters here
        # (assumes 2-letter country code + up to 12 chars -- TODO confirm).
        kwargs['max_length'] = 14
        super(VATINField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        """Use the VIES form field; caller-supplied kwargs take precedence."""
        options = {
            'form_class': fields.VATINField,
            'required': not (self.blank or self.null)
        }
        options.update(kwargs)
        return super(VATINField, self).formfield(**options)
# Register this module's fields with South's model introspection when South
# (legacy Django migrations) is installed.
try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    # South not installed; nothing to register.
    pass
else:
    # Only the import is guarded: previously the call sat inside the try, so
    # an ImportError raised *by* add_introspection_rules was silently
    # swallowed as well.
    add_introspection_rules([], [r"^vies\.models"])
| {
"content_hash": "7a91f9d088d4bed57f80314c8308daed",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 61,
"avg_line_length": 25.310344827586206,
"alnum_prop": 0.6362397820163488,
"repo_name": "vdboor/django-vies",
"id": "ed9b871cd31eab2c5296aa00ed16564b392661b8",
"size": "758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vies/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17901"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from geokey.contributions.models import Observation
from geokey.projects.tests.model_factories import ProjectF, UserF
from geokey.categories.tests.model_factories import (
CategoryFactory, LookupFieldFactory, LookupValueFactory,
TextFieldFactory, MultipleLookupFieldFactory, MultipleLookupValueFactory
)
from ..model_factories import ObservationFactory
class ObservationTest(TestCase):
    """Checks the role-based visibility filters on the Observation queryset."""

    def setUp(self):
        self.creator = UserF.create()
        # Five observations in each lifecycle state, all by the same creator.
        for status in ('active', 'draft', 'pending', 'deleted'):
            ObservationFactory.create_batch(
                5, **{'status': status, 'creator': self.creator})

    def test_for_creator_moderator(self):
        """The creator, acting as moderator, sees everything but deleted."""
        queryset = Observation.objects.all().for_moderator(self.creator)
        self.assertEqual(len(queryset), 15)
        for observation in queryset:
            self.assertNotIn(observation.status, ['deleted'])

    def test_for_moderator(self):
        """Another moderator sees neither drafts nor deleted observations."""
        queryset = Observation.objects.all().for_moderator(UserF.create())
        self.assertEqual(len(queryset), 10)
        for observation in queryset:
            self.assertNotIn(observation.status, ['draft', 'deleted'])

    def test_for_viewer(self):
        """A plain viewer only sees active observations."""
        queryset = Observation.objects.all().for_viewer(UserF.create())
        self.assertEqual(len(queryset), 5)
        for observation in queryset:
            self.assertNotIn(
                observation.status, ['draft', 'pending', 'deleted'])
class TestSearch(TestCase):
    """Tests for free-text search over observation properties."""

    def setUp(self):
        # Two groups of five observations, distinguished by a text field.
        o_type = CategoryFactory.create()
        TextFieldFactory.create(**{'key': 'key', 'category': o_type})
        ObservationFactory.create_batch(5, **{
            'properties': {'key': 'blah'},
            'category': o_type
        })
        ObservationFactory.create_batch(5, **{
            'properties': {'key': 'blub'},
            'category': o_type
        })

    def test_bl(self):
        """A prefix shared by both values matches all ten observations."""
        result = Observation.objects.all().search('bl')
        self.assertEqual(len(result), 10)

    def test_blah(self):
        result = Observation.objects.all().search('blah')
        self.assertEqual(len(result), 5)
        for o in result:
            self.assertEqual(o.properties.get('key'), 'blah')

    def test_blub(self):
        result = Observation.objects.all().search('blub')
        self.assertEqual(len(result), 5)
        for o in result:
            self.assertEqual(o.properties.get('key'), 'blub')

    def test_single_lookup(self):
        """Search matches the display name of a single-lookup value."""
        project = ProjectF.create()
        category = CategoryFactory.create(**{'project': project})
        lookup = LookupFieldFactory.create(
            **{'category': category, 'key': 'lookup'}
        )
        kermit = LookupValueFactory.create(**{
            'field': lookup,
            'name': 'Kermit'
        })
        gonzo = LookupValueFactory.create(**{
            'field': lookup,
            'name': 'Gonzo'
        })
        ObservationFactory.create_batch(3, **{
            'project': project,
            'category': category,
            'properties': {'lookup': kermit.id}
        })
        ObservationFactory.create_batch(3, **{
            'project': project,
            'category': category,
            'properties': {'lookup': gonzo.id}
        })
        result = project.observations.all().search('kermit')
        self.assertEqual(len(result), 3)
        for o in result:
            self.assertEqual(o.properties.get('lookup'), kermit.id)

    def test_multiple_lookup(self):
        """Search matches any display name in a multiple-lookup value."""
        project = ProjectF.create()
        category = CategoryFactory.create(**{'project': project})
        lookup = MultipleLookupFieldFactory.create(
            **{'category': category, 'key': 'lookup'}
        )
        kermit = MultipleLookupValueFactory.create(**{
            'field': lookup,
            'name': 'Kermit'
        })
        gonzo = MultipleLookupValueFactory.create(**{
            'field': lookup,
            'name': 'Gonzo'
        })
        piggy = MultipleLookupValueFactory.create(**{
            'field': lookup,
            'name': 'Ms Piggy'
        })
        ObservationFactory.create_batch(3, **{
            'project': project,
            'category': category,
            'properties': {'lookup': [piggy.id, kermit.id]}
        })
        ObservationFactory.create_batch(3, **{
            'project': project,
            'category': category,
            'properties': {'lookup': [gonzo.id]}
        })
        result = project.observations.all().search('kermit')
        self.assertEqual(len(result), 3)
        for o in result:
            self.assertIn(kermit.id, o.properties.get('lookup'))
        result = project.observations.all().search('piggy')
        self.assertEqual(len(result), 3)
        for o in result:
            # Fixed copy-paste: this previously asserted kermit.id; the
            # 'piggy' search must return observations tagged with piggy.
            self.assertIn(piggy.id, o.properties.get('lookup'))
| {
"content_hash": "fcdc4386f1ea023ab4c72e517711e012",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 33.76129032258064,
"alnum_prop": 0.5730938276323333,
"repo_name": "nagyistoce/geokey",
"id": "2aaba5593ce356ee39f28ea950be2ba9bc800aa1",
"size": "5233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geokey/contributions/tests/observations/test_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15102"
},
{
"name": "HTML",
"bytes": "198094"
},
{
"name": "Handlebars",
"bytes": "7769"
},
{
"name": "JavaScript",
"bytes": "277022"
},
{
"name": "Python",
"bytes": "846818"
}
],
"symlink_target": ""
} |
from cyborg.tests.unit.api import base
class APITestV2(base.BaseApiTest):
    """Base class for v2 API tests; request paths are rooted under /v2."""

    # Prepended to every request path by the base test class.
    PATH_PREFIX = '/v2'
| {
"content_hash": "2d74e4932e37b6efcc1cd76de7183f2b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 16.833333333333332,
"alnum_prop": 0.7227722772277227,
"repo_name": "openstack/nomad",
"id": "889bd3a43bb250f079bbe04362a8bf944b8fd09c",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyborg/tests/unit/api/controllers/v2/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Minimal packaging configuration for the flaskr application.
setup(
    name='flaskr',
    include_package_data=True,
    packages=['flaskr'],
    install_requires=['flask'],
)
| {
"content_hash": "5817d6a2baa09a3c6236e4da34837ad6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 30,
"avg_line_length": 16.1,
"alnum_prop": 0.5962732919254659,
"repo_name": "chloeyangu/BigDataAnalytics",
"id": "eadb5914c5e1abb37965c5fada26242b7d3ab7c3",
"size": "161",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Terrorisks/flaskr/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23307"
},
{
"name": "HTML",
"bytes": "2991098"
},
{
"name": "JavaScript",
"bytes": "331"
},
{
"name": "Jupyter Notebook",
"bytes": "6246553"
},
{
"name": "Python",
"bytes": "5861"
}
],
"symlink_target": ""
} |
"""
This module instantiates the Flask application and declares the main error
handling function ``make_json_error``.
"""
# IMPORTANT: updates to the following must also be done in setup.py.
__title__ = "birdlistn"
__version__ = "0.1.0"
__author__ = "Hugues Demers"
__email__ = "hdemers@gmail.com"
__copyright__ = "Copyright 2013 Hugues Demers"
__license__ = "MIT"
import os
import traceback
from flask import Flask, jsonify
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from cloudly import logger
from cloudly.notify import notify as cloudly_notify
# Log line format used for Flask's debug logger.
FORMAT = "%(asctime)s] %(levelname)s %(module)s %(funcName)s: %(message)s"

# The application
app = Flask(__name__)

# Debugging
app.debug = True
app.debug_log_format = FORMAT

log = logger.init(__name__)

# Set a 'SECRET_KEY' to enable the Flask session cookies
# NOTE(review): the hard-coded fallback key makes session cookies forgeable
# whenever WEBAPP_SESSION_SECRET_KEY is unset -- consider failing fast instead.
app.config['SECRET_KEY'] = os.environ.get("WEBAPP_SESSION_SECRET_KEY",
                                          'oftg09jW2FtbXfcud9OS')
# Make this app a JSON app.
# Inspired from cf. http://flask.pocoo.org/snippets/83/
def make_json_error(ex):
    """Render any exception as a JSON response carrying a status_code field.

    HTTP exceptions keep their own description and code; anything else is
    reported as a 500 with the exception's string form. Server errors also
    trigger a notification.
    """
    log.error(ex)
    log.error(traceback.format_exc())
    if isinstance(ex, HTTPException):
        message, code = ex.description, ex.code
    else:
        message, code = str(ex), 500
    # Werkzeug descriptions may be wrapped in <p> tags; strip them.
    message = message.replace("<p>", "").replace("</p>", "") if message else ""
    response = jsonify(message=message, status_code=code)
    response.status_code = code
    if code in [500]:
        notify(ex, code)
    return response
# Register make_json_error for every standard HTTP error code so the whole
# app answers in JSON. Iterating the mapping directly (instead of the
# Python-2-only .iterkeys()) behaves identically on Python 2 and also works
# on Python 3.
for code in default_exceptions:
    app.error_handler_spec[None][code] = make_json_error
def notify(exception, code=None):
    """Send a notification about *exception* through cloudly.

    :param exception: the exception being reported.
    :param code: HTTP status code; derived from the exception (or 500)
        when omitted.
    """
    if not code:
        code = exception.code if isinstance(exception, HTTPException) else 500
    # Bug fix: traceback.format_exc() takes an optional *limit* argument,
    # not an exception object -- passing the exception was incorrect.
    cloudly_notify("Exception: {}".format(code), "{}\n\n{}".format(
        exception, traceback.format_exc()))
import birdlistn.views # noqa
| {
"content_hash": "87053a2cb88bb1032ede9ae57cbed538",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 29.303030303030305,
"alnum_prop": 0.688210961737332,
"repo_name": "hdemers/birdlistn",
"id": "a39d412d1a480bd7499cde73e1fc3a2c75b448a1",
"size": "1934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "birdlistn/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5797"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import RegistrationFactory
from tests.factories import UserFactory
from scripts.retract_registrations import main
class TestRetractRegistrations(OsfTestCase):
    """Tests for the retract_registrations script's 48-hour waiting period."""

    def setUp(self):
        super(TestRetractRegistrations, self).setUp()
        self.user = UserFactory()
        self.registration = RegistrationFactory(creator=self.user)
        self.registration.is_public = True
        self.registration.retract_registration(self.user)
        self.registration.save()

    def _set_initiation_date(self, value):
        """Backdate the retraction to *value*.

        Retraction#initiation_date is read-only, so write through the field
        descriptor directly. Extracted to remove the hack repeated in every
        test below.
        """
        retraction = self.registration.retraction
        retraction._fields['initiation_date'].__set__(
            retraction, value, safe=True)
        retraction.save()

    def test_new_retraction_should_not_be_retracted(self):
        assert_false(self.registration.retraction.is_retracted)
        main(dry_run=False)
        assert_false(self.registration.retraction.is_retracted)

    def test_should_not_retract_pending_retraction_less_than_48_hours_old(self):
        self._set_initiation_date(datetime.utcnow() - timedelta(hours=47))
        assert_false(self.registration.retraction.is_retracted)
        main(dry_run=False)
        assert_false(self.registration.retraction.is_retracted)

    def test_should_retract_pending_retraction_that_is_48_hours_old(self):
        self._set_initiation_date(datetime.utcnow() - timedelta(hours=48))
        assert_false(self.registration.retraction.is_retracted)
        main(dry_run=False)
        assert_true(self.registration.retraction.is_retracted)

    def test_should_retract_pending_retraction_more_than_48_hours_old(self):
        self._set_initiation_date(datetime.utcnow() - timedelta(days=365))
        assert_false(self.registration.retraction.is_retracted)
        main(dry_run=False)
        assert_true(self.registration.retraction.is_retracted)

    def test_retraction_adds_to_parent_projects_log(self):
        initial_project_logs = len(self.registration.registered_from.logs)
        self._set_initiation_date(datetime.utcnow() - timedelta(days=365))
        assert_false(self.registration.retraction.is_retracted)
        main(dry_run=False)
        assert_true(self.registration.retraction.is_retracted)
        # Exactly one new log entry (the retraction approval) is added to
        # the parent project.
        assert_equal(len(self.registration.registered_from.logs),
                     initial_project_logs + 1)
| {
"content_hash": "e8b7dae6d0be605989b941af0687b14f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 109,
"avg_line_length": 40.41975308641975,
"alnum_prop": 0.6814294441050702,
"repo_name": "HarryRybacki/osf.io",
"id": "a1a4920e4dd7a17f74dd08a39d8e4fc611830dec",
"size": "3299",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "scripts/tests/test_retract_registrations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111556"
},
{
"name": "HTML",
"bytes": "32337"
},
{
"name": "JavaScript",
"bytes": "1082815"
},
{
"name": "Mako",
"bytes": "520606"
},
{
"name": "Python",
"bytes": "3063103"
},
{
"name": "Shell",
"bytes": "1735"
}
],
"symlink_target": ""
} |
# Subreddit to mirror and the Telegram channel that receives its posts.
subreddit = 'eminem'
t_channel = '@sub_eminem'


def send_post(submission, r2t):
    """Forward a reddit submission to the channel with default formatting."""
    return r2t.send_simple(submission)
| {
"content_hash": "b5f525ee5a9d3a272be73eaf652672c7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 20,
"alnum_prop": 0.7083333333333334,
"repo_name": "Fillll/reddit2telegram",
"id": "af2a84da43bc0de65c40ee0b955468ffa1fbdcc8",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/~inactive/sub_eminem/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
import subprocess
from loguru import logger
def adb(device_id, cmd=None):
    """Run an adb command against the device with the given serial.

    :param device_id: device serial, passed to ``adb -s``.
    :param cmd: adb sub-command and arguments as a single string. When
        omitted, plain ``adb -s <device_id>`` is run (previously the default
        appended the literal string "None" to the command line).
    """
    logger.info("ADB: Running on {}:{}".format(device_id, cmd))
    command = 'adb -s {}'.format(device_id)
    if cmd:
        command += ' ' + cmd
    # NOTE(review): the command is interpolated into a shell string
    # (shell=True), so callers must never pass untrusted input here.
    subprocess.run(command, shell=True)
| {
"content_hash": "8c06248b26e7f2ba7f759f917ccca3e4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 26.375,
"alnum_prop": 0.6682464454976303,
"repo_name": "androguard/androguard",
"id": "a60885186965b4ca25d601adb72dd51261847f81",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "androguard/pentest/adb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "118146"
},
{
"name": "Python",
"bytes": "1120041"
}
],
"symlink_target": ""
} |
from dragon.vm.tensorflow.contrib.learn.datasets.mnist import read_data_sets | {
"content_hash": "37b3dfe07ff5cb886f71793d785d53da",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 76,
"avg_line_length": 76,
"alnum_prop": 0.8552631578947368,
"repo_name": "neopenx/Dragon",
"id": "06d012c560ec402e6c244843db98770df048c7a6",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dragon/python/dragon/vm/tensorflow/examples/tutorials/mnist/input_data.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7082"
},
{
"name": "C++",
"bytes": "1024612"
},
{
"name": "CMake",
"bytes": "7849"
},
{
"name": "Cuda",
"bytes": "246400"
},
{
"name": "Makefile",
"bytes": "7409"
},
{
"name": "Python",
"bytes": "552459"
}
],
"symlink_target": ""
} |
"""Tests for structures."""
import unittest
from grow.common import structures
from operator import itemgetter
class AttributeDictTestCase(unittest.TestCase):
    """Test the attribute dict structure."""

    def test_attributes(self):
        """Keys are accessible both by subscript and as attributes."""
        subject = structures.AttributeDict({'key': 'value'})
        self.assertEqual('value', subject['key'])
        self.assertEqual('value', subject.key)
class DeepReferenceDictTestCase(unittest.TestCase):
    """Test the deep reference dict structure."""

    def test_deep_reference(self):
        """Dot-delimited keys reach nested values."""
        subject = structures.DeepReferenceDict(
            {'key': {'sub_key': {'value': 'foo'}}})
        self.assertEqual('foo', subject['key']['sub_key']['value'])
        self.assertEqual('foo', subject['key.sub_key.value'])

    def test_deep_reference_error(self):
        """A missing segment of a delimited key raises KeyError."""
        subject = structures.DeepReferenceDict({'key': {}})
        with self.assertRaises(KeyError):
            _ = subject['key.sub_key.value']
class SortedCollectionTestCase(unittest.TestCase):
    """Test the sorted collection structure."""

    def setUp(self):
        # Records sort by the third field (age).
        self.key = itemgetter(2)
        self.coll = structures.SortedCollection(key=self.key)
        for record in [
                ('roger', 'young', 30),
                ('angela', 'jones', 28),
                ('bill', 'smith', 22),
                ('david', 'thomas', 32)]:
            self.coll.insert(record)

    def test_clear(self):
        """Clears the collection."""
        self.assertEqual(4, len(self.coll))
        self.coll.clear()
        self.assertEqual(0, len(self.coll))

    def test_contains(self):
        """Contains matches."""
        self.assertTrue(('roger', 'young', 30) in self.coll)
        self.assertFalse(('bob', 'young', 30) in self.coll)

    def test_copy(self):
        """Copies the collection; the copy is independent of the original."""
        coll_copy = self.coll.copy()
        self.assertEqual(4, len(self.coll))
        self.assertEqual(4, len(coll_copy))
        self.coll.insert(('roger', 'young', 30))
        self.assertEqual(5, len(self.coll))
        self.assertEqual(4, len(coll_copy))

    def test_count(self):
        """Counts matches."""
        self.assertEqual(1, self.coll.count(('roger', 'young', 30)))
        self.coll.insert(('roger', 'young', 30))
        self.assertEqual(2, self.coll.count(('roger', 'young', 30)))

    def test_find(self):
        """Find first match; no match raises ValueError."""
        self.assertEqual(('angela', 'jones', 28), self.coll.find(28))
        with self.assertRaises(ValueError):
            self.coll.find(39)

    def test_get_item(self):
        """Supports index access in sorted order."""
        self.assertEqual(('bill', 'smith', 22), self.coll[0])

    def test_ge(self):
        """Greater than equal."""
        self.assertEqual(('angela', 'jones', 28), self.coll.find_ge(28))
        with self.assertRaises(ValueError):
            self.coll.find_ge(40)

    def test_gt(self):
        """Greater than."""
        self.assertEqual(('roger', 'young', 30), self.coll.find_gt(28))
        with self.assertRaises(ValueError):
            self.coll.find_gt(40)

    def test_index(self):
        """Index from item."""
        match = self.coll.find_gt(28)
        self.assertEqual(2, self.coll.index(match))

    def test_insert_right(self):
        """insert_right admits duplicates of an existing key."""
        self.assertEqual(1, self.coll.count(('roger', 'young', 30)))
        self.coll.insert_right(('roger', 'young', 30))
        self.assertEqual(2, self.coll.count(('roger', 'young', 30)))

    def test_key(self):
        """Assigning a new key function re-sorts the collection."""
        self.assertEqual(self.key, self.coll.key)
        self.coll.key = itemgetter(0)  # now sort by first name
        self.assertEqual([('angela', 'jones', 28),
                          ('bill', 'smith', 22),
                          ('david', 'thomas', 32),
                          ('roger', 'young', 30)], list(self.coll))

    def test_le(self):
        """Less than equal."""
        self.assertEqual(('angela', 'jones', 28), self.coll.find_le(28))
        with self.assertRaises(ValueError):
            self.coll.find_le(10)

    def test_lt(self):
        """Less than."""
        self.assertEqual(('bill', 'smith', 22), self.coll.find_lt(28))
        with self.assertRaises(ValueError):
            self.coll.find_lt(10)

    def test_remove(self):
        """Removes matches."""
        item = ('roger', 'young', 30)
        self.assertTrue(item in self.coll)
        self.coll.remove(item)
        self.assertFalse(item in self.coll)

    def test_repr(self):
        """Output of repr."""
        actual = repr(self.coll)
        self.assertIn('SortedCollection(', actual)
        self.assertIn("('bill', 'smith', 22)", actual)
        self.assertIn("('angela', 'jones', 28)", actual)
        self.assertIn("('roger', 'young', 30)", actual)
        self.assertIn("('david', 'thomas', 32)", actual)

    def test_sorting(self):
        """Collection is sorted."""
        self.assertEqual([('bill', 'smith', 22),
                          ('angela', 'jones', 28),
                          ('roger', 'young', 30),
                          ('david', 'thomas', 32)], list(self.coll))
| {
"content_hash": "f7a0a621f50138702477374a8c3d7eb6",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 72,
"avg_line_length": 33.092592592592595,
"alnum_prop": 0.5441149039358328,
"repo_name": "grow/pygrow",
"id": "337d96aa57f1d5395c7bc4adcd9571c4a758ec6c",
"size": "5361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/common/structures_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
from desktop.lib.django_util import get_username_re_rule, get_groupname_re_rule
# Regular-expression fragments for matching user and group names in URLs.
username_re = get_username_re_rule()
groupname_re = get_groupname_re_rule()

# URL resolution is order-sensitive; do not reorder these entries.
# NOTE(review): the 'users/delete' pattern has no trailing '$', so it also
# matches any longer path beginning with 'users/delete' -- confirm intended.
urlpatterns = patterns('useradmin.views',
    url(r'^$', 'list_users'),
    url(r'^users$', 'list_users'),
    url(r'^groups$', 'list_groups'),
    url(r'^permissions$', 'list_permissions'),

    url(r'^users/edit/(?P<username>%s)$' % (username_re,), 'edit_user'),
    url(r'^users/add_ldap_users$', 'add_ldap_users'),
    url(r'^users/add_ldap_groups$', 'add_ldap_groups'),
    url(r'^users/sync_ldap_users_groups$', 'sync_ldap_users_groups'),
    url(r'^groups/edit/(?P<name>%s)$' % (groupname_re,), 'edit_group'),
    url(r'^permissions/edit/(?P<app>.*)/(?P<priv>.*)$', 'edit_permission'),

    url(r'^users/new$', 'edit_user', name="useradmin.new"),
    url(r'^groups/new$', 'edit_group', name="useradmin.new_group"),
    url(r'^users/delete', 'delete_user'),
    url(r'^groups/delete$', 'delete_group'),
)
| {
"content_hash": "2e43b3eb5c8dbd166945f4dd4689836b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 43.130434782608695,
"alnum_prop": 0.6522177419354839,
"repo_name": "yongshengwang/builthue",
"id": "3e46335fae2a3294b1a093738d910e7f79622860",
"size": "1784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/useradmin/src/useradmin/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10774013"
},
{
"name": "C++",
"bytes": "184593"
},
{
"name": "CSS",
"bytes": "655282"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2418037"
},
{
"name": "Makefile",
"bytes": "86977"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "282"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "29990389"
},
{
"name": "Shell",
"bytes": "38643"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "99710"
},
{
"name": "XSLT",
"bytes": "367778"
}
],
"symlink_target": ""
} |
import json
import pytest
from f5_cccl.api import F5CloudServiceManager
from f5_cccl.exceptions import F5CcclConfigurationReadError
from f5_cccl.resource import ltm
from f5_cccl.resource.ltm.virtual import ApiVirtualServer
from f5_cccl.service.manager import ServiceConfigDeployer
from f5_cccl.service.config_reader import ServiceConfigReader
from mock import MagicMock
from mock import Mock
from mock import patch
class TestServiceConfigReader:
    """Tests for ServiceConfigReader against the sample service documents."""

    def setup(self):
        self.partition = "Test"
        with open('f5_cccl/schemas/tests/ltm_service.json', 'r') as fp:
            self.ltm_service = json.load(fp)
        with open('f5_cccl/schemas/tests/net_service.json', 'r') as fp:
            self.net_service = json.load(fp)

    def test_create_reader(self):
        """The reader is constructed and remembers its partition."""
        reader = ServiceConfigReader(self.partition)
        assert reader
        assert reader._partition == self.partition

    def test_get_config(self):
        """LTM and NET configs expose the expected resource counts."""
        reader = ServiceConfigReader(self.partition)
        ltm_config = reader.read_ltm_config(self.ltm_service, 0,
                                            'marathon-bigip-ctlr-v1.2.1')
        expected_counts = (
            ('virtuals', 2),
            ('pools', 1),
            ('http_monitors', 1),
            ('https_monitors', 1),
            ('icmp_monitors', 1),
            ('tcp_monitors', 1),
            ('l7policies', 2),
            ('iapps', 1),
        )
        for resource, count in expected_counts:
            assert len(ltm_config.get(resource)) == count

        net_config = reader.read_net_config(self.net_service, 0)
        for resource in ('arps', 'fdbTunnels', 'userFdbTunnels'):
            assert len(net_config.get(resource)) == 1

    def test_create_config_item_exception(self):
        """A failing resource constructor surfaces as a config-read error."""
        with patch.object(ApiVirtualServer, '__init__',
                          side_effect=ValueError("test exception")):
            reader = ServiceConfigReader(self.partition)
            with pytest.raises(F5CcclConfigurationReadError):
                reader.read_ltm_config(self.ltm_service, 0,
                                       'marathon-bigip-ctlr-v1.2.1')
| {
"content_hash": "b3c576464b0deb19522f3261401c3b53",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 98,
"avg_line_length": 37.74576271186441,
"alnum_prop": 0.6403233048944769,
"repo_name": "ryan-talley/f5-cccl",
"id": "bd3413544ffbcf8695d531e57c0a628e4205aa8a",
"size": "2842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_cccl/service/test/test_config_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "476250"
},
{
"name": "Shell",
"bytes": "5416"
}
],
"symlink_target": ""
} |
from django.contrib.gis.db.models import PointField
from django.db.models import CharField, FloatField
from django.contrib.gis.db.models.functions import GeoFunc, GeomOutputGeoFunc
class Length(GeoFunc):
    """ ST_Length postgis function """
    output_field = FloatField()


class SimplifyPreserveTopology(GeomOutputGeoFunc):
    """ ST_SimplifyPreserveTopology postgis function """


class GeometryType(GeoFunc):
    """ GeometryType postgis function """
    output_field = CharField()
    # GeometryType has no ST_ prefix; override the name derived by GeoFunc.
    function = 'GeometryType'


class DumpGeom(GeomOutputGeoFunc):
    """ ST_Dump postgis function returning only geometry. """
    function = 'ST_Dump'
    template = '(%(function)s(%(expressions)s))."geom"'  # ST_DUMP return tuple as (path, geom). Keep geom only.


class StartPoint(GeoFunc):
    """ ST_StartPoint postgis function """
    output_field = PointField()


class EndPoint(GeoFunc):
    """ ST_EndPoint postgis function """
    output_field = PointField()


class Buffer(GeomOutputGeoFunc):
    """ ST_Buffer postgis function """
    pass


class Area(GeoFunc):
    """ ST_Area postgis function """
    output_field = FloatField()
| {
"content_hash": "9aeaf540a4024867d101dd9bf95ac676",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 112,
"avg_line_length": 25.84090909090909,
"alnum_prop": 0.7044854881266491,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "239b01896943f5b598c78bbe2e5a6599cc3db19a",
"size": "1137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geotrek/common/functions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
} |
from recipe_scrapers.woop import Woop
from tests import ScraperTest
class TestWoopScraper(ScraperTest):
    """Scraper tests for woop.co.nz recipe pages."""

    scraper_class = Woop

    def test_host(self):
        self.assertEqual("woop.co.nz", self.harvester_class.host())

    def test_title(self):
        self.assertEqual(
            self.harvester_class.title(),
            "Pan-seared beef",
        )

    def test_total_time(self):
        self.assertEqual(20, self.harvester_class.total_time())

    def test_yields(self):
        self.assertEqual("2 servings", self.harvester_class.yields())

    def test_ingredients(self):
        self.assertCountEqual(
            [
                "1 pack of beef sirloin",
                "1 pack of steamed brown rice",
                "1 pot of Korean seasoning",
                "1 pot of sesame drizzle",
                "1 pot of kimchi",
                "1 sachet of sesame sprinkle",
                "1 pot of edamame and peas",
                "1 capsicum",
                "1 bag of bok choy",
            ],
            self.harvester_class.ingredients(),
        )

    def test_instructions(self):
        # Idiom fix: dropped the pointless ``return`` before assertEqual
        # (assertEqual returns None; test methods should not return values).
        self.assertEqual(
            """1. TO PREPARE THE VEGETABLES: Slice the capsicum into ½ cm slices. Cut the root end off the bok choy, discard and slice into 1 cm slices. Finely slice the kimchi.
2. TO COOK THE KIMCHI RICE: Spray a non-stick frying pan with oil and place over a medium-high heat. Once hot add the sliced capsicum and cook for 2-3 mins stirring occasionally to create a char. Open the bag of steamed brown rice, squeezing gently on the bag to break up any large clumps and add to the pan with the bok choy, edamame and peas and cook for 1-2 mins. Pour in the Korean seasoning and add half the kimchi, stir and cook for 1-2 mins. Season with salt to taste, remove from the pan and cover to keep warm.
3. TO COOK THE BEEF SIRLOIN: Remove the beef sirloin from its packaging and pat dry with a paper towel. Respray the pan with oil and place over a high heat. Season the beef with salt and pepper and place in the hot pan. Cook for 2-3 mins each side for medium-rare, a little longer for well done. Rest for 1-2 mins before slicing.
TO SERVE: Spoon kimchi fried rice onto plates and top with sliced beef sirloin. Dollop with sesame drizzle and sprinkle remaining kimchi and sesame sprinkle on top.""",
            self.harvester_class.instructions(),
        )

    def test_nutrients(self):
        # Idiom fix: dropped the pointless ``return`` (see test_instructions).
        self.assertEqual(
            {
                "Energy": "2322kj (554Kcal)",
                "Protein": "43g",
                "Carbohydrate": "43g",
                "Fat": "23g",
                "Contains": "Soy, Sesame, Gluten, Milk, Egg",
            },
            self.harvester_class.nutrients(),
        )

    def test_image(self):
        self.assertEqual(
            "https://woop.co.nz/media/catalog/product/cache/f4f005ad5960ef8c7b8a08a9a3fc244e/b/-/b-pan-seared-beef_s64dydtpoves1rbu.jpg",
            self.harvester_class.image(),
        )
| {
"content_hash": "6f7f44cdc9f9243035927083d157e570",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 519,
"avg_line_length": 46.9375,
"alnum_prop": 0.6284953395472703,
"repo_name": "hhursev/recipe-scraper",
"id": "5997c39235b9b8e5c641a065fb02f13664b5cd13",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_woop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88554"
}
],
"symlink_target": ""
} |
from scaleiopy.scaleio import ScaleIO
from scaleiopy.api.scaleio.mapping.snapshotspecification import SIO_SnapshotSpecification
from pprint import pprint
import sys

# How to run:
# python create-snapshot-of-volume.py ip-to-gw user pass volume_name

# HTTPS must be used as there seem to be an issue with 302 responses in
# Requests when using POST.
sio = ScaleIO("https://" + sys.argv[1] + "/api", sys.argv[2], sys.argv[3], False, "ERROR")

# Build a snapshot specification containing the single requested volume.
snapSpec = SIO_SnapshotSpecification()
snapSpec.addVolume(sio.provisioning.get_volume_by_name(sys.argv[4]))

# Fix: use print() calls (single argument), which behave identically on
# Python 2 and Python 3, instead of Python-2-only print statements.
print("**********")
print("* Volume *")
print("**********")
pprint(sio.provisioning.get_volume_by_name(sys.argv[4]))
print("**********************")
print("Snapshot specification")
print("**********************")
pprint(snapSpec)
print("* Creating Snapshot")
result = sio.provisioning.create_snapshot(sio.get_system_id(), snapSpec)
pprint(result)
| {
"content_hash": "e96f75e50a31d34a772385075c0c6d0e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 183,
"avg_line_length": 35.096774193548384,
"alnum_prop": 0.6985294117647058,
"repo_name": "swevm/scaleio-py",
"id": "9562a6324313286cac23e15eed1820c22b740109",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/create-snapshot-of-volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "155067"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
from decimal import Decimal
import datetime
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QGridLayout
from electrum_ltc.i18n import _
from .util import WindowModalDialog, ButtonsLineEdit, ColorScheme, Buttons, CloseButton, MONOSPACE_FONT
if TYPE_CHECKING:
from .main_window import ElectrumWindow
class LightningTxDialog(WindowModalDialog):
    """Read-only dialog showing one Lightning payment from the history.

    Displays the amount (plus fee for outgoing payments), the date, and the
    payment hash and preimage as copyable line edits with QR-code buttons.
    """

    def __init__(self, parent: 'ElectrumWindow', tx_item: dict):
        WindowModalDialog.__init__(self, parent, _("Lightning Payment"))
        self.parent = parent
        self.is_sent = bool(tx_item['direction'] == 'sent')
        self.label = tx_item['label']
        self.timestamp = tx_item['timestamp']
        # amount_msat / fee_msat are millisatoshis; divide to get sats.
        self.amount = Decimal(tx_item['amount_msat']) / 1000
        self.payment_hash = tx_item['payment_hash']
        self.preimage = tx_item['preimage']
        self.setMinimumWidth(700)
        vbox = QVBoxLayout()
        self.setLayout(vbox)
        # FIXME fiat values here are using today's FX rate instead of historical
        vbox.addWidget(QLabel(_("Amount") + ": " + self.parent.format_amount_and_units(self.amount)))
        if self.is_sent:
            fee = Decimal(tx_item['fee_msat']) / 1000
            vbox.addWidget(QLabel(_("Fee") + ": " + self.parent.format_amount_and_units(fee)))
        time_str = datetime.datetime.fromtimestamp(self.timestamp).isoformat(' ')[:-3]
        vbox.addWidget(QLabel(_("Date") + ": " + time_str))
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        # Payment hash and preimage share the same caption/copy/QR layout,
        # so the duplicated setup is factored into a helper.
        self.hash_e = self._add_copyable_field(
            vbox, _("Payment hash"), self.payment_hash, qr_icon)
        self.preimage_e = self._add_copyable_field(
            vbox, _("Preimage"), self.preimage, qr_icon)
        vbox.addLayout(Buttons(CloseButton(self)))

    def _add_copyable_field(self, vbox, title, text, qr_icon):
        """Add a captioned, read-only ButtonsLineEdit with copy and QR-code
        buttons to ``vbox`` and return the created widget."""
        vbox.addWidget(QLabel(title + ":"))
        e = ButtonsLineEdit(text)
        e.addCopyButton(self.parent.app)
        e.addButton(qr_icon,
                    self.show_qr(e, title),
                    _("Show QR Code"))
        e.setReadOnly(True)
        e.setFont(QFont(MONOSPACE_FONT))
        vbox.addWidget(e)
        return e

    def show_qr(self, line_edit, title=''):
        """Return a zero-argument callback showing ``line_edit``'s text as a
        QR code; errors are surfaced in a message box."""
        def f():
            text = line_edit.text()
            try:
                self.parent.show_qrcode(text, title, parent=self)
            except Exception as e:
                self.show_message(repr(e))
        return f
| {
"content_hash": "370cfb571bd3008318498fa2279363da",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 103,
"avg_line_length": 40.357142857142854,
"alnum_prop": 0.6095575221238938,
"repo_name": "vialectrum/vialectrum",
"id": "10f3a7579d38e2e80c395a1387c64acad1ce9c5c",
"size": "3996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_ltc/gui/qt/lightning_tx_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "839"
},
{
"name": "NSIS",
"bytes": "7496"
},
{
"name": "Python",
"bytes": "1895270"
},
{
"name": "Shell",
"bytes": "16219"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class AddressList(A10BaseClass):

    """One ``address-list`` entry (an IPv4 address plus netmask).

    This class does not support CRUD Operations please use parent.

    :param ipv4_address: IP address (format: ipv4-address).
    :param ipv4_netmask: IP subnet mask (format: ipv4-netmask).
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "address-list"
        self.DeviceProxy = ""
        self.ipv4_address = ""
        self.ipv4_netmask = ""
        # Allow callers to seed any attribute via keyword arguments.
        for name, value in kwargs.items():
            setattr(self, name, value)
class HelperAddressList(A10BaseClass):

    """One ``helper-address-list`` entry (DHCP helper address).

    This class does not support CRUD Operations please use parent.

    :param helper_address: Helper address for DHCP packets (IP address,
        format: ipv4-address).
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "helper-address-list"
        self.DeviceProxy = ""
        self.helper_address = ""
        # Allow callers to seed any attribute via keyword arguments.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Ip(A10BaseClass):

    """Class Description::
    Global IP configuration subcommands.

    Class ip supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param uuid: uuid of the object (string, 1-64 chars, modify-not-allowed).
    :param generate_membership_query: Enable Membership Query (flag, default 0).
    :param address_list: array of address-list objects (ipv4-address / ipv4-netmask).
    :param inside: Configure interface as inside (flag, default 0).
    :param allow_promiscuous_vip: Allow traffic to be associated with promiscuous VIP (flag, default 0).
    :param max_resp_time: Maximum Response Time, 1-255 (default 100).
    :param query_interval: 1 - 255 (default 125).
    :param outside: Configure interface as outside (flag, default 0).
    :param helper_address_list: array of helper-address objects for DHCP packets.
    :param dhcp: Use DHCP to configure IP address (flag, default 0).
    :param slb_partition_redirect: Redirect SLB traffic across partition (flag, default 0).
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`.

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/interface/ve/{ifnum}/ip`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "ip"
        self.a10_url = "/axapi/v3/interface/ve/{ifnum}/ip"
        self.DeviceProxy = ""
        # Scalar/flag fields default to the empty string, per the
        # generated-SDK convention.
        for field in ("uuid", "generate_membership_query", "inside",
                      "allow_promiscuous_vip", "max_resp_time",
                      "query_interval", "outside", "dhcp",
                      "slb_partition_redirect"):
            setattr(self, field, "")
        # Array-valued fields.
        self.address_list = []
        self.helper_address_list = []
        # Nested single-object fields.
        for field in ("stateful_firewall", "rip", "router", "ospf"):
            setattr(self, field, {})
        # Allow callers to seed any attribute via keyword arguments.
        for name, value in kwargs.items():
            setattr(self, name, value)
| {
"content_hash": "d4ce65be902e6b3d044661873d689de1",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 340,
"avg_line_length": 45.00952380952381,
"alnum_prop": 0.6159542953872197,
"repo_name": "a10networks/a10sdk-python",
"id": "7e4954244d2b3e665b76539c1c87f77794cf5be2",
"size": "4726",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/interface/interface_ve_ip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
} |
__author__ = 'digao'
from collections import defaultdict
import sys
import re
def rec_depth(graph, k, seen, parents, cycles):
    """Depth-first walk from ``k`` recording cycles of length >= 3.

    ``parents`` is the current path; when a neighbour already appears on the
    path (other than as the immediate predecessor) the path tail forms a
    cycle, stored as a frozenset in ``cycles``.  ``seen`` accumulates every
    visited node.
    """
    parents.append(k)
    seen.add(k)
    for neighbour in graph[k]:
        if neighbour not in parents or len(parents) <= 1:
            rec_depth(graph, neighbour, seen, parents, cycles)
            continue
        start = parents.index(neighbour)
        # Skip trivial two-node back-and-forth "cycles".
        if start < len(parents) - 2:
            cycles.add(frozenset(parents[start:]))
def depth_cycles(graph):
    """Return every cycle found in ``graph`` as a set of frozensets."""
    visited = set()
    found = set()
    # Snapshot the keys: rec_depth indexes a defaultdict and may grow it.
    for node in list(graph):
        if node not in visited:
            rec_depth(graph, node, visited, [], found)
    return found
# Read the traffic log from the file named on the command line, falling
# back to stdin when no argument is given.
input = sys.stdin
if len(sys.argv) > 1:  # fix: original indexed sys.argv[1] unconditionally (IndexError)
    input = open(sys.argv[1], 'r')
seen = set()
graph = defaultdict(set)
lines = input.readlines()
for line in lines:
    # Each line is "<timestamp>\t<user1>\t<user2>"; the timestamp is unused.
    _, u1, u2 = (s.strip() for s in line.split('\t'))
    if (u2, u1) in seen:
        # Contact in both directions: record an undirected edge.
        graph[u1].add(u2)
        graph[u2].add(u1)
    else:
        seen.add((u1, u2))
cycles = depth_cycles(graph)
result = []
for c in cycles:
    result.append(sorted(tuple(c)))
result.sort()
for res in result:
    # print() with one argument behaves identically on Python 2 and 3.
    print(','.join(res))
input.close() | {
"content_hash": "53197252d3a82c85d8af17229ec1f66a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 54,
"avg_line_length": 22.215686274509803,
"alnum_prop": 0.5939982347749339,
"repo_name": "digaobarbosa/algorithms",
"id": "c1bbceae90f4ec363051356b8e7fa73b3b321d79",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peak_traffic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24295"
}
],
"symlink_target": ""
} |
import json
import functools
from django.conf import settings
from django.test import Client, TestCase
__all__ = ['JsonTestClient', 'JsonTestMixin', 'JsonTestCase']
class JsonTestClient(Client):
    """Django test client that talks JSON: request bodies are serialized
    with json.dumps and JSON responses get a decoded ``resp.json``."""

    def _json_request(self, method, url, data=None, *args, **kwargs):
        """Issue ``method`` with an application/json content type and, when
        the response is JSON, attach the decoded body as ``resp.json``."""
        send = getattr(super(JsonTestClient, self), method)
        if data is None:
            resp = send(url, content_type='application/json', *args, **kwargs)
        else:
            # GET data passes through untouched (it becomes the query
            # string); every other verb gets a serialized JSON body.
            payload = data if method == 'get' else json.dumps(data)
            resp = send(url, payload, content_type='application/json', *args, **kwargs)
        if resp.get('Content-Type', '').startswith('application/json') and resp.content:
            if hasattr(resp, 'charset'):
                charset = resp.charset
            else:
                charset = settings.DEFAULT_CHARSET
            resp.json = json.loads(resp.content.decode(charset))
        return resp

    def __getattribute__(self, attr):
        # Route the HTTP verb shortcuts through _json_request; every other
        # attribute resolves normally.
        if attr in ('get', 'post', 'put', 'delete', 'trace', 'head', 'patch', 'options'):
            return functools.partial(self._json_request, attr)
        return super(JsonTestClient, self).__getattribute__(attr)
class JsonTestMixin(object):
    """Mixin that makes a Django TestCase use JsonTestClient as its client."""
    client_class = JsonTestClient
class JsonTestCase(JsonTestMixin, TestCase):
    """TestCase whose ``self.client`` sends and parses JSON bodies."""
    pass
| {
"content_hash": "8da8a508e1fb633ec61fc0cef3287d56",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 99,
"avg_line_length": 32.65853658536585,
"alnum_prop": 0.6325616131441374,
"repo_name": "fusionbox/django-argonauts",
"id": "0775979154abf59b2f6054b2e4eb2fb06da7fa46",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argonauts/testutils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18608"
}
],
"symlink_target": ""
} |
"""
Created on Tue Feb 17 11:15:48 2015
@author: Andreu Mora
Example script to demonstrate the fundamentals of Hidden Markov Models based on
[Rabiner 89] A Tutorial on Hidden Markov Models and Selected Applications in
Speech Recognition.
"""
import pandas as pd
import numpy as np
class SimpleHMM:
    """Discrete Hidden Markov Model (notation follows [Rabiner 89]).

    Model parameters (pandas objects, set in __init__):
      A  -- transition matrix; ``A[j].loc[i]`` is P(S_i -> S_j)
            (columns = destination state, rows = source state — the
            convention the forward recursion below relies on)
      B  -- emission matrix; ``B[j].loc[k]`` is P(symbol k | state j)
            (columns = states, rows = symbols)
      PI -- initial state distribution, indexed by state name

    Fixes relative to the original: mutable class-level A/B/PI attributes
    removed (shared state); per-cell writes use positional ``.iloc[row, col]``
    because chained assignment (``df[col].iloc[i] = v``) does not write
    through under pandas copy-on-write; see the per-method notes for the
    symbol-count and Viterbi bug fixes.
    """

    def __init__(self, aA, aB, aPI):
        self.A = aA
        self.B = aB
        self.PI = aPI

    def getOrder(self):
        """Return the number of hidden states."""
        return len(self.A.index)

    def getStatesNames(self):
        """Return the index of state names."""
        return self.A.index

    def getNumberOfSymbols(self):
        """Return the number of distinct emission symbols.

        Bug fix: symbols live on B's *rows* (states are the columns), so the
        original ``len(self.B.columns)`` returned the state count.
        """
        return len(self.B.index)

    def getSymbolsNames(self):
        """Return the index of symbol names (B's rows; fixed from B.columns)."""
        return self.B.index

    def computeForwardVariable(self, aSymbolSequence):
        """Return the forward-variable matrix alpha.

        alpha_t(i) = P(O_1, ..., O_t, q_t = S_i | lambda); rows follow the
        observed sequence, columns the states.
        """
        states = self.getStatesNames()
        alpha = pd.DataFrame(columns=states, index=aSymbolSequence, dtype=float)
        # Initialization: alpha_0(i) = pi_i * b_i(O_0)
        for state in states:
            alpha.iloc[0, alpha.columns.get_loc(state)] = (
                self.PI[state] * self.B[state].loc[aSymbolSequence[0]])
        # Induction over the remaining observations.
        for t in range(1, len(aSymbolSequence)):
            for state in states:
                alpha.iloc[t, alpha.columns.get_loc(state)] = (
                    (self.A[state] * alpha.iloc[t - 1]).sum()
                    * self.B[state].loc[aSymbolSequence[t]])
        return alpha

    def computeBackwardVariable(self, aSymbolSequence):
        """Return the backward-variable matrix beta.

        beta_t(i) = P(O_{t+1}, ..., O_T | q_t = S_i, lambda)
        """
        states = self.getStatesNames()
        beta = pd.DataFrame(columns=states, index=aSymbolSequence, dtype=float)
        # By convention beta_T(i) = 1 for every state.
        beta.iloc[-1] = np.ones(self.getOrder())
        for t in range(len(aSymbolSequence) - 2, -1, -1):
            nxt = aSymbolSequence[t + 1]
            for state in states:
                # sum_j a_{state,j} * b_j(O_{t+1}) * beta_{t+1}(j);
                # A.loc[state] is the row of transitions *out of* ``state``.
                beta.iloc[t, beta.columns.get_loc(state)] = (
                    beta.iloc[t + 1] * self.B.loc[nxt] * self.A.loc[state]).sum()
        return beta

    def computeProbabilityOfSymbolSequence(self, aSymbolSequence):
        """Return P(O | lambda): the sum of the final forward-variable row."""
        alpha = self.computeForwardVariable(aSymbolSequence)
        return alpha.sum(axis=1).iloc[-1]

    def computeViterbi(self, aSymbolSequence):
        """Run the Viterbi recursion; return (delta, path).

        ``delta`` holds the best-path probabilities, ``path`` the
        back-pointers (best predecessor for each state at each step).

        Bug fix: the recursion must weight delta_{t-1}(i) by a_{i,state},
        i.e. the *column* ``A[state]`` (exactly as the forward pass does);
        the original used the row ``A.loc[state]`` (transitions out of
        ``state``), which is only equivalent when A is symmetric.
        """
        states = self.getStatesNames()
        delta = pd.DataFrame(columns=states, index=aSymbolSequence, dtype=float)
        path = pd.DataFrame(columns=states, index=aSymbolSequence)
        for state in states:
            delta.iloc[0, delta.columns.get_loc(state)] = (
                self.PI[state] * self.B[state].loc[aSymbolSequence[0]])
        for t in range(1, len(aSymbolSequence)):
            for state in states:
                col = delta.columns.get_loc(state)
                scores = self.A[state] * delta.iloc[t - 1]
                delta.iloc[t, col] = self.B[state].loc[aSymbolSequence[t]] * scores.max()
                path.iloc[t, col] = delta.columns[int(np.argmax(scores))]
        return delta, path

    def computeMostProbableStateSequence(self, aSymbolSequence):
        """Return (state_sequence, probability) of the most likely path."""
        delta, path = self.computeViterbi(aSymbolSequence)
        best_prob = delta.iloc[-1].max()
        # ``[""] * len(...)`` fixes the original ``"" * len(...)``, which is
        # always the empty string (it only worked via scalar broadcasting).
        stateSeq = pd.Series(index=aSymbolSequence, data=[""] * len(aSymbolSequence))
        stateSeq.iloc[-1] = delta.columns[int(np.argmax(delta.iloc[-1]))]
        for t in range(len(aSymbolSequence) - 2, -1, -1):
            # path is shifted forward by one step relative to the states.
            stateSeq.iloc[t] = path[stateSeq.iloc[t + 1]].iloc[t + 1]
        return stateSeq, best_prob
# MAIN TEST
# Demo: two-state H(igh)/L(ow) model over DNA symbols.
myStates = ['H', 'L']
mySymbols = ['G', 'T', 'C', 'A']
myPI = pd.Series(data=[0.5, 0.5], index=myStates)
# Transition matrix laid out so that myA[to].loc[from] = P(from -> to),
# matching the convention SimpleHMM's forward recursion uses.  Built from
# full data instead of per-cell chained assignment (myA['H'].loc['H'] = ...),
# which does not write through under pandas copy-on-write.
myA = pd.DataFrame(data=[[0.5, 0.4],
                         [0.5, 0.6]],
                   index=myStates, columns=myStates)
# Emission matrix: myB[state].loc[symbol] = P(symbol | state).
myB = pd.DataFrame(data=[[0.3, 0.2],
                         [0.2, 0.3],
                         [0.3, 0.2],
                         [0.2, 0.3]],
                   index=mySymbols, columns=myStates)
myHMM = SimpleHMM(myA, myB, myPI)
mySymbolSequence = ['G', 'G', 'C', 'A', 'C', 'T', 'G', 'A', 'A']
myStateSequence, myProb = myHMM.computeMostProbableStateSequence(mySymbolSequence)
# print() calls (single argument) work on both Python 2 and 3; the original
# Python-2-only print statements are a syntax error on Python 3.
print(myStateSequence)
print(myProb)
| {
"content_hash": "2854f85f91c1e913ef646104262b4b4f",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 127,
"avg_line_length": 38.37323943661972,
"alnum_prop": 0.5935033951183704,
"repo_name": "drublackberry/fantastic_demos",
"id": "03cb2244c26cfd1753ab830899f14bceb3198d02",
"size": "5474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Markov/HMM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2418877"
},
{
"name": "Python",
"bytes": "22565"
}
],
"symlink_target": ""
} |
"""File utitilies for Python:
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.1 $
Start Date: 2001/09/26
Last Revision Date: $Date: 2006-09-06 09:50:08 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.1 $"[11:-2]
from glob import glob
import os
from os import listdir
import os.path
import re
from types import StringType
from tempfile import mktemp
def _escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
return escapeRE.sub(r'\\\1' , txt)
def findFiles(*args, **kw):
    """Recursively collect every file under a root directory that matches
    one of the given glob patterns.

    All arguments are forwarded to FileFinder; see its docstring for the
    accepted parameters."""
    finder = FileFinder(*args, **kw)
    return finder.files()
def replaceStrInFiles(files, theStr, repl):
    """Literal (non-regex) multi-file replace: substitute every occurrence
    of ``theStr`` with ``repl`` in each file of ``files``, like
    string.replace() applied across files.  Returns a dictionary describing
    the matches found (see FindAndReplace)."""
    escaped = _escapeRegexChars(theStr)
    return FindAndReplace(files, escaped, repl).results()
def replaceRegexInFiles(files, pattern, repl):
    """Regex multi-file replace: apply ``re.sub(pattern, repl, ...)`` to the
    contents of each file in ``files``.  Returns a dictionary describing
    the matches found (see FindAndReplace)."""
    finder = FindAndReplace(files, pattern, repl)
    return finder.results()
##################################################
## CLASSES
class FileFinder:
    """Collect all files under a root directory that match one of the given
    glob patterns, skipping ignored directory basenames and paths."""

    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS', '.svn'),
                 ignoreDirs=(),
                 ):
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        self.walkDirTree(rootPath)

    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Iteratively traverse the tree rooted at ``dir`` (depth-first,
        LIFO stack) collecting matching files along the way."""
        stack = [dir]
        while stack:
            current = stack.pop()
            # Gather matches in this directory, then queue its sub-dirs.
            self.processDir(current)
            for entry in listdir(current):
                full = join(current, entry)
                if isdir(full) and self.filterDir(entry, full):
                    stack.append(full)

    def filterDir(self, baseName, fullPath):
        """Return False for directories that should be skipped."""
        if baseName in self._ignoreBasenames:
            return False
        return fullPath not in self._ignoreDirs

    def processDir(self, dir, glob=glob):
        """Append the files in ``dir`` matching each glob pattern."""
        for pattern in self._globPatterns:
            self._files.extend(glob(os.path.join(dir, pattern)))

    def files(self):
        """Return the list of matched file paths."""
        return self._files
class _GenSubberFunc:
"""Converts a 'sub' string in the form that one feeds to re.sub (backrefs,
groups, etc.) into a function that can be used to do the substitutions in
the FindAndReplace class."""
backrefRE = re.compile(r'\\([1-9][0-9]*)')
groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')
def __init__(self, replaceStr):
self._src = replaceStr
self._pos = 0
self._codeChunks = []
self.parse()
def src(self):
return self._src
def pos(self):
return self._pos
def setPos(self, pos):
self._pos = pos
def atEnd(self):
return self._pos >= len(self._src)
def advance(self, offset=1):
self._pos += offset
def readTo(self, to, start=None):
if start == None:
start = self._pos
self._pos = to
if self.atEnd():
return self._src[start:]
else:
return self._src[start:to]
## match and get methods
def matchBackref(self):
return self.backrefRE.match(self.src(), self.pos())
def getBackref(self):
m = self.matchBackref()
self.setPos(m.end())
return m.group(1)
def matchGroup(self):
return self.groupRE.match(self.src(), self.pos())
def getGroup(self):
m = self.matchGroup()
self.setPos(m.end())
return m.group(1)
## main parse loop and the eat methods
def parse(self):
while not self.atEnd():
if self.matchBackref():
self.eatBackref()
elif self.matchGroup():
self.eatGroup()
else:
self.eatStrConst()
def eatStrConst(self):
startPos = self.pos()
while not self.atEnd():
if self.matchBackref() or self.matchGroup():
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
self.addChunk(repr(strConst))
def eatBackref(self):
self.addChunk( 'm.group(' + self.getBackref() + ')' )
def eatGroup(self):
self.addChunk( 'm.group("' + self.getGroup() + '")' )
def addChunk(self, chunk):
self._codeChunks.append(chunk)
## code wrapping methods
def codeBody(self):
return ', '.join(self._codeChunks)
def code(self):
return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())
def subberFunc(self):
exec self.code()
return subber
class FindAndReplace:
    """Find and replace all instances of 'patternOrRE' with 'replacement' for
    each file in the 'files' list. This is a multi-file version of re.sub().

    'patternOrRE' can be a raw regex pattern or
    a regex object as generated by the re module. 'replacement' can be any
    string that would work with patternOrRE.sub(replacement, fileContents).

    NOTE(review): this class relies on Python-2-only constructs
    (``StringType``, ``dict.has_key``, ``os.popen3``); it will not run
    unmodified on Python 3.
    """
    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):
        # Accept either a raw pattern string or a pre-compiled regex object.
        if type(patternOrRE) == StringType:
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # A replacement string is compiled into a substitution function;
        # anything else (a callable) is used as-is, like re.sub allows.
        if type(replacement) == StringType:
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement
        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults
        ## see if we should use pgrep to do the file matching
        # Probe for an external 'pgrep' binary to pre-filter files: if
        # running it prints a usage message, the binary exists; a trial run
        # against a one-byte temp file then checks it accepts this pattern.
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)
        # Perform the replacements immediately on construction.
        self._run()
    def results(self):
        # Mapping: file path -> {'count': int, 'matches': [dict, ...]}.
        return self._results
    def _run(self):
        # Bind to locals once for use inside the loop.
        regex = self._regex
        subber = self._subDispatcher
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.
            self._currFile = file
            found = False
            # 'orig' doubles as the "file already read" flag; it is
            # inspected/cleared via locals() (a Python 2 idiom).
            if locals().has_key('orig'):
                del orig
            if self._usePgrep:
                # Fast path: let pgrep decide whether the file matches.
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                # The pgrep path never read the file; do it now.
                if not locals().has_key('orig'):
                    orig = open(file).read()
                new = regex.sub(subber, orig)
                # Rewrite the file in place with the substitutions applied.
                open(file, 'w').write(new)
    def _subDispatcher(self, match):
        # Wraps the real substitution function so each match can be recorded
        # in self._results before delegating to self._subber.
        if self._recordResults:
            if not self._results.has_key(self._currFile):
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents':match.group(),
                                   'start':match.start(),
                                   'end':match.end(),
                                   }
                                  )
        return self._subber(match)
class SourceFileStats:
    """Compute simple line-count statistics (code / blank / comment / total)
    for a collection of source files."""

    # Per-file stats mapping; populated by __init__.
    _fileStats = None

    def __init__(self, files):
        """Gather stats for every path in ``files``."""
        self._fileStats = stats = {}
        for file in files:
            stats[file] = self.getFileStats(file)

    def rawStats(self):
        """Return the per-file stats dictionary (path -> stats dict)."""
        return self._fileStats

    def summary(self):
        """Return aggregate line counts across all files."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']
        return {'codeLines': codeLines,
                'blankLines': blankLines,
                'commentLines': commentLines,
                'totalLines': totalLines,
                }

    def printStats(self):
        # Not implemented in the original; kept for interface compatibility.
        pass

    def getFileStats(self, fileName):
        """Count code, blank and comment lines in ``fileName``.

        Bug fixes: a comment line is now any line whose first non-whitespace
        character is '#' (the original regex r'\\s#.*$' demanded a leading
        whitespace char, so column-0 comments were counted as code), and a
        blank line is empty or whitespace-only (the original '\\s$' regex
        missed completely empty lines).  The file is also closed explicitly.
        """
        codeLines = 0
        blankLines = 0
        commentLines = 0
        commentLineRe = re.compile(r'\s*#')
        with open(fileName) as f:
            lines = f.read().splitlines()
        totalLines = len(lines)
        for line in lines:
            if not line.strip():
                blankLines += 1
            elif commentLineRe.match(line):
                commentLines += 1
            else:
                codeLines += 1
        return {'codeLines': codeLines,
                'blankLines': blankLines,
                'commentLines': commentLines,
                'totalLines': totalLines,
                }
| {
"content_hash": "7dbf02cfbeca94d2929e74adca7c1977",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 81,
"avg_line_length": 31.048387096774192,
"alnum_prop": 0.5146320346320347,
"repo_name": "skyostil/tracy",
"id": "fb1ecc2bc0b0a6cda0797f999b816075c72eb6eb",
"size": "11635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/generator/Cheetah/FileUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "952865"
},
{
"name": "C++",
"bytes": "165814"
},
{
"name": "Prolog",
"bytes": "554"
},
{
"name": "Python",
"bytes": "1384305"
},
{
"name": "Shell",
"bytes": "4482"
}
],
"symlink_target": ""
} |
lyr = iface.activeLayer()
feats = lyr.getFeatures()
xmin=180
ymin=90
xmax=-180
ymax=-90
vertices=0
for feat in feats:
bb = feat.geometry().boundingBox()
#print bb.toString()
xmin = xmin if xmin < bb.xMinimum() else bb.xMinimum()
ymin = ymin if ymin < bb.yMinimum() else bb.yMinimum()
xmax = xmax if xmax > bb.xMaximum() else bb.xMaximum()
ymax = ymax if ymax > bb.yMaximum() else bb.yMaximum()
lines = feat.geometry().asPolyline()
vertices+=len(lines)
print xmin, ymin, xmax, ymax
print vertices
| {
"content_hash": "b67115e226a50eba4f2998c2c9bf5a1f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6742424242424242,
"repo_name": "BergWerkGIS/detect-corrupt-tiff",
"id": "8957ee6f55db5d99e2a4f80df4e369f310931cb9",
"size": "528",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "QGIS/get-bboxes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10759"
},
{
"name": "Python",
"bytes": "8714"
}
],
"symlink_target": ""
} |
from django import template
from django.conf import settings
from django.shortcuts import render_to_response
import os
import subprocess
register = template.Library()
@register.simple_tag
def socketio_head():
    """Render the <head> snippet that loads socket.io and defines basic
    client-side helpers (exception logging and a cookie getter)."""
    markup = """
    <script src="/socket.io/socket.io.js"></script>
    <script type="text/javascript">
        var socket = io();
        socket.on('python_exception', function(data){
            console.log("Python exception:");
            console.log(data.exception);
        });
        function getCookie(name) {
            var value = "; " + document.cookie;
            var parts = value.split("; " + name + "=");
            if (parts.length == 2) return parts.pop().split(";").shift();
        }
    </script>
    """
    return markup
| {
"content_hash": "e3431f32a773fe3cc5b35a8b916fe75a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 81,
"avg_line_length": 32.76923076923077,
"alnum_prop": 0.5140845070422535,
"repo_name": "pztrick/django-socketio-events",
"id": "4547c8d301e0b42b04b920efd704a7bef58b5e3a",
"size": "852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodjango/templatetags/socketio_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1682"
},
{
"name": "Python",
"bytes": "8269"
}
],
"symlink_target": ""
} |
import copy
import glob
import os
import os.path as op
import shutil
import pathlib
import numpy as np
from numpy.testing import assert_equal
import pytest
from matplotlib import pyplot as plt
from mne import Epochs, read_events, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report, open_report, _ReportScraper
from mne.utils import (requires_nibabel, Bunch,
run_tests_if_main, requires_h5py)
from mne.viz import plot_alignment
from mne.io.write import DATE_NONE
# Paths into the (optionally downloaded) MNE testing dataset used as
# fixtures by the report tests below.
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
# Sample raw recording plus derived files: events, covariance, SSP
# projectors, forward model, coregistration transform, inverse operator.
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
proj_fname = op.join(report_dir, 'sample_audvis_ecg-proj.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
# Evoked fixture shipped with mne's io test data (relative to this file).
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
                               'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
def _get_example_figures():
    """Create two example figures."""
    figures = []
    for coords in ([1, 2], [3, 4]):
        line = plt.plot(coords, coords)[0]
        figures.append(line.figure)
    return figures
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report(renderer, tmpdir):
    """Test rendering -*.fif files for mne report."""
    tempdir = str(tmpdir)
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)
    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')  # shadows module global
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True)
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average().crop(0.1, 0.2)
    evoked.save(evoked_fname)
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    projs=True)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)
    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)
    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))
    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    # Projectors in Raw.info
    assert '<h4>SSP Projectors</h4>' in html
    # Projectors in `proj_fname_new`
    assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
    # Evoked in `evoked_fname`
    assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
    assert 'Topomap (ch_type =' in html
    assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html
    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))
    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))
    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))
    # BUG FIX: this previously globbed '*.raw' twice (a duplicated pattern
    # matching no file in tempdir), so the verification loop below never
    # executed. Glob the same patterns passed to parse_folder instead, so the
    # pattern-matching behavior is actually checked.
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)
    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)
    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    tempdir = pathlib.Path(tempdir)  # test using pathlib.Path
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
@testing.requires_testing_data
def test_report_raw_psd_and_date(tmpdir):
    """Test report raw PSD and DATE_NONE functionality."""
    # raw_psd accepts a dict of kwargs (or a bool); a plain str must raise
    with pytest.raises(TypeError, match='dict'):
        Report(raw_psd='foo')
    tempdir = str(tmpdir)
    raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw.save(raw_fname_new)
    report = Report(raw_psd=True)
    report.parse_folder(data_path=tempdir, render_bem=False,
                        on_error='raise')
    assert isinstance(report.html, list)
    # PSD section rendered and a GMT timestamp present in the HTML
    assert 'PSD' in ''.join(report.html)
    assert 'GMT' in ''.join(report.html)
    # test new anonymize functionality
    report = Report()
    raw.anonymize()
    raw.save(raw_fname_new, overwrite=True)
    report.parse_folder(data_path=tempdir, render_bem=False,
                        on_error='raise')
    assert isinstance(report.html, list)
    # after anonymization, a GMT timestamp is still rendered
    assert 'GMT' in ''.join(report.html)
    # DATE_NONE functionality
    report = Report()
    # old style (pre 0.20) date anonymization
    raw.info['meas_date'] = None
    for key in ('file_id', 'meas_id'):
        value = raw.info.get(key)
        if value is not None:
            assert 'msecs' not in value
            # force the sentinel "no date" value into both id structures
            value['secs'] = DATE_NONE[0]
            value['usecs'] = DATE_NONE[1]
    raw.save(raw_fname_new, overwrite=True)
    report.parse_folder(data_path=tempdir, render_bem=False,
                        on_error='raise')
    assert isinstance(report.html, list)
    # with no measurement date at all, no GMT string should be rendered
    assert 'GMT' not in ''.join(report.html)
@testing.requires_testing_data
def test_render_add_sections(renderer, tmpdir):
    """Test adding figures/images to section."""
    tempdir = str(tmpdir)
    report = Report(subjects_dir=subjects_dir)
    # Check add_figs_to_section functionality
    fig = plt.plot([1, 2], [1, 2])[0].figure
    report.add_figs_to_section(figs=fig,  # test non-list input
                               captions=['evoked response'], scale=1.2,
                               image_format='svg')
    # mismatched figs/captions lengths must raise
    pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
                  captions='H')
    # zero or effectively-zero scale values must raise
    pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
                  captions=['foo'], scale=0, image_format='svg')
    pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
                  captions=['foo'], scale=1e-10, image_format='svg')
    # need to recreate because calls above change size
    fig = plt.plot([1, 2], [1, 2])[0].figure
    # Check add_images_to_section with png
    img_fname = op.join(tempdir, 'testimage.png')
    fig.savefig(img_fname)
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])
    pytest.raises(ValueError, report.add_images_to_section,
                  fnames=[img_fname, img_fname], captions='H')
    # a non-existent image path must raise
    pytest.raises(ValueError, report.add_images_to_section,
                  fnames=['foobar.xxx'], captions='H')
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # a 3D renderer figure (plot_alignment) should also be accepted
    fig = plot_alignment(evoked.info, trans_fname, subject='sample',
                         subjects_dir=subjects_dir)
    report.add_figs_to_section(figs=fig,  # test non-list input
                               captions='random image', scale=1.2)
    assert (repr(report))
    fname = op.join(str(tmpdir), 'test.html')
    report.save(fname, open_browser=False)
    with open(fname, 'r') as fid:
        html = fid.read()
    assert html.count('<li class="report_custom"') == 8  # several
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri(renderer, tmpdir):
    """Test rendering MRI for mne report."""
    tempdir = str(tmpdir)
    trans_fname_new = op.join(tempdir, 'temp-trans.fif')
    for a, b in [[trans_fname, trans_fname_new]]:
        shutil.copyfile(a, b)
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=subjects_dir)
    # mri_decim keeps BEM contour rendering fast during the test
    report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
    fname = op.join(tempdir, 'report.html')
    report.save(fname, open_browser=False)
    with open(fname, 'r') as fid:
        html = fid.read()
    assert html.count('<li class="bem"') == 2  # left and content
    assert repr(report)
    # adding a BEM section explicitly, under a custom section name
    report.add_bem_to_section('sample', caption='extra', section='foo',
                              subjects_dir=subjects_dir, decim=30)
    report.save(fname, open_browser=False, overwrite=True)
    with open(fname, 'r') as fid:
        html = fid.read()
    # the custom section must be labelled 'foo', not the default 'report'
    assert 'report_report' not in html
    assert html.count('<li class="report_foo"') == 2
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem(tmpdir):
    """Test rendering MRI without BEM for mne report."""
    tempdir = str(tmpdir)
    mri_dir = op.join(tempdir, 'sample', 'mri')
    os.mkdir(op.join(tempdir, 'sample'))
    os.mkdir(mri_dir)
    shutil.copyfile(mri_fname, op.join(mri_dir, 'T1.mgz'))
    report = Report(info_fname=raw_fname, subject='sample',
                    subjects_dir=tempdir)
    report.parse_folder(tempdir, render_bem=False)
    # rendering the BEM without any surface files must warn, not fail
    with pytest.warns(RuntimeWarning, match='No BEM surfaces found'):
        report.parse_folder(tempdir, render_bem=True, mri_decim=20)
    assert 'bem' in report.fnames
    report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
    """Test adding html str to mne report."""
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=subjects_dir)
    snippet = '<b>MNE-Python is AWESOME</b>'
    caption, section = 'html', 'html_section'
    report.add_htmls_to_section(snippet, caption, section)
    # the rendered entry for our section must contain the raw snippet
    position = report._sectionlabels.index('report_' + section)
    assert snippet in report.html[position]
    assert repr(report)
def test_add_slider_to_section(tmpdir):
    """Test adding a slider with a series of images to mne report."""
    tempdir = str(tmpdir)
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=subjects_dir)
    section = 'slider_section'
    figs = _get_example_figures()
    report.add_slider_to_section(figs, section=section, title='my title')
    # internal fname encodes title, section label and the 'custom' marker
    assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'
    report.save(op.join(tempdir, 'report.html'), open_browser=False)
    # nested lists of figures are not supported
    pytest.raises(NotImplementedError, report.add_slider_to_section,
                  [figs, figs])
    # a wrong-length list raises ValueError; a wrong type raises TypeError
    pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
    pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
    # need at least 2
    pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
    # Smoke test that SVG w/unicode can be added
    report = Report()
    fig, ax = plt.subplots()
    ax.set_xlabel('µ')
    report.add_slider_to_section([fig] * 2, image_format='svg')
def test_validate_input():
    """Test Report input validation."""
    report = Report()
    items = ['a', 'b', 'c']
    captions = ['Letter A', 'Letter B', 'Letter C']
    section = 'ABCs'
    comments = ['First letter of the alphabet.',
                'Second letter of the alphabet',
                'Third letter of the alphabet']
    # mismatched lengths between items and captions/comments must raise
    with pytest.raises(ValueError):
        report._validate_input(items, captions[:-1], section, comments=None)
    with pytest.raises(ValueError):
        report._validate_input(items, captions, section,
                               comments=comments[:-1])
    # a valid call returns the (possibly filled-in) triple
    _, _, comments_new = report._validate_input(items, captions, section,
                                                comments=None)
    assert len(comments_new) == len(items)
@requires_h5py
def test_open_report(tmpdir):
    """Test the open_report function."""
    tempdir = str(tmpdir)
    hdf5 = op.join(tempdir, 'report.h5')
    # Test creating a new report through the open_report function
    fig1 = _get_example_figures()[0]
    with open_report(hdf5, subjects_dir=subjects_dir) as report:
        assert report.subjects_dir == subjects_dir
        assert report._fname == hdf5
        report.add_figs_to_section(figs=fig1, captions=['evoked response'])
    # Exiting the context block should have triggered saving to HDF5
    assert op.exists(hdf5)
    # Load the HDF5 version of the report and check equivalence
    report2 = open_report(hdf5)
    assert report2._fname == hdf5
    assert report2.subjects_dir == report.subjects_dir
    assert report2.html == report.html
    assert report2.__getstate__() == report.__getstate__()
    # the backing filename itself must not be part of the persisted state
    assert '_fname' not in report2.__getstate__()
    # Check parameters when loading a report
    pytest.raises(ValueError, open_report, hdf5, foo='bar')  # non-existing
    pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
    open_report(hdf5, subjects_dir=subjects_dir)  # This should work
    # Check that the context manager doesn't swallow exceptions
    with pytest.raises(ZeroDivisionError):
        with open_report(hdf5, subjects_dir=subjects_dir) as report:
            1 / 0
def test_remove():
    """Test removing figures from a report."""
    report = Report()
    fig1, fig2 = _get_example_figures()
    report.add_figs_to_section(fig1, 'figure1', 'mysection')
    report.add_slider_to_section([fig1, fig2], title='figure1',
                                 section='othersection')
    report.add_figs_to_section(fig2, 'figure1', 'mysection')
    report.add_figs_to_section(fig2, 'figure2', 'mysection')
    # Removal by caption: the *last* matching figure is the one removed.
    trimmed = copy.deepcopy(report)
    assert trimmed.remove(caption='figure1') == 2
    assert len(trimmed.html) == 3
    for new_idx, old_idx in enumerate([0, 1, 3]):
        assert trimmed.html[new_idx] == report.html[old_idx]
    # Removal can be restricted to a single section.
    trimmed = copy.deepcopy(report)
    assert trimmed.remove(caption='figure1', section='othersection') == 1
    assert len(trimmed.html) == 3
    for new_idx, old_idx in enumerate([0, 2, 3]):
        assert trimmed.html[new_idx] == report.html[old_idx]
    # A section emptied by removal disappears entirely.
    trimmed = copy.deepcopy(report)
    trimmed.remove(caption='figure1', section='othersection')
    assert trimmed.sections == ['mysection']
    assert trimmed._sectionvars == {'mysection': 'report_mysection'}
def test_add_or_replace():
    """Test replacing existing figures in a report."""
    report = Report()
    fig1, fig2 = _get_example_figures()
    for caption, section in (('duplicate', 'mysection'),
                             ('duplicate', 'mysection'),
                             ('duplicate', 'othersection')):
        report.add_figs_to_section(fig1, caption, section)
    report.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
    # With the default replace=False every addition is kept.
    assert len(report.html) == 4
    snapshot = copy.deepcopy(report)
    # replace=True overwrites the last 'duplicate' entry in 'mysection'.
    report.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
    assert len(report.html) == 4
    assert report.html[1] != snapshot.html[1]  # the replaced entry changed
    for idx in (0, 2, 3):  # everything else is untouched
        assert report.html[idx] == snapshot.html[idx]
def test_scraper(tmpdir):
    """Test report scraping."""
    r = Report()
    fig1, fig2 = _get_example_figures()
    r.add_figs_to_section(fig1, 'a', 'mysection')
    r.add_figs_to_section(fig2, 'b', 'mysection')
    # Mock a Sphinx + sphinx_gallery config
    app = Bunch(builder=Bunch(srcdir=str(tmpdir),
                              outdir=op.join(str(tmpdir), '_build', 'html')))
    scraper = _ReportScraper()
    scraper.app = app
    gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
    img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',
                        'sg_img.png')
    target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
    os.makedirs(op.dirname(img_fname))
    os.makedirs(app.builder.outdir)
    block_vars = dict(image_path_iterator=(img for img in [img_fname]),
                      example_globals=dict(a=1), target_file=target_file)
    # Nothing yet
    block = None
    rst = scraper(block, block_vars, gallery_conf)
    assert rst == ''
    # Still nothing
    block_vars['example_globals']['r'] = r
    rst = scraper(block, block_vars, gallery_conf)
    # Once it's saved, add it
    assert rst == ''
    fname = op.join(str(tmpdir), 'my_html.html')
    r.save(fname, open_browser=False)
    rst = scraper(block, block_vars, gallery_conf)
    out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
    # files only appear in outdir after an explicit copyfiles() call
    assert not op.isfile(out_html)
    os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
    scraper.copyfiles()
    assert op.isfile(out_html)
    # the emitted rST embeds the saved report via an <iframe>
    assert rst.count('"') == 6
    assert "<iframe" in rst
    assert op.isfile(img_fname.replace('png', 'svg'))
run_tests_if_main()
| {
"content_hash": "2376e6365aadc6692d033c96e87e24ca",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 79,
"avg_line_length": 40.014227642276424,
"alnum_prop": 0.6387463808604663,
"repo_name": "cjayb/mne-python",
"id": "f8b555947d4707e571b960b41d94d56a4044f975",
"size": "19832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/tests/test_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "7901053"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import sys
from raven.utils.testutils import TestCase
from raven.utils import six
from raven.base import Client
from raven.handlers.logging import SentryHandler
from raven.utils.stacks import iter_stack_frames
class TempStoreClient(Client):
    """Raven client that records events in memory instead of sending them.

    Captured payloads accumulate in ``self.events`` for test inspection.
    """

    def __init__(self, servers=None, **kwargs):
        # Buffer of captured event payloads, in emission order.
        self.events = []
        super(TempStoreClient, self).__init__(servers=servers, **kwargs)

    def send(self, **kwargs):
        """Capture the event payload rather than transmitting it."""
        self.events.append(kwargs)

    def is_enabled(self):
        """Always report the client as enabled."""
        return True
class LoggingIntegrationTest(TestCase):
    """Exercise SentryHandler.emit end-to-end against a capturing client."""

    def setUp(self):
        # Capture events in memory; include_paths marks in-app modules.
        self.client = TempStoreClient(include_paths=['tests', 'raven'])
        self.handler = SentryHandler(self.client)

    def make_record(self, msg, args=(), level=logging.INFO, extra=None, exc_info=None):
        """Build a ``logging.LogRecord`` from logger 'root'.

        ``extra`` entries are copied onto the record's ``__dict__``, the
        same way ``Logger.makeRecord`` applies its ``extra`` argument.
        """
        record = logging.LogRecord('root', level, __file__, 27, msg, args, exc_info, 'make_record')
        if extra:
            for key, value in six.iteritems(extra):
                record.__dict__[key] = value
        return record

    def test_logger_basic(self):
        """A plain message yields a Message interface, no stack/exception."""
        record = self.make_record('This is a test error')
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['logger'], 'root')
        self.assertEqual(event['level'], logging.INFO)
        self.assertEqual(event['message'], 'This is a test error')
        self.assertFalse('sentry.interfaces.Stacktrace' in event)
        self.assertFalse('sentry.interfaces.Exception' in event)
        self.assertTrue('sentry.interfaces.Message' in event)
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test error')
        self.assertEqual(msg['params'], ())

    def test_logger_extra_data(self):
        """``extra={'data': ...}`` lands in the event's extra payload."""
        record = self.make_record('This is a test error', extra={'data': {
            'url': 'http://example.com',
        }})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        # values are repr()'d, so the prefix differs between py2 and py3
        if six.PY3:
            expected = "'http://example.com'"
        else:
            expected = "u'http://example.com'"
        self.assertEqual(event['extra']['url'], expected)

    def test_logger_exc_info(self):
        """Passing exc_info attaches Stacktrace and Exception interfaces."""
        try:
            raise ValueError('This is a test ValueError')
        except ValueError:
            record = self.make_record('This is a test info with an exception', exc_info=sys.exc_info())
        else:
            self.fail('Should have raised an exception')
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['message'], 'This is a test info with an exception')
        self.assertTrue('sentry.interfaces.Stacktrace' in event)
        self.assertTrue('sentry.interfaces.Exception' in event)
        exc = event['sentry.interfaces.Exception']
        self.assertEqual(exc['type'], 'ValueError')
        self.assertEqual(exc['value'], 'This is a test ValueError')
        self.assertTrue('sentry.interfaces.Message' in event)
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test info with an exception')
        self.assertEqual(msg['params'], ())

    def test_message_params(self):
        """%-style args stay separate: template in message, args in params."""
        record = self.make_record('This is a test of %s', args=('args',))
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['message'], 'This is a test of args')
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test of %s')
        expected = ("'args'",) if six.PY3 else ("u'args'",)
        self.assertEqual(msg['params'], expected)

    def test_record_stack(self):
        """``extra={'stack': True}`` attaches a captured stacktrace."""
        record = self.make_record('This is a test of stacks', extra={'stack': True})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertTrue('sentry.interfaces.Stacktrace' in event)
        frames = event['sentry.interfaces.Stacktrace']['frames']
        # BUG FIX: assertNotEquals is a deprecated alias of assertNotEqual
        # (removed in Python 3.12); use the canonical name.
        self.assertNotEqual(len(frames), 1)
        frame = frames[0]
        self.assertEqual(frame['module'], 'raven.handlers.logging')
        self.assertFalse('sentry.interfaces.Exception' in event)
        self.assertTrue('sentry.interfaces.Message' in event)
        self.assertEqual(event['culprit'], 'root in make_record')
        self.assertEqual(event['message'], 'This is a test of stacks')

    def test_no_record_stack(self):
        """``extra={'stack': False}`` suppresses stacktrace capture."""
        record = self.make_record('This is a test with no stacks', extra={'stack': False})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['message'], 'This is a test with no stacks')
        self.assertFalse('sentry.interfaces.Stacktrace' in event)

    def test_explicit_stack(self):
        """An explicit frame iterator may be supplied via ``extra['stack']``."""
        record = self.make_record('This is a test of stacks', extra={'stack': iter_stack_frames()})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert 'sentry.interfaces.Stacktrace' in event
        assert 'culprit' in event
        assert event['culprit'] == 'root in make_record'
        self.assertTrue('message' in event, event)
        self.assertEqual(event['message'], 'This is a test of stacks')
        self.assertFalse('sentry.interfaces.Exception' in event)
        self.assertTrue('sentry.interfaces.Message' in event)
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test of stacks')
        self.assertEqual(msg['params'], ())

    def test_extra_culprit(self):
        """``extra={'culprit': ...}`` overrides the computed culprit."""
        record = self.make_record('This is a test of stacks', extra={'culprit': 'foo in bar'})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['culprit'], 'foo in bar')

    def test_extra_data_as_string(self):
        """A plain-string 'data' value is repr()'d into the extra payload."""
        record = self.make_record('Message', extra={'data': 'foo'})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        expected = "'foo'" if six.PY3 else "u'foo'"
        self.assertEqual(event['extra']['data'], expected)

    def test_tags(self):
        """``extra={'tags': ...}`` propagates to the event's tags."""
        record = self.make_record('Message', extra={'tags': {'foo': 'bar'}})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['tags'] == {'foo': 'bar'}

    def test_tags_on_error(self):
        """Tags survive alongside exc_info on an error record."""
        try:
            raise ValueError('This is a test ValueError')
        except ValueError:
            record = self.make_record('Message', extra={'tags': {'foo': 'bar'}}, exc_info=sys.exc_info())
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['tags'] == {'foo': 'bar'}
class LoggingHandlerTest(TestCase):
    """Tests for the various SentryHandler constructor signatures."""

    def test_client_arg(self):
        """A client instance may be passed positionally."""
        client = TempStoreClient(include_paths=['tests'])
        handler = SentryHandler(client)
        self.assertEqual(handler.client, client)

    def test_client_kwarg(self):
        """A client instance may be passed via the ``client`` keyword."""
        client = TempStoreClient(include_paths=['tests'])
        handler = SentryHandler(client=client)
        self.assertEqual(handler.client, client)

    def test_args_as_servers_and_key(self):
        """Server-list + key positional args construct a default Client."""
        handler = SentryHandler(['http://sentry.local/api/store/'], 'KEY')
        self.assertTrue(isinstance(handler.client, Client))

    def test_first_arg_as_dsn(self):
        """A DSN string as the first argument constructs a default Client."""
        handler = SentryHandler('http://public:secret@example.com/1')
        self.assertTrue(isinstance(handler.client, Client))

    def test_custom_client_class(self):
        """``client_cls`` selects the concrete Client class."""
        handler = SentryHandler('http://public:secret@example.com/1', client_cls=TempStoreClient)
        # BUG FIX: the original `assertTrue(type(handler.client),
        # TempStoreClient)` treated the class as the assertion *message*, so
        # the check could never fail. Assert the instance type for real.
        self.assertIsInstance(handler.client, TempStoreClient)

    def test_invalid_first_arg_type(self):
        """An unusable first-argument type raises ValueError."""
        self.assertRaises(ValueError, SentryHandler, object)

    def test_logging_level_set(self):
        """The ``level`` keyword is applied to the handler."""
        handler = SentryHandler('http://public:secret@example.com/1', level="ERROR")
        # XXX: some version of python 2.6 seem to pass the string on instead of coercing it
        self.assertTrue(handler.level in (logging.ERROR, 'ERROR'))

    def test_logging_level_not_set(self):
        """Without ``level`` the handler defaults to NOTSET."""
        handler = SentryHandler('http://public:secret@example.com/1')
        self.assertEqual(handler.level, logging.NOTSET)
| {
"content_hash": "84c0e93e7f4b80462883f2f80cf3163c",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 105,
"avg_line_length": 40.99065420560748,
"alnum_prop": 0.6394208846329229,
"repo_name": "gregorynicholas/raven-python",
"id": "248cdcc69877e9e871948717550832d835fef0ca",
"size": "8772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/handlers/logging/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
#-*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Cube File Support
##########################
Cube files contain an atomic geometry and scalar field values corresponding to
a physical quantity.
"""
import os
import six
import numpy as np
import pandas as pd
from glob import glob
from exatomic.exa import Series, TypedMeta
from exatomic import __version__, Atom, Editor, AtomicField, Frame, Universe
from exatomic.base import z2sym, sym2z
class Meta(TypedMeta):
    """Typed-attribute declarations for the dataframes a Cube editor parses."""
    atom = Atom
    frame = Frame
    field = AtomicField
class Cube(six.with_metaclass(Meta, Editor)):
    """
    An editor for handling cube files. Assumes scalar field values are arranged
    with the x axis as the outer loop and the z axis as the inner loop.
    .. code-block:: python
        cube = Cube('my.cube')
        cube.atom                # Displays the atom dataframe
        cube.field               # Displays the field dataframe
        cube.field.field_values  # Displays the list of field values
        uni = cube.to_universe() # Converts the cube file editor to a universe
        UniverseWidget(uni)      # Renders the cube file
    Warning:
        Be sure your cube is in atomic units.
    """
    def parse_atom(self):
        """
        Parse the :class:`~exatomic.atom.Atom` object from the cube file in place.
        """
        # Line index 2 holds the atom count (first token); abs() because some
        # cube writers emit a negative count as a header flag — TODO confirm
        # against the cube file spec.
        nat = abs(int(self[2].split()[0]))
        names = ['Z', 'Zeff', 'x', 'y', 'z']
        # Atom records occupy lines 6 .. 6+nat (two comment lines plus four
        # header lines precede them).
        df = self.pandas_dataframe(6, nat + 6, names)
        df['symbol'] = df['Z'].map(z2sym).astype('category')
        df['label'] = range(nat)
        df['frame'] = 0
        self.atom = Atom(df)
    def parse_field(self):
        """
        Parse the scalar field into an :class:`~exatomic.core.field.AtomicField`.
        Note:
            The :class:`~exatomic.core.field.AtomicField` tracks both the
            field parameters (i.e. information about the discretization and shape of
            the field's spatial points) as well as the field values (at each of
            those points in space). See :meth:`~exatomic.algorithms.orbital_util.make_fps`
            for more details.
        """
        # First two lines of a cube file are free-form comments.
        self.meta = {'comments': self[:2]}
        typs = [int, float, float, float]
        # Header line 2: atom count + field origin; lines 3-5: voxel count and
        # step vector along each axis.
        nat, ox, oy, oz = [typ(i) for typ, i in zip(typs, self[2].split())]
        nx, dxi, dxj, dxk = [typ(i) for typ, i in zip(typs, self[3].split())]
        ny, dyi, dyj, dyk = [typ(i) for typ, i in zip(typs, self[4].split())]
        nz, dzi, dzj, dzk = [typ(i) for typ, i in zip(typs, self[5].split())]
        # Counts may be written negative (format flag); only magnitudes used.
        nat, nx, ny, nz = abs(nat), abs(nx), abs(ny), abs(nz)
        volstart = nat + 6
        # Some cube files insert one extra short header line before the
        # volumetric data; skip it when the first candidate row is narrow
        # (<5 columns) but the following row is full width.
        if len(self[volstart].split()) < 5:
            if not len(self[volstart + 1].split()) < 5:
                volstart += 1
        ncol = len(self[volstart].split())
        # Flatten the whole volumetric table into a 1D value array.
        data = self.pandas_dataframe(volstart, len(self), ncol).values.ravel()
        df = pd.Series({'ox': ox, 'oy': oy, 'oz': oz,
                        'nx': nx, 'ny': ny, 'nz': nz,
                        'dxi': dxi, 'dxj': dxj, 'dxk': dxk,
                        'dyi': dyi, 'dyj': dyj, 'dyk': dyk,
                        'dzi': dzi, 'dzj': dzj, 'dzk': dzk,
                        'frame': 0, 'label': self.label,
                        'field_type': self.field_type}).to_frame().T
        for col in ['nx', 'ny', 'nz']:
            df[col] = df[col].astype(np.int64)
        for col in ['ox', 'oy', 'oz', 'dxi', 'dxj', 'dxk',
                    'dyi', 'dyj', 'dyk', 'dzi', 'dzj', 'dzk']:
            df[col] = df[col].astype(np.float64)
        # Drop NaNs introduced by ragged final rows of the volumetric table.
        fields = [Series(data[~np.isnan(data)])]
        self.field = AtomicField(df, field_values=fields)
    @classmethod
    def from_universe(cls, uni, idx, name=None, frame=None):
        """
        Make a cube file format Editor from a given field in a
        :class:`~exatomic.core.universe.Universe`.
        Args:
            uni (:class:`~exatomic.core.universe.Universe`): a universe
            idx (int): field index in :class:`~exatomic.core.field.AtomicField`
            name (str): description for comment line
            frame (int): frame index in :class:`~exatomic.core.atom.Atom`
        """
        name = '' if name is None else name
        frame = uni.atom.nframes - 1 if frame is None else frame
        # Header comment plus the fixed-width float formats used throughout.
        hdr = '{} -- written by exatomic v{}\n\n'
        ffmt = ' {:> 12.6f}'
        flfmt = ('{:>5}' + ffmt * 3 + '\n').format
        # Ensure the Z / Zeff columns the cube atom records need exist.
        if 'Z' not in uni.atom:
            uni.atom['Z'] = uni.atom['symbol'].map(sym2z)
        if 'Zeff' not in uni.atom:
            uni.atom['Zeff'] = uni.atom['Z'].astype(np.float64)
        frame = uni.atom[uni.atom['frame'] == frame]
        for col in ['nx', 'ny', 'nz']:
            uni.field[col] = uni.field[col].astype(np.int64)
        field = uni.field.loc[idx]
        volum = uni.field.field_values[idx]
        orig = len(frame.index), field.ox, field.oy, field.oz
        nx, ny, nz = int(field.nx), int(field.ny), int(field.nz)
        xdim = nx, field.dxi, field.dxj, field.dxk
        ydim = ny, field.dyi, field.dyj, field.dyk
        zdim = nz, field.dzi, field.dzj, field.dzk
        atargs = {'float_format': '%12.6f',
                  'header': None, 'index': None,
                  'columns': ['Z', 'Zeff', 'x', 'y', 'z']}
        # Volumetric values are written six per line, z varying fastest.
        chnk = ''.join(['{}' * 6 + '\n' for i in range(nz // 6)])
        if nz % 6: chnk += '{}' * (nz % 6) + '\n'
        return cls(hdr.format(name, __version__)
                   + flfmt(*orig) + flfmt(*xdim)
                   + flfmt(*ydim) + flfmt(*zdim)
                   + uni.atom.to_string(**atargs) + '\n'
                   + (chnk * nx * ny).format(*volum.apply(
                       ffmt.replace('f', 'E').format)))
    def __init__(self, *args, **kwargs):
        # label / field_type annotate the parsed field rows (see parse_field).
        label = kwargs.pop("label", None)
        field_type = kwargs.pop("field_type", None)
        super(Cube, self).__init__(*args, **kwargs)
        self.label = label
        self.field_type = field_type
def uni_from_cubes(adir, verbose=False, ncubes=None, ext='cube'):
    """Collect the cube files in a directory into a single universe.

    .. code-block:: python

        uni = uni_from_cubes('/path/to/files/')      # all 'files/*cube'
        uni = uni_from_cubes('files/', ext='cub')    # all 'files/*cub'
        uni = uni_from_cubes('files/', verbose=True) # print names as parsed
        uni = uni_from_cubes('files/', ncubes=5)     # first 5 cubes only
                                                     # (sorted by file name)

    Args:
        verbose (bool): print file names when reading cubes
        ncubes (int): get only the first ncubes
        ext (str): file extension of cube files

    Returns:
        uni (:class:`exatomic.core.universe.Universe`)
    """
    if not adir.endswith(os.sep):
        adir += os.sep
    paths = sorted(glob(adir + '*' + ext))
    if ncubes is not None:
        paths = paths[:ncubes]
    if verbose:
        for path in paths:
            print(path)
    # Geometry comes from the first cube; every cube contributes a field.
    uni = Universe(atom=Cube(paths[0]).atom)
    uni.add_field([Cube(path).field for path in paths])
    return uni
| {
"content_hash": "ec6a5f02587a1b5c292932908037409b",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 94,
"avg_line_length": 41.293103448275865,
"alnum_prop": 0.5459986082115519,
"repo_name": "exa-analytics/exatomic",
"id": "7bf7d77d71bd169dc04310a38a568895c8cb4444",
"size": "7185",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "exatomic/interfaces/cube.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "762"
},
{
"name": "JavaScript",
"bytes": "121101"
},
{
"name": "Jupyter Notebook",
"bytes": "13176"
},
{
"name": "Python",
"bytes": "1084816"
},
{
"name": "Shell",
"bytes": "711"
},
{
"name": "TypeScript",
"bytes": "953"
}
],
"symlink_target": ""
} |
"""\
Please update your code to use authkit.authorize.wsgi_adaptors instead of this
module.
"""
from authkit.authorize.wsgi_adaptors import *
| {
"content_hash": "1c8c7fd73648e883dedfce0b53f17dda",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 78,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.7692307692307693,
"repo_name": "cedadev/AuthKit",
"id": "8f1dfea7d55a2ce38f31e841eb08e5986c3cd1b1",
"size": "143",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "authkit/authorize/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321892"
}
],
"symlink_target": ""
} |
import os
import socket
import struct
import sys
import threading
import time
import urllib
# Python 2 / Python 3 compatibility: try the Python 2 module names first and
# fall back to the Python 3 locations.  On Python 2 the name `input` is also
# rebound to `raw_input` so later prompts read raw strings on both versions.
try:
    from SimpleHTTPServer import SimpleHTTPRequestHandler
    from SocketServer import TCPServer
    from urllib import quote
    input = raw_input
except ImportError:
    from http.server import SimpleHTTPRequestHandler
    from socketserver import TCPServer
    from urllib.parse import quote
interactive = False
if len(sys.argv) <= 2:
    # If there aren't enough variables, use interactive mode
    if len(sys.argv) == 2:
        # A single argument may be a help request rather than a target IP.
        if sys.argv[1].lower() in ('--help', '-help', 'help', 'h', '-h', '--h'):
            print('Usage: ' + sys.argv[0] + ' <target ip> <file / directory> [host ip] [host port]')
            sys.exit(1)
    interactive = True
elif len(sys.argv) < 3 or len(sys.argv) > 6:
    print('Usage: ' + sys.argv[0] + ' <target ip> <file / directory> [host ip] [host port]')
    sys.exit(1)
# File extensions the script is willing to serve (checked again below).
accepted_extension = ('.cia', '.tik', '.cetk', '.3dsx')
hostPort = 8080 # Default value
# Collect target_ip, target_path, hostIp and hostPort either interactively or
# from the command line.
if interactive:
    target_ip = input("The IP of your 3DS: ")
    target_path = input("The file you want to send (.cia, .tik, .cetk, or .3dsx): ")
    hostIp = input("Host IP (or press Enter to have the script detect host IP):")
    if hostIp == '':
        print('Detecting host IP...')
        # Connect a UDP socket to a public DNS address purely to learn which
        # local interface IP the OS would route through; nothing is sent.
        hostIp = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
    else:
        hostPort = input("Host port (or press Enter to keep default, 8080):")
        if hostPort == '':
            hostPort = 8080 # Default
        else:
            # BUG FIX: input() returns a string, but TCPServer(('', hostPort), ...)
            # below requires an int port; the original passed the raw string.
            hostPort = int(hostPort)
else:
    # (if the script is being run using a full python path; ex: "path/to/python script_name.py foo foo..")
    if sys.argv[1] == os.path.basename(__file__):
        target_ip = sys.argv[2]
        target_path = sys.argv[3]
        if len(sys.argv) >= 5:
            hostIp = sys.argv[4]
            if len(sys.argv) == 6:
                hostPort = int(sys.argv[5])
        else:
            print('Detecting host IP...')
            hostIp = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
    # (if the script is being run using just the script name and default executable for python scripts; ex: "script_name.py foo foo..")
    else:
        target_ip = sys.argv[1]
        target_path = sys.argv[2]
        if len(sys.argv) >= 4:
            hostIp = sys.argv[3]
            if len(sys.argv) == 5:
                hostPort = int(sys.argv[4])
        else:
            print('Detecting host IP...')
            hostIp = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
# Build the newline-separated list of URLs to send to the 3DS, and chdir into
# the directory that should be served over HTTP.
target_path = target_path.strip()
if not os.path.exists(target_path):
    print(target_path + ': No such file or directory.')
    sys.exit(1)
print('Preparing data...')
baseUrl = hostIp + ':' + str(hostPort) + '/'
if os.path.isfile(target_path):
    if target_path.endswith(accepted_extension):
        file_list_payload = baseUrl + quote(os.path.basename(target_path))
        directory = os.path.dirname(target_path)  # get file directory
    else:
        # BUG FIX: the original concatenated a str with the tuple
        # `accepted_extension`, which raises TypeError; join the extensions
        # into a printable string instead.
        print('Unsupported file extension. Supported extensions are: ' + ', '.join(accepted_extension))
        sys.exit(1)
else:
    directory = target_path  # it's a directory
    file_list_payload = ''  # init the payload before adding lines
    # Serve only the files directly inside the directory (no recursion) that
    # have one of the accepted extensions.
    for file in [file for file in next(os.walk(target_path))[2] if file.endswith(accepted_extension)]:
        file_list_payload += baseUrl + quote(file) + '\n'
    if len(file_list_payload) == 0:
        print('No files to serve.')
        sys.exit(1)
file_list_payloadBytes = file_list_payload.encode('ascii')
if directory and directory != '.':  # doesn't need to move if it's already the current working directory
    os.chdir(directory)  # set working directory to the right folder to be able to serve files
print('\nURLs:')
print(file_list_payload + '\n')
class MyServer(TCPServer):
    """TCPServer that sets SO_REUSEADDR before binding, so the port can be
    reused immediately after a previous run instead of failing with
    'Address already in use'."""
    def server_bind(self):
        # FIX: dropped the redundant local `import socket` that shadowed the
        # module-level import for no benefit.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
print('Opening HTTP server on port ' + str(hostPort))
server = MyServer(('', hostPort), SimpleHTTPRequestHandler)
# Serve files in a background thread so this thread can talk to the 3DS.
thread = threading.Thread(target=server.serve_forever)
thread.start()
try:
    print('Sending URL(s) to ' + target_ip + ' on port 5000...')
    # Send a network-byte-order 32-bit length prefix followed by the
    # newline-separated URL list over TCP port 5000.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((target_ip, 5000))
    sock.sendall(struct.pack('!L', len(file_list_payloadBytes)) + file_list_payloadBytes)
    # Wait until the target sends at least one byte back (presumably
    # signalling it is done downloading -- TODO confirm protocol).
    while len(sock.recv(1)) < 1:
        time.sleep(0.05)
    sock.close()
except Exception as e:
    print('An error occurred: ' + str(e))
    server.shutdown()
    sys.exit(1)
print('Shutting down HTTP server...')
server.shutdown()
| {
"content_hash": "37a3a86308bfcd3592189177d06f5479",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 148,
"avg_line_length": 35.48550724637681,
"alnum_prop": 0.620992444353686,
"repo_name": "Jerry-Shaw/FBI",
"id": "9fb4a4d00f3fddd8b6d230ab9b01d251f77bc784",
"size": "4940",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "servefiles/servefiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "214"
},
{
"name": "C",
"bytes": "792166"
},
{
"name": "C++",
"bytes": "5230"
},
{
"name": "Makefile",
"bytes": "1026"
},
{
"name": "Objective-C",
"bytes": "352"
},
{
"name": "Python",
"bytes": "6419"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
} |
def build_vocabulary(data):
    """Build a sorted vocabulary with word frequencies from a text file.

    Reads the whole file `data`, counts whitespace-separated tokens, and
    writes two files into the current directory:

      * frequency.txt  -- a header line followed by "<word> : <count>" per
                          word, in sorted word order.
      * vocabulary.txt -- one word per line, only words occurring more than
                          once.

    :param data: path of the input text file.
    """
    from collections import Counter
    # Read and tokenize the whole file at once; `with` guarantees the handle
    # is closed (the original leaked the two output handles on error).
    with open(data, "r") as reviews_file:
        words = reviews_file.read().split()
    # FIX: Counter replaces the original O(n^2) sort/count/remove loop with a
    # single linear pass, producing the same sorted vocab/freq lists.
    counts = Counter(words)
    vocab = sorted(counts)
    freq = [str(counts[w]) for w in vocab]
    print(vocab)
    print([])  # the original printed the working word list, empty by now
    print(freq)
    with open("frequency.txt", "w") as output:
        output.write("Name : Frequency \n")
        for word, count in zip(vocab, freq):
            output.write(word + " : " + count)
            output.write("\n")
    with open("vocabulary.txt", "w") as output:
        # only words that occur more than once make it into the vocabulary
        for word, count in zip(vocab, freq):
            if int(count) > 1:
                output.write(word)
                output.write("\n")
    return
build_vocabulary("data_without_stopwords.txt")
print("Program Successfully Terminated!")
| {
"content_hash": "b49c8a31caeabd79cb96eaff16d03f84",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 49,
"avg_line_length": 17.9672131147541,
"alnum_prop": 0.5155109489051095,
"repo_name": "bharathdintakurti/Natural-Language-Processing",
"id": "c8a4e266d2e6e30302fed2da887a20a412d2d855",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Vocabulary/vocabulary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import re
import time
import logging
import traceback
import colored
from dnutils import out, ifnone
from collections import defaultdict
import random
from functools import reduce
# math functions
# Prefer arbitrary-precision math via mpmath (80 decimal digits) for
# exp/fsum/log to avoid overflow; fall back to the standard math module when
# mpmath is unavailable (or deliberately disabled via USE_MPMATH).
USE_MPMATH = True
try:
    if not USE_MPMATH:
        raise Exception()
    import mpmath # @UnresolvedImport
    mpmath.mp.dps = 80
    from mpmath import exp, fsum, log # @UnresolvedImport
except:
    from math import exp, log
    try:
        from math import fsum
    except: # not supported in Python 2.5
        fsum = sum
    #sys.stderr.write("Warning: Falling back to standard math module because mpmath is not installed. If overflow errors occur, consider installing mpmath.")
from math import floor, ceil, e, sqrt
import math
def crash(*args, **kwargs):
    # Print the given message via dnutils.out with the traceback depth bumped
    # by one (so the report points at crash()'s caller), then print a red
    # TERMINATING banner and kill the process with a nonzero exit code.
    # NOTE: Python 2 syntax (print statement).
    out(*args, **edict(kwargs) + {'tb': kwargs.get('tb', 1) + 1})
    print colorize('TERMINATING.', ('red', None, True), True)
    exit(-1)
def flip(value):
    '''
    Flips the given binary value to its complement.

    Works with ints and booleans.

    :param value: the value to flip (bool, or int 0/1).
    :return: the complement of `value` (same type class as the input).
    :raises TypeError: if `value` is neither a bool nor an int.
    '''
    if type(value) is bool:
        return True if value is False else False
    elif type(value) is int:
        return 1 - value
    else:
        # BUG FIX: the original constructed the TypeError but never raised
        # it, silently returning None for unsupported types.
        raise TypeError('type {} not allowed'.format(type(value)))
def logx(x):
    '''Natural logarithm that maps 0 to the large negative constant -100.

    Used for log-weights, so standard float precision (no mpmath) suffices.
    '''
    return -100 if x == 0 else math.log(x)
def batches(i, size):
    '''Yield consecutive batches of `size` elements from iterable `i`.

    The final batch may be shorter when the element count is not a multiple
    of `size`.
    '''
    current = []
    for element in i:
        current.append(element)
        if len(current) < size:
            continue
        yield current
        current = []
    if current:
        yield current
def rndbatches(i, size):
    '''Shuffle the elements of `i` (materialized as a list) and return a
    generator over batches of `size` of them.'''
    shuffled = list(i)
    random.shuffle(shuffled)
    return batches(shuffled, size)
def stripComments(text):
    '''Remove C/C++-style comments (`// ...` and `/* ... */`) from `text`
    while leaving string literals untouched.

    Regex taken from
    http://stackoverflow.com/questions/241327/python-snippet-to-remove-c-and-c-comments
    '''
    pattern = re.compile(
        r'//.*?$|/\*.*?\*/|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE
    )
    def substitute(match):
        token = match.group(0)
        # String literals are matched only so they are NOT mistaken for
        # comments; re-emit them unchanged.  Comments collapse to one space.
        if token.startswith('/'):
            return " " # note: a space and not an empty string
        return token
    return re.sub(pattern, substitute, text)
def parse_queries(mln, query_str):
    '''
    Parses a list of comma-separated query strings.

    Admissible queries are all kinds of formulas or just predicate names.
    Returns a list of the queries.

    :param mln:       MLN object whose logic is used to parse formulas.
    :param query_str: comma-separated string of queries.
    :raises Exception: if the queries contain unbalanced parentheses.
    '''
    queries = []
    query_preds = set()
    q = ''
    for s in map(str.strip, query_str.split(',')):
        if not s: continue
        # Fragments split at commas *inside* parentheses are re-joined here:
        # `q` only counts as a complete query once its parentheses balance.
        if q != '': q += ','
        q += s
        if balancedParentheses(q):
            try:
                # try to read it as a formula and update query predicates
                f = mln.logic.parse_formula(q)
                literals = f.literals()
                prednames = map(lambda l: l.predname, literals)
                query_preds.update(prednames)
            except Exception:
                # BUG FIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.  Anything that does not parse
                # as a formula is assumed to be a pure predicate name.
                query_preds.add(s)
            queries.append(q)
            q = ''
    if q != '': raise Exception('Unbalanced parentheses in queries: ' + q)
    return queries
def predicate_declaration_string(predName, domains, blocks):
    '''
    Returns a string representation of the given predicate.

    :param predName: the predicate name.
    :param domains:  list of argument domain names.
    :param blocks:   list of booleans; True marks a functional argument,
                     rendered with a trailing '!'.
    '''
    rendered = []
    for arg, block in zip(domains, blocks):
        suffix = '!' if block else ''
        rendered.append('%s%s' % (arg, suffix))
    return '%s(%s)' % (predName, ', '.join(rendered))
def getPredicateList(filename):
    ''' Gets the list of predicate names from an MLN file.

    :param filename: path to the MLN file.
    :return: list of predicate names found at the start of (comment-stripped)
             lines of the file.
    '''
    # BUG FIX: the original used the Python-2-only `file()` constructor and
    # never closed the handle; use open() and close it explicitly (works on
    # both Python 2 and 3).
    f = open(filename, "r")
    try:
        content = f.read() + "\n"
    finally:
        f.close()
    content = stripComments(content)
    lines = content.split("\n")
    predDecl = re.compile(r"(\w+)\([^\)]+\)")
    preds = set()
    for line in lines:
        line = line.strip()
        m = predDecl.match(line)
        if m is not None:
            preds.add(m.group(1))
    return list(preds)
def avg(*a):
    '''Return the arithmetic mean of the given arguments as a float.'''
    total = sum(float(x) for x in a)
    return total / len(a)
class CallByRef(object):
    '''
    Convenience class for treating any kind of variable as an object that can be
    manipulated in-place by a call-by-reference, in particular for primitive data types such as numbers.
    '''
    def __init__(self, value):
        # The wrapped value; mutate `obj.value` to emulate call-by-reference.
        self.value = value
INC = 1  # inclusive interval boundary
EXC = 2  # exclusive interval boundary
class Interval():
    '''A numeric interval parsed from a string such as "[0,1]", "(0,1]" or
    "]0,1[".

    Round brackets (and reversed square brackets) mark exclusive boundaries,
    regular square brackets inclusive ones.  Membership is tested with `in`.
    '''
    def __init__(self, interval):
        tokens = re.findall(r'(\(|\[|\])([-+]?\d*\.\d+|\d+),([-+]?\d*\.\d+|\d+)(\)|\]|\[)', interval.strip())[0]
        opening, lo, hi, closing = tokens
        if opening == '[':
            self.left = INC
        elif opening in ('(', ']'):
            self.left = EXC
        else:
            raise Exception('Illegal interval: {}'.format(interval))
        if closing == ']':
            self.right = INC
        elif closing in (')', '['):
            self.right = EXC
        else:
            raise Exception('Illegal interval: {}'.format(interval))
        self.start = float(lo)
        self.end = float(hi)
    def __contains__(self, x):
        lower_ok = self.start <= x if self.left == INC else self.start < x
        upper_ok = self.end >= x if self.right == INC else self.end > x
        return lower_ok and upper_ok
def elapsedtime(start, end=None):
    '''
    Compute the elapsed time of the interval `start` to `end`.

    Returns a readable string representation of the elapsed time.

    :param start: the starting point of the time interval.
    :param end: the end point of the time interval. If `None`, the current time is taken.
    '''
    finish = time.time() if end is None else end
    return elapsed_time_str(finish - start)
def elapsed_time_str(elapsed):
    '''Format a duration in seconds as "H:MM:SS.mmm" (e.g. "1:01:01.500").'''
    remaining = elapsed
    hours = int(remaining / 3600)
    remaining -= hours * 3600
    minutes = int(remaining / 60)
    remaining -= minutes * 60
    secs = int(remaining)
    msecs = int((remaining - secs) * 1000)
    return '{}:{:02d}:{:02d}.{:03d}'.format(hours, minutes, secs, msecs)
def balancedParentheses(s):
    '''Return True iff the parentheses in `s` are balanced, i.e. every ')'
    closes a previously opened '(' and none remain open at the end.'''
    depth = 0
    for ch in s:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            # a closing parenthesis without a matching opener
            if depth < 0:
                return False
    return depth == 0
def fstr(f):
    '''Return str(f) with all redundant *outer* parenthesis pairs removed.

    Stripping stops as soon as removing the outermost pair would leave an
    unbalanced string (e.g. "(a)(b)").
    '''
    s = str(f)
    while s[0] == '(' and s[-1] == ')':
        inner = s[1:-1]
        if not balancedParentheses(inner):
            break
        s = inner
    return s
def cumsum(i, upto=None):
    '''Sum of the first `upto` elements of `i` (all of them if `upto` is None).

    Returns 0 for an empty sequence or `upto` == 0.
    '''
    if not i or upto == 0:
        return 0
    stop = len(i) if upto is None else upto
    return reduce(int.__add__, i[:stop])
def evidence2conjunction(evidence):
    '''
    Converts the evidence obtained from a database (dict mapping ground atom names to truth values) to a conjunction (string)

    Atoms with a false truth value are negated with a '!' prefix; atoms are
    joined with ' ^ '.  NOTE: uses dict.iteritems(), i.e. Python 2 only.
    '''
    evidence = map(lambda x: ("" if x[1] else "!") + x[0], evidence.iteritems())
    return " ^ ".join(evidence)
def tty(stream):
    '''Best-effort check whether `stream` is attached to a terminal.

    Returns a falsy value (None) when the stream has no `isatty` method.
    '''
    probe = getattr(stream, 'isatty', None)
    if not probe:
        return probe
    return probe()
BOLD = (None, None, True)  # colorize() format triple: (bg, fg, bold)
def headline(s):
    '''Return `s` framed by '=' rules above and below, all rendered bold.'''
    rule = ''.ljust(len(s), '=')
    top = colorize(rule, BOLD, True)
    middle = colorize(s, BOLD, True)
    bottom = colorize(rule, BOLD, True)
    return '%s\n%s\n%s' % (top, middle, bottom)
def gaussianZeroMean(x, sigma):
    '''Density at `x` of a zero-mean normal distribution with standard
    deviation `sigma`.'''
    variance = sigma ** 2
    normalizer = 1.0 / sqrt(2 * math.pi * variance)
    return normalizer * math.exp(-(x ** 2) / (2 * variance))
def gradGaussianZeroMean(x, sigma):
    '''Derivative with respect to `x` of the zero-mean Gaussian density
    `gaussianZeroMean`:

        d/dx [ 1/(sigma*sqrt(2*pi)) * exp(-x^2/(2*sigma^2)) ]
          = -x / sigma^3 * 1/sqrt(2*pi) * exp(-x^2/(2*sigma^2))
    '''
    # BUG FIX: the original hard-coded 0.3990434423 for 1/sqrt(2*pi); the
    # correct value is 0.3989422804...  Compute the constant exactly so the
    # gradient is consistent with gaussianZeroMean.
    return -(x / sigma ** 3) * math.exp(-0.5 * x ** 2 / sigma ** 2) / sqrt(2 * math.pi)
def mergedom(*domains):
    '''
    Returning a new domains dictionary that contains the elements of all the given domains

    :param domains: dicts mapping domain names to lists of values.
    :return: dict mapping each domain name to the deduplicated union of its
             values (as a list; element order is unspecified).
    NOTE: uses dict.iteritems(), i.e. Python 2 only.
    '''
    fullDomain = {}
    for domain in domains:
        for domName, values in domain.iteritems():
            if domName not in fullDomain:
                fullDomain[domName] = set(values)
            else:
                fullDomain[domName].update(values)
    # convert the accumulated sets back to plain lists
    for key, s in fullDomain.iteritems():
        fullDomain[key] = list(s)
    return fullDomain
def colorize(message, format, color=False):
    '''
    Returns the given message in a colorized format
    string with ANSI escape codes for colorized console outputs:
    - message: the message to be formatted.
    - format: triple containing format information:
       (bg-color, fg-color, bf-boolean) supported colors are
       'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
    - color: boolean determining whether or not the colorization
      is to be actually performed.
    '''
    # colorization disabled: hand the message back untouched
    if color is False:
        return message
    bg, fg, bold = format
    attrs = []
    if bold:
        attrs.append(colored.attr('bold'))
    if bg:
        attrs.append(colored.bg(bg))
    if fg:
        attrs.append(colored.fg(fg))
    return colored.stylize(message, set(attrs))
class StopWatchTag:
    '''A single labeled time span of a StopWatch.

    The tag counts as running until `stoptime` has been set.
    '''
    def __init__(self, label, starttime, stoptime=None):
        self.label = label
        self.starttime = starttime
        self.stoptime = stoptime
    @property
    def elapsedtime(self):
        # while still running, measure against the current time
        end = self.stoptime
        if end is None:
            end = time.time()
        return end - self.starttime
    @property
    def finished(self):
        return self.stoptime is not None
class StopWatch(object):
    '''
    Simple tagging of time spans.

    NOTE: Python 2 only (print statements, dict.iteritems()).
    '''
    def __init__(self):
        self.tags = {}  # maps label -> StopWatchTag
    def tag(self, label, verbose=True):
        # Start the tag with the given label, or restart it if it exists.
        if verbose:
            print '%s...' % label
        tag = self.tags.get(label)
        now = time.time()
        if tag is None:
            tag = StopWatchTag(label, now)
        else:
            tag.starttime = now
        self.tags[label] = tag
    def finish(self, label=None):
        # Stop the given tag; with label=None, stop all still-running tags
        # (already-finished tags keep their original stoptime via ifnone).
        now = time.time()
        if label is None:
            for _, tag in self.tags.iteritems():
                tag.stoptime = ifnone(tag.stoptime, now)
        else:
            tag = self.tags.get(label)
            if tag is None:
                raise Exception('Unknown tag: %s' % label)
            tag.stoptime = now
    def __getitem__(self, key):
        # dictionary-style access to tags; returns None for unknown labels
        return self.tags.get(key)
    def reset(self):
        # drop all recorded tags
        self.tags = {}
    def printSteps(self):
        # Print all tags in chronological start order, bold, with their
        # (possibly still growing) elapsed times.
        for t in sorted(self.tags.values(), key=lambda t: t.starttime):
            if t.finished:
                print '%s took %s' % (colorize(t.label, (None, None, True), True), elapsed_time_str(t.elapsedtime))
            else:
                print '%s is running for %s now...' % (colorize(t.label, (None, None, True), True), elapsed_time_str(t.elapsedtime))
def combinations(domains):
    '''Generate every combination (as a list) from the cartesian product of
    the given domains.

    :raises Exception: if `domains` is empty.
    '''
    if not domains:
        raise Exception('domains mustn\'t be empty')
    return _combinations(domains, [])
def _combinations(domains, comb):
    # Recursively extend the partial combination `comb` with one value from
    # each remaining domain; yield it once all domains are consumed.
    if not domains:
        yield comb
        return
    first, rest = domains[0], domains[1:]
    for value in first:
        for result in _combinations(rest, comb + [value]):
            yield result
def deprecated(func):
    '''
    This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emmitted
    when the function is used.
    '''
    def wrapper(*args, **kwargs):
        logging.getLogger().warning("Call to deprecated function: %s." % func.__name__)
        return func(*args, **kwargs)
    # manually copy over the wrapped function's metadata
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
def unifyDicts(d1, d2):
    '''
    Adds all key-value pairs from d2 to d1 (in place; existing keys in d1
    are overwritten).
    '''
    for key, value in d2.items():
        d1[key] = value
def dict_union(d1, d2):
    '''
    Returns a new dict containing all items from d1 and d2. Entries in d1 are
    overridden by the respective items in d2.
    '''
    merged = {}
    merged.update(d1)
    merged.update(d2)
    return merged
def dict_subset(subset, superset):
    '''
    Checks whether or not a dictionary is a subset of another dictionary.
    '''
    for pair in subset.items():
        if pair not in superset.items():
            return False
    return True
class edict(dict):
    '''dict subclass supporting `+` (union) and `-` (removal of keys).'''
    def __add__(self, d):
        # union; entries of `d` win on key clashes
        return dict_union(self, d)
    def __sub__(self, d):
        result = dict(self)
        if type(d) in (dict, defaultdict):
            # subtracting a dict removes every one of its keys
            for key in d:
                del result[key]
        else:
            # otherwise `d` is treated as a single key
            del result[d]
        return result
class eset(set):
    '''set subclass supporting `+` as set union.'''
    def __add__(self, other):
        combined = set(self)
        combined.update(other)
        return combined
def item(s):
    '''
    Returns an arbitrary item from the given set `s`.

    :raises Exception: if `s` is empty.
    '''
    if not s:
        raise Exception('Argument of type %s is empty.' % type(s).__name__)
    return next(iter(s))
class temporary_evidence():
    '''
    Context guard class for enabling convenient handling of temporary evidence in
    MRFs using the python `with` statement. This guarantees that the evidence
    is set back to the original whatever happens in the `with` block.

    :Example:

    >> with temporary_evidence(mrf, [0, 0, 0, 1, 0, None, None]) as mrf_:
    '''
    def __init__(self, mrf, evidence=None):
        self.mrf = mrf
        # keep a copy of the current evidence so it can be restored later
        self.evidence_backup = list(mrf.evidence)
        if evidence is not None:
            self.mrf.evidence = evidence
    def __enter__(self):
        return self.mrf
    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is None:
            # normal exit: restore the original evidence
            self.mrf.evidence = self.evidence_backup
            return True
        # error inside the `with` block: print the traceback, then re-raise
        # a fresh exception of the same type
        traceback.print_exc()
        raise exc_type(exc_value)
if __name__ == '__main__':
    # Ad-hoc smoke test of cumsum()/ifnone (requires dnutils for out()).
    l = [1,2,3]
    upto = 2
    out(ifnone(upto, len(l)))
    out(l[:ifnone(upto, len(l))])
    out(cumsum(l,1))
#     d = edict({1:2,2:3,'hi':'world'})
#     print d
#     print d + {'bla': 'blub'}
#     print d
#     print d - 1
#     print d - {'hi': 'bla'}
#     print d
#
| {
"content_hash": "6716e3a55f83c0392a08fb35e78aef10",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 153,
"avg_line_length": 27.532673267326732,
"alnum_prop": 0.5619965477560415,
"repo_name": "danielnyga/pracmln",
"id": "2c8ce33066e8e5b1f01506dacf7a6ab6ea93baa7",
"size": "15079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/pracmln/mln/util.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "16327"
},
{
"name": "CMake",
"bytes": "9841"
},
{
"name": "Java",
"bytes": "101"
},
{
"name": "Makefile",
"bytes": "42"
},
{
"name": "Python",
"bytes": "1659815"
},
{
"name": "Shell",
"bytes": "188"
},
{
"name": "TeX",
"bytes": "243"
}
],
"symlink_target": ""
} |
import copy # for deepcopy function
class Matrix(object):
    '''Base class for the 2x2 and 3x3 matrices handled in this project.

    Matrices are stored 1-indexed: `A` is a list whose slot 0 is a None
    placeholder and whose row lists likewise start with None, so A[i][j]
    addresses row i, column j for 1 <= i, j <= order.
    It serves as the parent class for the classes: twoBytwo and threeBythree.
    '''
    def __init__(self, A, order):
        # deep-copy so the caller's nested lists are never aliased
        self.A = copy.deepcopy(A)
        self.order = order
    def __str__(self):
        # Renders the matrix as an HTML <table> to facilitate printing on
        # the webpage.
        returned_string = "<table>"
        for i in range(1, self.order + 1):
            returned_string += "<tr>"
            for j in range(1, self.order + 1):
                returned_string += "<td>" + str(self.A[i][j]) + "</td>"
            returned_string += "</tr>"
        returned_string += '</table>'
        return returned_string
    def transpose(self):
        '''Return the transpose as a new Matrix of the same order.'''
        inverted = copy.deepcopy(self.A)
        for i in range(1, self.order + 1):
            for j in range(1, self.order + 1):
                inverted[j][i] = self.A[i][j]
        return Matrix(inverted, self.order)
    def multiply(self, other):
        '''Return the matrix product self * other (same-order square matrices).

        FIX: each entry is now accumulated from zero.  The original seeded the
        accumulator with a copy of self.A and subtracted self.A[i][j] again at
        the end -- the same result for ints, but confusing and numerically
        inexact for floats.
        '''
        multi = copy.deepcopy(self.A)
        for i in range(1, self.order + 1):
            for j in range(1, self.order + 1):
                total = 0
                for k in range(1, self.order + 1):
                    total += self.A[i][k] * other.A[k][j]
                multi[i][j] = total
        return Matrix(multi, self.order)
class twoBytwo(Matrix):
    '''2x2 specialization of Matrix.

    Inherits __init__, __str__, transpose and multiply from Matrix and adds
    determinant and inverse for 2x2 matrices.
    '''
    def determinant(self):
        '''Return the determinant a11*a22 - a21*a12 of this 2x2 matrix.'''
        m = self.A
        return m[1][1] * m[2][2] - m[2][1] * m[1][2]
    def inverse(self):
        '''Return the inverse as a Matrix (entries rounded to 3 decimals),
        or the string "Inverse does not exist." when the determinant is 0.'''
        det = self.determinant()
        if det == 0:
            return "Inverse does not exist."
        scale = float(det)
        adj = copy.deepcopy(self.A)
        # adjugate / det: swap the diagonal, negate the off-diagonal
        adj[1][1] = round(self.A[2][2] / scale, 3)
        adj[2][2] = round(self.A[1][1] / scale, 3)
        adj[1][2] = round(-self.A[1][2] / scale, 3)
        adj[2][1] = round(-self.A[2][1] / scale, 3)
        return Matrix(adj, 2)
class threeBythree(Matrix):
    ''' This is a sub-class of the Matrix class and is specific to 3x3 matrices.
    We use the OOP concept of inheritance to inherit the initialization, str, transpose and multiply functions from the main class.
    This class mainly defines the determinant and inverse functions for 3x3 matrices.
    '''
    def determinant(self): # returns the determinant of the 3x3 matrix represented by the current object
        B = self.A
        ans = 0
        # Laplace expansion along the first row: for each column x, collect
        # the four entries of the 2x2 minor into the flat list z, wrap them
        # back into the 1-indexed (None-padded) layout, and add the signed
        # cofactor contribution B[1][x] * det(minor).
        for x in range(1,self.order+1):
            z = []
            for i in range(2,self.order+1):
                for j in range(1,self.order+1):
                    if j==x:
                        pass
                    else:
                        z.append(B[i][j])
            Z = [None,[None,z[0], z[1]], [None,z[2], z[3]]]
            twoBytwoSub = twoBytwo(copy.deepcopy(Z),2)
            subDeterminant = twoBytwoSub.determinant()
            # alternate signs: + for odd columns, - for even columns
            if x%2==0:
                ans += -B[1][x] * subDeterminant
            else:
                ans += B[1][x] * subDeterminant
        return ans
    def inverse(self): # returns the inverse of a 3x3 object if it exists, or informs the user in case the determinant does not exist
        B = self.A
        cofactors = copy.deepcopy(self.A)
        coFactorElem = 0
        mainDeterminant = self.determinant()
        if mainDeterminant == 0:
            return "Inverse does not exist."
        # Build the cofactor matrix: for each position (x, y), compute the
        # signed determinant of the 2x2 minor obtained by deleting row x and
        # column y (z collects the four surviving entries row by row).
        for x in range(1,4):
            for y in range(1,4):
                z = []
                for i in range(1,4):
                    for j in range(1,4):
                        if (x==i) or (y==j):
                            pass
                        else:
                            z.append(B[i][j])
                Z = [None,[None,z[0], z[1]], [None,z[2], z[3]]]
                twoBytwoSub = twoBytwo(copy.deepcopy(Z),2)
                subDeterminant = twoBytwoSub.determinant()
                if ((x+y)%2 == 0):
                    coFactorElem = subDeterminant
                else:
                    coFactorElem = -1 * subDeterminant
                cofactors[x][y] = coFactorElem
        cofactorsMatrix = threeBythree(cofactors,3)
        # adjugate = transpose of the cofactor matrix; inverse = adjugate/det,
        # with every entry rounded to 3 decimal places
        adjoint = cofactorsMatrix.transpose()
        for i in range(1,4):
            for j in range(1,4):
                adjoint.A[i][j] = round(adjoint.A[i][j]/float(mainDeterminant),3)
        return adjoint
if __name__ == '__main__': # the following code is only for testing purposes...this module is imported into app.py for its main use in the webapp.
    # NOTE: Python 2 syntax (print statement) below.  Toggle `order` between
    # 2 and 3 to exercise the corresponding class.
    order = 2
    if order == 2:
        a11 = 1
        a12 = 2
        a21 = 3
        a22 = 4
        # 1-indexed layout: slot 0 of every list is a None placeholder
        A = [None,[None,a11, a12], [None,a21, a22]]
        our_matrix = twoBytwo(copy.deepcopy(A),order)
        x = our_matrix.inverse()
        print x
    if order == 3:
        a11 = 1
        a12 = 2
        a13 = 3
        a21 = 4
        a22 = 5
        a23 = 6
        a31 = 7
        a32 = 6
        a33 = 8
        A = [None,[None,a11, a12, a13], [None,a21, a22, a23], [None,a31, a32, a33]]
        matrixA = threeBythree(copy.deepcopy(A),order)
        x = matrixA.inverse()
print x | {
"content_hash": "83bf6c2a09ab8b3b0c5a902a37d2a32e",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 163,
"avg_line_length": 29.43452380952381,
"alnum_prop": 0.6537917087967644,
"repo_name": "agdhruv/cs101-matrices",
"id": "c20899f1c836f274620773895190d662da3f01d7",
"size": "4945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "2383"
},
{
"name": "JavaScript",
"bytes": "2154"
},
{
"name": "Python",
"bytes": "7132"
}
],
"symlink_target": ""
} |
# Demo script: iterating a dictionary's values.
# NOTE: Python 2 syntax (print statements).
contacts = {
    'Shannon': '202-555-1234',
    'Amy': '410-515-3000',
    'Jen': '301-600-5555',
    'Julie': '202-333-9876'
    }
# We can use the dictionary method .values() to give us a list of all of the values in contacts.
print contacts.values()
for phone in contacts.values():
    print "{0}".format(phone)
# Use .values() when you don't care what the key is, you just want a list of all of the values. It's less common, but still good to know. | {
"content_hash": "be72042c54c1f98df06b9e99c0d11db4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 139,
"avg_line_length": 34.94117647058823,
"alnum_prop": 0.6717171717171717,
"repo_name": "keum/python-lessons",
"id": "ea03e01d93b6b217e84d348d54f243d79959c888",
"size": "699",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "section_10_(dictionaries)/dict_values.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import tarfile
import zipfile
from io import BytesIO
from textwrap import dedent
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import (
ArchiveFieldSet,
ArchiveTarget,
Files,
FilesSources,
RelocatedFiles,
RelocateFilesViaCodegenRequest,
)
from pants.core.target_types import rules as target_type_rules
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.source_files import rules as source_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_SNAPSHOT, DigestContents, FileContent
from pants.engine.target import (
GeneratedSources,
Sources,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_relocated_files() -> None:
    """Exercise the `relocated_files` target's src/dest prefix mappings
    (no-op, strip, add, replace) and verify that hydrating the transitive
    closure yields only the relocated files, not the originals."""
    rule_runner = RuleRunner(
        rules=[
            *target_type_rules(),
            *source_files_rules(),
            QueryRule(GeneratedSources, [RelocateFilesViaCodegenRequest]),
            QueryRule(TransitiveTargets, [TransitiveTargetsRequest]),
            QueryRule(SourceFiles, [SourceFilesRequest]),
        ],
        target_types=[Files, RelocatedFiles],
    )
    def assert_prefix_mapping(
        *,
        original: str,
        src: str,
        dest: str,
        expected: str,
    ) -> None:
        # Create a `files` target for `original` plus a `relocated_files`
        # wrapper, then assert the single generated file lands at `expected`.
        rule_runner.create_file(original)
        rule_runner.add_to_build_file(
            "",
            dedent(
                f"""\
                files(name="original", sources=[{repr(original)}])
                relocated_files(
                    name="relocated",
                    files_targets=[":original"],
                    src={repr(src)},
                    dest={repr(dest)},
                )
                """
            ),
            overwrite=True,
        )
        tgt = rule_runner.get_target(Address("", target_name="relocated"))
        result = rule_runner.request(
            GeneratedSources, [RelocateFilesViaCodegenRequest(EMPTY_SNAPSHOT, tgt)]
        )
        assert result.snapshot.files == (expected,)
        # We also ensure that when looking at the transitive dependencies of the `relocated_files`
        # target and then getting all the code of that closure, we only end up with the relocated
        # files. If we naively marked the original files targets as a typical `Dependencies` field,
        # we would hit this issue.
        transitive_targets = rule_runner.request(
            TransitiveTargets, [TransitiveTargetsRequest([tgt.address])]
        )
        all_sources = rule_runner.request(
            SourceFiles,
            [
                SourceFilesRequest(
                    (tgt.get(Sources) for tgt in transitive_targets.closure),
                    enable_codegen=True,
                    for_sources_types=(FilesSources,),
                )
            ],
        )
        assert all_sources.snapshot.files == (expected,)
    # No-op.
    assert_prefix_mapping(original="old_prefix/f.ext", src="", dest="", expected="old_prefix/f.ext")
    assert_prefix_mapping(
        original="old_prefix/f.ext",
        src="old_prefix",
        dest="old_prefix",
        expected="old_prefix/f.ext",
    )
    # Remove prefix.
    assert_prefix_mapping(original="old_prefix/f.ext", src="old_prefix", dest="", expected="f.ext")
    assert_prefix_mapping(
        original="old_prefix/subdir/f.ext", src="old_prefix", dest="", expected="subdir/f.ext"
    )
    # Add prefix.
    assert_prefix_mapping(original="f.ext", src="", dest="new_prefix", expected="new_prefix/f.ext")
    assert_prefix_mapping(
        original="old_prefix/f.ext",
        src="",
        dest="new_prefix",
        expected="new_prefix/old_prefix/f.ext",
    )
    # Replace prefix.
    assert_prefix_mapping(
        original="old_prefix/f.ext",
        src="old_prefix",
        dest="new_prefix",
        expected="new_prefix/f.ext",
    )
    assert_prefix_mapping(
        original="old_prefix/f.ext",
        src="old_prefix",
        dest="new_prefix/subdir",
        expected="new_prefix/subdir/f.ext",
    )
    # Replace prefix, but preserve a common start.
    assert_prefix_mapping(
        original="common_prefix/foo/f.ext",
        src="common_prefix/foo",
        dest="common_prefix/bar",
        expected="common_prefix/bar/f.ext",
    )
    assert_prefix_mapping(
        original="common_prefix/subdir/f.ext",
        src="common_prefix/subdir",
        dest="common_prefix",
        expected="common_prefix/f.ext",
    )
def test_archive() -> None:
    """Integration test for the `archive` target type.

    This tests some edges:
    * Using both `files` and `relocated_files`.
    * An `archive` containing another `archive`.
    """
    rule_runner = RuleRunner(
        rules=[
            *target_type_rules(),
            *pex_from_targets.rules(),
            *package_pex_binary.rules(),
            *target_types_rules.rules(),
            QueryRule(BuiltPackage, [ArchiveFieldSet]),
        ],
        target_types=[ArchiveTarget, Files, RelocatedFiles, PexBinary],
    )
    rule_runner.set_options(
        ["--backend-packages=pants.backend.python"], env_inherit={"PATH", "PYENV_ROOT", "HOME"}
    )
    # Two data files; the `relocated_files` target moves them from
    # `resources/` to `data/`.
    rule_runner.create_file("resources/d1.json", "{'k': 1}")
    rule_runner.create_file("resources/d2.json", "{'k': 2}")
    rule_runner.add_to_build_file(
        "resources",
        dedent(
            """\
            files(name='original_files', sources=['*.json'])
            relocated_files(
                name='relocated_files',
                files_targets=[':original_files'],
                src="resources",
                dest="data",
            )
            """
        ),
    )
    rule_runner.create_file("project/app.py", "print('hello world!')")
    rule_runner.add_to_build_file("project", "pex_binary(entry_point='app.py')")
    # archive2 nests archive1 to cover the archive-in-archive edge.
    rule_runner.add_to_build_file(
        "",
        dedent(
            """\
            archive(
                name="archive1",
                packages=["project"],
                files=["resources:original_files"],
                format="zip",
            )
            archive(
                name="archive2",
                packages=[":archive1"],
                files=["resources:relocated_files"],
                format="tar",
                output_path="output/archive2.tar",
            )
            """
        ),
    )
    def get_archive(target_name: str) -> FileContent:
        # Package the archive target and return its single built file.
        tgt = rule_runner.get_target(Address("", target_name=target_name))
        built_package = rule_runner.request(BuiltPackage, [ArchiveFieldSet.create(tgt)])
        digest_contents = rule_runner.request(DigestContents, [built_package.digest])
        assert len(digest_contents) == 1
        return digest_contents[0]
    def assert_archive1_is_valid(zip_bytes: bytes) -> None:
        # archive1 must contain the original (non-relocated) resources plus
        # the packaged pex.
        io = BytesIO()
        io.write(zip_bytes)
        with zipfile.ZipFile(io) as zf:
            assert set(zf.namelist()) == {
                "resources/d1.json",
                "resources/d2.json",
                "project/project.pex",
            }
            with zf.open("resources/d1.json", "r") as f:
                assert f.read() == b"{'k': 1}"
            with zf.open("resources/d2.json", "r") as f:
                assert f.read() == b"{'k': 2}"
    archive1 = get_archive("archive1")
    assert_archive1_is_valid(archive1.content)
    archive2 = get_archive("archive2")
    assert archive2.path == "output/archive2.tar"
    io = BytesIO()
    io.write(archive2.content)
    io.seek(0)
    with tarfile.open(fileobj=io, mode="r:") as tf:
        # archive2 holds the *relocated* data files and the nested archive1.
        assert set(tf.getnames()) == {"data/d1.json", "data/d2.json", "archive1.zip"}
        def get_file(fp: str) -> bytes:
            reader = tf.extractfile(fp)
            assert reader is not None
            return reader.read()
        assert get_file("data/d1.json") == b"{'k': 1}"
        assert get_file("data/d2.json") == b"{'k': 2}"
        assert_archive1_is_valid(get_file("archive1.zip"))
| {
"content_hash": "b20561165541f1fb70734c83f702634e",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 100,
"avg_line_length": 33.103585657370516,
"alnum_prop": 0.5756408713443254,
"repo_name": "jsirois/pants",
"id": "f40cdb506c2522ff3a8d60086f5e92a81a9b900f",
"size": "8441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/core/target_types_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6008"
},
{
"name": "Mustache",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "2837069"
},
{
"name": "Rust",
"bytes": "1241058"
},
{
"name": "Shell",
"bytes": "57720"
},
{
"name": "Starlark",
"bytes": "27937"
}
],
"symlink_target": ""
} |
import json
def main(json_path='../model/exxos_mask.json'):
    """Read a model JSON file, print each object's name and vertices,
    and return every vertex found.

    Parameters
    ----------
    json_path : str
        Path of the JSON model file to read. Defaults to the original
        hard-coded location, so existing `main()` callers are unaffected.

    Returns
    -------
    list
        All vertices (as parsed from the file) across every object and
        segment, in file order.
    """
    vertex_list = []
    with open(json_path, 'rb') as fp:
        data = json.load(fp)
    # print(data)
    for _object in data:
        _object_dict = data[_object]['object']
        _object_name = _object_dict['object_name']
        print('Found an object, named \'%s\'' % _object_name)
        for _segment in _object_dict['segments']:
            for _vertex_as_array in _segment:
                print(_vertex_as_array)
                # Bug fix: vertex_list existed but was never populated;
                # collect the vertices so callers can use the result.
                vertex_list.append(_vertex_as_array)
    return vertex_list
main() | {
"content_hash": "f8bfe26e732516a7366947e03645fe06",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 55,
"avg_line_length": 21.136363636363637,
"alnum_prop": 0.5956989247311828,
"repo_name": "astrofra/amiga-experiments",
"id": "2ae69ab861dfe9a766535189ba68807e474bde4e",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wireframe-exxos-mask/toolchain/json2c.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "129056"
},
{
"name": "Batchfile",
"bytes": "3287"
},
{
"name": "C",
"bytes": "542837"
},
{
"name": "C++",
"bytes": "2616"
},
{
"name": "Lua",
"bytes": "168275"
},
{
"name": "MAXScript",
"bytes": "3821"
},
{
"name": "Makefile",
"bytes": "3217"
},
{
"name": "Objective-C",
"bytes": "1712"
},
{
"name": "Python",
"bytes": "70549"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the Mirantis OpenStack documentation build.
import sys, os
import cloud_sptheme as csp
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#if on_rtd:
# extensions = ['sphinx.ext.autodoc','sphinxcontrib.plantuml']
# display_github = False
#else:
extensions = ['sphinx.ext.autodoc','rst2pdf.pdfbuilder','sphinxcontrib.plantuml']
#,'sphinxcontrib.fancybox']
plantuml = ['java','-jar','/sbin/plantuml.jar']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
# master_doc = 'contents'
# General information about the project.
project = u'Mirantis OpenStack'
copyright = u'2014, Mirantis Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.1'
# The full version, including alpha/beta/rc tags.
release = '4.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_*', "pages", 'pdf', 'contents', 'index', '*-guide']
# exclude_patterns = ['_*', 'rn_index.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "mirantis"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = { "roottarget": "index" }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_templates", csp.get_theme_dir()]
html_add_permalinks = None
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = project + ' v' + release + ' | Documentation'
# Fix: was 'Mirantis OpenStack' + 'v' + release, which rendered
# "Mirantis OpenStackv4.1"; use the intended "<project> v<release>" form
# (see the commented-out original above).
html_title = project + ' v' + release + ' | Documentation'
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = '_static/fuel-logo.png'
html_logo = '_static/fuel_gradient_200.png'
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/mirantis_icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%c, %Z'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['searchbox.html', 'globaltoc.html', 'doc_license.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'fueldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'fuel.tex', u'Mirantis OpenStack | Documentation',
   u'Mirantis Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'fuel', u'Mirantis OpenStack | Documentation',
     [u'Mirantis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'fuel', u'Mirantis OpenStack | Documentation',
   u'Mirantis Inc.', 'fuel', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Additional Settings -------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions += ['sphinx.ext.inheritance_diagram', 'sphinxcontrib.blockdiag', 'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag', 'sphinxcontrib.nwdiag']
# The encoding of source files.
source_encoding = 'utf-8'
#source_encoding = 'shift_jis'
# The language for content autogenerated by Sphinx.
#language = 'en'
#language = 'ja'
# Enable Antialiasing for the *diag extensions (blockdiag family).
blockdiag_antialias = True
# Fix: was misspelled "acttdiag_antialias", so actdiag silently never
# picked up the setting.
actdiag_antialias = True
seqdiag_antialias = True
nwdiag_antialias = True
# 'rst2pdf.pdfbuilder' is already listed in `extensions` above; guard the
# append so the extension is not registered twice.
if 'rst2pdf.pdfbuilder' not in extensions:
    extensions += ['rst2pdf.pdfbuilder']
pdf_documents = [
    ('relnotes/index', u'Mirantis-OpenStack-4.1-RelNotes', u'Release Notes',
     u'2014, Mirantis Inc.')
    # (master_doc, project, project, copyright),
]
pdf_stylesheets = ['letter', 'mirantis']
pdf_style_path = ['_templates']
#pdf_language = "en"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
pdf_break_level = 1
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
#pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = False
# If false, no modindex is generated.
#pdf_use_modindex = False
# If false, no coverpage is generated.
# pdf_use_coverpage = False
# Name of the cover page template to use
pdf_cover_template = 'mirantiscover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
# pdf_page_template = 'cutePage'
pdf_page_template = 'oneColumn'
# Show Table Of Contents at the beginning?
pdf_use_toc = False
# How many levels deep should the table of contents be?
pdf_toc_depth = 2
# Add section number to section references
#pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
pdf_font_path = ['C:\\Windows\\Fonts\\', '/usr/share/fonts', '_fonts']
| {
"content_hash": "47c7f90cd53bad5dd1f213ed7db68c40",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 151,
"avg_line_length": 31.70294117647059,
"alnum_prop": 0.7011782169032378,
"repo_name": "Mellanox/fuel-docs",
"id": "d6b762ec89d802648aba6ce97fd584da7ad58978",
"size": "11196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relnotes/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from polyaxon.constants.globals import UNKNOWN
from polyaxon.utils.path_utils import module_type
from traceml.events import V1EventChart, V1EventChartKind
from traceml.logger import logger
from traceml.processors.errors import (
BOKEH_ERROR_MESSAGE,
MATPLOTLIB_ERROR_MESSAGE,
PLOTLY_ERROR_MESSAGE,
)
# numpy is an optional dependency: expose `np = None` when it is missing
# so callers can feature-detect instead of failing at import time.
try:
    import numpy as np
except ImportError:
    np = None
def bokeh_chart(figure) -> V1EventChart:
    """Serialize a bokeh figure into a V1EventChart of kind BOKEH.

    Returns UNKNOWN when bokeh is not installed.
    """
    try:
        from bokeh.embed import json_item
    except ImportError:
        logger.warning(BOKEH_ERROR_MESSAGE)
        return UNKNOWN
    serialized = json_item(figure)
    return V1EventChart(kind=V1EventChartKind.BOKEH, figure=serialized)
def altair_chart(figure) -> V1EventChart:
    """Serialize an altair figure into a V1EventChart of kind VEGA."""
    figure_dict = figure.to_dict()
    return V1EventChart(kind=V1EventChartKind.VEGA, figure=figure_dict)
def plotly_chart(figure) -> V1EventChart:
    """Serialize a plotly figure (or convert a matplotlib one) into a
    V1EventChart of kind PLOTLY.

    Returns UNKNOWN when plotly is not installed.
    """
    try:
        import plotly.tools
        from traceml.vendor.matplotlylib import mpl_to_plotly
    except ImportError:
        logger.warning(PLOTLY_ERROR_MESSAGE)
        return UNKNOWN
    if module_type(figure, "matplotlib.figure.Figure"):
        converted = mpl_to_plotly(figure)
    else:
        converted = plotly.tools.return_figure_from_figure_or_data(
            figure, validate_figure=True
        )
    return V1EventChart(kind=V1EventChartKind.PLOTLY, figure=converted)
def mpl_plotly_chart(figure, close: bool = True) -> V1EventChart:
    """Convert a matplotlib figure to a plotly-backed V1EventChart.

    Args:
        figure: a matplotlib Figure, the matplotlib.pyplot module itself
            (its current figure is used), or an object exposing a
            `.figure` attribute (e.g. an Axes).
        close: when True, close the matplotlib figure after conversion.

    Returns:
        V1EventChart of kind PLOTLY, or UNKNOWN when a required backend
        (plotly or matplotlib) is not installed.

    Raises:
        ValueError: when `figure` cannot be resolved to a Figure.
    """
    try:
        import plotly.tools
        from plotly import optional_imports
    except ImportError:
        logger.warning(PLOTLY_ERROR_MESSAGE)
        return UNKNOWN
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        from matplotlib.figure import Figure
    except ImportError:
        logger.warning(MATPLOTLIB_ERROR_MESSAGE)
        # Bug fix: previously fell through after the warning and crashed
        # with a NameError on `matplotlib` below; bail out like the
        # plotly branch does.
        return UNKNOWN
    if not module_type(figure, "matplotlib.figure.Figure"):
        if figure == matplotlib.pyplot:
            figure = figure.gcf()
        elif not isinstance(figure, Figure):
            if hasattr(figure, "figure"):
                # Some matplotlib objects carry their Figure here.
                figure = figure.figure
            if not isinstance(figure, Figure):
                raise ValueError(
                    "Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
                )
    from traceml.vendor.matplotlylib import mpl_to_plotly

    plotly_figure = mpl_to_plotly(figure)
    result = plotly_chart(figure=plotly_figure)
    if close:
        # Best-effort close: prefer closing by figure number, fall back to
        # passing the figure object itself.
        try:
            plt.close(figure.number)
        except Exception:  # noqa
            plt.close(figure)
    return result
| {
"content_hash": "e3a007f858b616ad5abfe274caf292a5",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 98,
"avg_line_length": 28.9438202247191,
"alnum_prop": 0.6583850931677019,
"repo_name": "polyaxon/polyaxon",
"id": "ecf47af621936ebb4699b7f2bcf1275037bb7a4c",
"size": "3181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traceml/traceml/processors/events_processors/events_charts_processors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
"""cinema: director."""
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cinema.models.commoninfo import CommonInfo
class Director(CommonInfo):
    """Director.

    A film director; related to films through the explicit FilmDirector
    join model. NOTE: field definitions feed migrations — keep stable.
    """
    name = models.CharField(_("name"), max_length=80, help_text=_("Director's name."))
    film = models.ManyToManyField('Film', through='FilmDirector', verbose_name=_("film"))
    def __unicode__(self):
        # Display the director by name in the admin and shell.
        return self.name
    class Meta(CommonInfo.Meta):
        app_label = 'cinema'
        verbose_name = _('director')
        verbose_name_plural = _('directors')
class FilmDirector(models.Model):
    """Films of directors.

    Explicit through-model for the Director<->Film many-to-many relation;
    each (film, director) pair is enforced unique.
    """
    film = models.ForeignKey('Film', verbose_name=_("film"), related_name='films')
    director = models.ForeignKey('Director', verbose_name=_("director"), related_name='directors')
    def __unicode__(self):
        # Translators: Film by director
        return _('%(film)s by %(director)s') % {'film': self.film, 'director': self.director}
    class Meta:
        app_label = 'cinema'
        db_table = 'cinema_film_director'
        verbose_name = _('film of director')
        verbose_name_plural = _('films of directors')
        unique_together = ('film', 'director',)
| {
"content_hash": "dbed03ebdc2f0137c09209ab79f3f86e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 98,
"avg_line_length": 29.627906976744185,
"alnum_prop": 0.6365777080062794,
"repo_name": "juliotrigo/restfulwebapi",
"id": "65f80db1b777cc8f0e9f678aa4084a1438441f37",
"size": "1299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinema/models/director.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3393"
},
{
"name": "JavaScript",
"bytes": "201"
},
{
"name": "Python",
"bytes": "16567"
}
],
"symlink_target": ""
} |
__author__ = 'mathijs'
import collections
from hbobject import HbObject
import logging
# e.g.
# dependencies =
# {
# 'parent':{'default': PointSet()}
#
# settings =
# {
# 'model':{
# 'default':['linear','seasonal'],
# 'help' :'Model to use in least squares parameter estimation',
# 'valid_set' :['linear','seasonal','quadratic','cubic','custom'],
# 'custom':{
# 'default':[],
# 'help' :'Custom timeseries to use as model component',
# 'slcweight':{
# 'default':[],
# 'help' :'Weights to apply to each timestep in parameter estimation',
# 'item_type' :float,
# 'valid_range' :[0,1]}
# }
def ishash(s):
    """Return True if `s` looks like a 32-character lowercase hex digest
    (e.g. an md5 hash).

    Bug fix: the original second clause was
    ``all([chr(i) for i in range(48,58)+range(97,103)])``, which tested
    the truthiness of freshly built characters (always True) and never
    inspected `s` at all. Now every character of `s` must be a digit or
    a lowercase a-f, matching the original intent (code points 48-57 and
    97-102).
    """
    return len(s) == 32 and all(c in '0123456789abcdef' for c in s)
class Setting:
    """A single named configuration value with optional typing/validation.

    NOTE(review): Python 2 code (``except Exception, e`` syntax, ``unicode``,
    ``e.message``) — keep as-is until the module is ported.
    """
    def __init__(self, name,
                 default=None,
                 mandatory=True,
                 validate=None,
                 type=lambda x:x):
        # `type` is either a casting callable (default: identity) or an
        # HbObject instance; the latter marks this setting as a dependency
        # on another stored object.
        assert isinstance(name, str)
        self.name = name
        self.type = type
        if isinstance((type),HbObject):
            # Dependencies carry no literal default value.
            default = None
            self.dependency = True
        else:
            self.dependency = False
        self.value = self.default = default
        self.mandatory = mandatory
        if validate:
            # A caller-supplied validator shadows the `validate` method below.
            self.validate = validate
    def __unicode__(self):
        return 'Setting {} [{}]'.format(self.name, self.default)
    @property
    def api(self):
        """ describes what is expected and defaulted
        (returns a dict with 'default', 'mandatory' and 'type' keys)
        """
        # print 'name ',self.name
        # print 'default ',self.default
        # print 'type ',self.type
        if isinstance(self.type,HbObject):
            # For dependencies, report the HbObject's own type name.
            t = self.type.type
            # d = self.
        elif self.default:
            t = str(type(self.default))
        else:
            t = str(self.type)
        return {'default':str(self.default), 'mandatory':self.mandatory, 'type':t}
    def validate(self, testval):
        """ Validate a single setting w.r.t. type etc. """
        # check hbobject type
        if self.type and isinstance(self.type,HbObject):
            # Dependencies must be "type:hash" reference strings whose type
            # part matches and whose hash part is 32 characters long.
            try:
                assert( type(testval) in [str,unicode] and testval.find(':')>0)
                tt,hh = testval.split(':')
                assert(len(hh)==32)
                assert(self.type.type == tt)
            except Exception, e:
                logging.error(e, exc_info=True)
                logging.error('expected HbObject {}, got {}'.format(self.type.type,testval))
                return False
        # or try to cast to requested type
        else:
            try:
                self.type(testval)
            except Exception, e:
                logging.error('cannot cast this {} to {}'.format(testval, self.type))
                logging.error(e, exc_info=True)
                return False
        return True
    def set(self, val):
        # Accept an HbObject directly, a bare 32-char hash (resolved to an
        # HbObject reference string), or any raw value.
        if isinstance(val, HbObject):
            val = str(val)
        elif ishash(val):
            # TODO: gettype: read only the type from hbobject instead of all data?
            # TODO: alternatively, get type from database.
            try:
                val = str(HbObject(hash=val))
            except Exception as e:
                raise NotValidError(val,self.name,e.message)
        self.value = self.validated(val)
    def validated(self,testval):
        # Validate a value (element-wise for lists) and return it, raising
        # NotValidError on failure.
        # if True:
        # groups are resubmitted ech separately
        if isinstance(testval, list): #or isinstance(testval, dict):
            ok = all([self.validate(i) for i in testval])
        else:
            ok = self.validate(testval)
        if ok:
            return testval
        else:
            raise NotValidError(testval,self.name)
class Settings:
    """A collection of Setting objects with bulk add/set/get helpers.

    NOTE(review): Python 2 code (``iteritems``, ``e.message``).
    """
    def __init__(self):
        # name -> Setting
        self.settings = {}
    def add(self, *args, **kwargs):
        """ Add a parameter to the settings
        Parameters:
        positional:
        name
        default
        key=value:
        :key :default
        mandatory False
        help ''
        dependency False
        """
        new = Setting(*args, **kwargs)
        self.settings.update({new.name: new})
    def set(self, **kwargs):
        """ Set one or more parameters
        e.g. set({'aap':'piet', 'banaan':'geel'}
        Failures are collected in self.errors rather than aborting the batch.
        """
        self.errors = []
        for k, v in kwargs.iteritems():
            if k in self.settings.keys():
                try:
                    self.settings[k].set(v)
                except Exception as e:
                    self.errors.append(e.message)
            else:
                logging.error('{} is not a setting'.format(k))
    @property
    def get(self):
        """
        Show currently set settings (also inferred from default)
        -> normal key:value dict with whatever it is
        """
        # for k,v in self.settings.iteritems():
        # print k,v,type(v)
        return {k: v.value for k, v in self.settings.iteritems()}
    @property
    def getstr(self):
        """
        Give currently set settings (also inferred from default)
        -> value always as a string
        """
        return {k: str(v.value) for k, v in self.settings.iteritems()}
    @property
    def dependencies(self):
        # Names of settings whose type is an HbObject reference.
        return [s.name for s in self.settings.values() if s.dependency]
    @property
    def dependency_dict(self):
        return {i:self.get[i] for i in self.dependencies}
    @property
    def mandatory(self):
        return {name:s for name,s in self.settings.iteritems() if s.mandatory}
    @property
    def valid(self):
        # All mandatory settings must carry a truthy value.
        ok = [v.value for v in self.settings.values() if v.mandatory]
        return all(ok)
    @property
    def api(self):
        return {k: v.api for k,v in self.settings.iteritems()}
class NotValidError(Exception):
    """Raised when a value fails a Setting's validation.

    Bug fix: the original never called ``Exception.__init__``, so
    ``str(e)``, ``e.args`` and (py2) ``e.message`` were all empty even
    though callers (e.g. ``Settings.set``) read ``e.message``. The
    formatted message is now passed to the base class; the original
    ``msg`` attribute is kept for backward compatibility.
    """
    def __init__(self, value, parameter, reason=None):
        self.msg = '{} is not a valid setting for {} ({})'.format(value, parameter, reason)
        super(NotValidError, self).__init__(self.msg)
if __name__=='__main__':
    # Ad-hoc smoke test: exercise Setting and Settings directly.
    s = Setting('aap')
    s.set('Banaan')
    S = Settings()
    S.add('aap')
    S.add('banaan',validate = lambda x: (x>3))
S.set(aap=1,banaan=5) | {
"content_hash": "9474b3d77972dab59f4418c269be53b6",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 92,
"avg_line_length": 29.301435406698566,
"alnum_prop": 0.5311887655127367,
"repo_name": "mwschouten/procapp",
"id": "05dcfe4a52d413263bd7866c06a5ab133935d04b",
"size": "6125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/experts/tools/hbsettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5249"
},
{
"name": "HTML",
"bytes": "37186"
},
{
"name": "JavaScript",
"bytes": "35184"
},
{
"name": "Matlab",
"bytes": "279"
},
{
"name": "Python",
"bytes": "75462"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
} |
from _timer import * | {
"content_hash": "aff180acf9aa003b96ff31e0322d2026",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 20,
"alnum_prop": 0.75,
"repo_name": "utluiz/utluiz.github.io",
"id": "eef14f6fbf01e6c3ad629c653e140a7a89916473",
"size": "20",
"binary": false,
"copies": "115",
"ref": "refs/heads/master",
"path": "pyscript/Lib/browser/timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "340625"
},
{
"name": "Python",
"bytes": "2854600"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import user_management.api.models
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration: creates the AuthToken table (key as primary key,
    # created/expires timestamps with non-editable defaults).
    # NOTE: migrations are historical records — do not edit operations.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AuthToken',
            fields=[
                ('key', models.CharField(primary_key=True, serialize=False, max_length=40)),
                ('created', models.DateTimeField(editable=False, default=django.utils.timezone.now)),
                ('expires', models.DateTimeField(editable=False, default=user_management.api.models.update_expiry)),
            ],
        ),
    ]
| {
"content_hash": "915d528b03599c14c86dd7cf206ebc6f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 116,
"avg_line_length": 30.045454545454547,
"alnum_prop": 0.6354009077155824,
"repo_name": "incuna/django-user-management",
"id": "f24ab98d8112e9332a4100b2cb7f6803853331ec",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_management/tests/testmigrations/api/0001_initial.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1163"
},
{
"name": "Makefile",
"bytes": "443"
},
{
"name": "Python",
"bytes": "179550"
}
],
"symlink_target": ""
} |
import pytest
import sqlparse
from sqlparse import sql, tokens as T
def test_issue9():
    # make sure where doesn't consume parenthesis
    p = sqlparse.parse('(where 1)')[0]
    assert isinstance(p, sql.Statement)
    assert len(p.tokens) == 1
    assert isinstance(p.tokens[0], sql.Parenthesis)
    prt = p.tokens[0]
    # The parenthesis group is '(', inner content, ')'.
    assert len(prt.tokens) == 3
    assert prt.tokens[0].ttype == T.Punctuation
    assert prt.tokens[-1].ttype == T.Punctuation
def test_issue13():
    # An escaped quote inside a string literal must not end the statement.
    statements = sqlparse.parse(
        "select 'one';\n"
        "select 'two\\'';\n"
        "select 'three';"
    )
    assert len(statements) == 3
    assert str(statements[1]).strip() == "select 'two\\'';"
@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n',
                               '--', '--\n'])
def test_issue26(s):
    # parse stand-alone comments
    # A lone single-line comment must become exactly one Comment.Single token.
    p = sqlparse.parse(s)[0]
    assert len(p.tokens) == 1
    assert p.tokens[0].ttype is T.Comment.Single
@pytest.mark.parametrize('value', ['create', 'CREATE'])
def test_issue34(value):
    # DDL keyword matching must be case-insensitive.
    first_token = sqlparse.parse("create")[0].token_first()
    assert first_token.match(T.Keyword.DDL, value) is True
def test_issue35():
    # missing space before LIMIT. Updated for #321
    sql = sqlparse.format("select * from foo where bar = 1 limit 1",
                          reindent=True)
    # LIMIT must end up on its own line after reindenting.
    assert sql == "\n".join([
        "select *",
        "from foo",
        "where bar = 1",
        "limit 1"])
def test_issue38():
    # strip_comments removes both trailing and stand-alone comments.
    assert sqlparse.format("SELECT foo; -- comment",
                           strip_comments=True) == "SELECT foo;"
    assert sqlparse.format("/* foo */", strip_comments=True) == ""
def test_issue39():
    # A dotted name (user.id) must be grouped into a single Identifier.
    p = sqlparse.parse('select user.id from user')[0]
    assert len(p.tokens) == 7
    idt = p.tokens[2]
    assert idt.__class__ == sql.Identifier
    assert len(idt.tokens) == 3
    assert idt.tokens[0].match(T.Name, 'user') is True
    assert idt.tokens[1].match(T.Punctuation, '.') is True
    assert idt.tokens[2].match(T.Name, 'id') is True
def test_issue40():
    # make sure identifier lists in subselects are grouped
    p = sqlparse.parse('SELECT id, name FROM '
                       '(SELECT id, name FROM bar) as foo')[0]
    assert len(p.tokens) == 7
    assert p.tokens[2].__class__ == sql.IdentifierList
    assert p.tokens[-1].__class__ == sql.Identifier
    assert p.tokens[-1].get_name() == 'foo'
    sp = p.tokens[-1].tokens[0]
    # The inner SELECT's column list must also be grouped.
    assert sp.tokens[3].__class__ == sql.IdentifierList
    # make sure that formatting works as expected
    s = sqlparse.format('SELECT id == name FROM '
                        '(SELECT id, name FROM bar)', reindent=True)
    assert s == '\n'.join([
        'SELECT id == name',
        'FROM',
        '  (SELECT id,',
        '          name',
        '   FROM bar)'])
    s = sqlparse.format('SELECT id == name FROM '
                        '(SELECT id, name FROM bar) as foo', reindent=True)
    assert s == '\n'.join([
        'SELECT id == name',
        'FROM',
        '  (SELECT id,',
        '          name',
        '   FROM bar) as foo'])
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
                               'select x.y::text as "z" from foo',
                               'select x."y"::text as z from foo',
                               'select x."y"::text as "z" from foo',
                               'select "x".y::text as z from foo',
                               'select "x".y::text as "z" from foo',
                               'select "x"."y"::text as z from foo',
                               'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
                                               ('get_real_name', 'y'),
                                               ('get_parent_name', 'x'),
                                               ('get_alias', 'z'),
                                               ('get_typecast', 'text')])
def test_issue78(s, func_name, result):
    # the bug author provided this nice examples, let's use them!
    # Quoting of parent/name/alias must not change identifier accessors.
    p = sqlparse.parse(s)[0]
    i = p.tokens[2]
    assert isinstance(i, sql.Identifier)
    func = getattr(i, func_name)
    assert func() == result
def test_issue83():
    # Dollar-quoted ($_$) function bodies must not be split on inner ';'.
    sql = """   CREATE OR REPLACE FUNCTION func_a(text)
              RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS
            $_$
            BEGIN
                ...
            END;
            $_$;

            CREATE OR REPLACE FUNCTION func_b(text)
              RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS
            $_$
            BEGIN
                ...
            END;
            $_$;

            ALTER TABLE..... ;"""
    t = sqlparse.split(sql)
    assert len(t) == 3
def test_comment_encoding_when_reindent():
    # There was an UnicodeEncodeError in the reindent filter that
    # casted every comment followed by a keyword to str.
    sql = 'select foo -- Comment containing Ümläuts\nfrom bar'
    formatted = sqlparse.format(sql, reindent=True)
    # Non-ASCII comment text must survive reindenting unchanged.
    assert formatted == sql
def test_parse_sql_with_binary():
    # See https://github.com/andialbrecht/sqlparse/pull/88
    # digest = '|ËêplL4¡høN{'
    # Arbitrary binary bytes inside a string literal must round-trip.
    digest = '\x82|\xcb\x0e\xea\x8aplL4\xa1h\x91\xf8N{'
    sql = "select * from foo where bar = '{}'".format(digest)
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = "select *\nfrom foo\nwhere bar = '{}'".format(digest)
    assert formatted == tformatted
def test_dont_alias_keywords():
    # The _group_left_right function had a bug where the check for the
    # left side wasn't handled correctly. In one case this resulted in
    # a keyword turning into an identifier.
    p = sqlparse.parse('FROM AS foo')[0]
    assert len(p.tokens) == 5
    # Both FROM and AS must stay keywords, not be merged into an alias.
    assert p.tokens[0].ttype is T.Keyword
    assert p.tokens[2].ttype is T.Keyword
def test_format_accepts_encoding(load_file):
    # issue20
    # `encoding` passed to format() must decode non-UTF8 input correctly.
    sql = load_file('test_cp1251.sql', 'cp1251')
    formatted = sqlparse.format(sql, reindent=True, encoding='cp1251')
    tformatted = 'insert into foo\nvalues (1); -- Песня про надежду'
    assert formatted == tformatted
def test_stream(get_stream):
    # sqlparse.parse must accept an open stream, not only strings.
    with get_stream("stream.sql") as stream:
        statement = sqlparse.parse(stream)[0]
        assert statement.get_type() == 'INSERT'
def test_issue90():
    # Long UPDATE ... SET lists must be aligned one assignment per line.
    sql = ('UPDATE "gallery_photo" SET "owner_id" = 4018, "deleted_at" = NULL,'
           ' "width" = NULL, "height" = NULL, "rating_votes" = 0,'
           ' "rating_score" = 0, "thumbnail_width" = NULL,'
           ' "thumbnail_height" = NULL, "price" = 1, "description" = NULL')
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = '\n'.join([
        'UPDATE "gallery_photo"',
        'SET "owner_id" = 4018,',
        '    "deleted_at" = NULL,',
        '    "width" = NULL,',
        '    "height" = NULL,',
        '    "rating_votes" = 0,',
        '    "rating_score" = 0,',
        '    "thumbnail_width" = NULL,',
        '    "thumbnail_height" = NULL,',
        '    "price" = 1,',
        '    "description" = NULL'])
    assert formatted == tformatted
def test_except_formatting():
    # EXCEPT must be treated as a statement separator when reindenting.
    sql = 'SELECT 1 FROM foo WHERE 2 = 3 EXCEPT SELECT 2 FROM bar WHERE 1 = 2'
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = '\n'.join([
        'SELECT 1',
        'FROM foo',
        'WHERE 2 = 3',
        'EXCEPT',
        'SELECT 2',
        'FROM bar',
        'WHERE 1 = 2'])
    assert formatted == tformatted
def test_null_with_as():
    # NULL AS <alias> entries must be grouped into the identifier list.
    sql = 'SELECT NULL AS c1, NULL AS c2 FROM t1'
    formatted = sqlparse.format(sql, reindent=True)
    tformatted = '\n'.join([
        'SELECT NULL AS c1,',
        '       NULL AS c2',
        'FROM t1'])
    assert formatted == tformatted
def test_issue190_open_file(filepath):
    # Parsing directly from an open file object must work.
    path = filepath('stream.sql')
    with open(path) as stream:
        p = sqlparse.parse(stream)[0]
        assert p.get_type() == 'INSERT'
def test_issue193_splitting_function():
    # Semicolons inside a BEGIN..END function body must not split it.
    sql = """   CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)
                BEGIN
                 DECLARE y VARCHAR(20);
                 RETURN x;
                END;
                SELECT * FROM a.b;"""
    statements = sqlparse.split(sql)
    assert len(statements) == 2
def test_issue194_splitting_function():
    # Nested IF ... END IF; blocks inside BEGIN..END must not split either.
    sql = """   CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)
                BEGIN
                 DECLARE y VARCHAR(20);
                 IF (1 = 1) THEN
                 SET x = y;
                 END IF;
                 RETURN x;
                END;
                SELECT * FROM a.b;"""
    statements = sqlparse.split(sql)
    assert len(statements) == 2
def test_issue186_get_type():
    # A leading comment must not hide the statement type.
    statement = sqlparse.parse("-- comment\ninsert into foo")[0]
    assert statement.get_type() == 'INSERT'
def test_issue212_py2unicode():
    # Concatenating tokens with non-ASCII content must not raise.
    t1 = sql.Token(T.String, 'schöner ')
    t2 = sql.Token(T.String, 'bug')
    token_list = sql.TokenList([t1, t2])
    assert str(token_list) == 'schöner bug'
def test_issue213_leadingws():
    # strip_whitespace must drop leading whitespace too.
    formatted = sqlparse.format(" select * from foo", strip_whitespace=True)
    assert formatted == "select * from foo"
def test_issue227_gettype_cte():
    # get_type() must look past WITH clauses to the real statement type.
    select_stmt = sqlparse.parse('SELECT 1, 2, 3 FROM foo;')
    assert select_stmt[0].get_type() == 'SELECT'
    with_stmt = sqlparse.parse('WITH foo AS (SELECT 1, 2, 3)'
                               'SELECT * FROM foo;')
    assert with_stmt[0].get_type() == 'SELECT'
    with2_stmt = sqlparse.parse("""
        WITH foo AS (SELECT 1 AS abc, 2 AS def),
             bar AS (SELECT * FROM something WHERE x > 1)
        INSERT INTO elsewhere SELECT * FROM foo JOIN bar;""")
    assert with2_stmt[0].get_type() == 'INSERT'
def test_issue207_runaway_format():
    # Reindenting an aliased subselect must not produce runaway indentation.
    sql = 'select 1 from (select 1 as one, 2 as two, 3 from dual) t0'
    p = sqlparse.format(sql, reindent=True)
    assert p == '\n'.join([
        "select 1",
        "from",
        "  (select 1 as one,",
        "          2 as two,",
        "          3",
        "   from dual) t0"])
def test_token_next_doesnt_ignore_skip_cm():
    """token_next(skip_cm=True) must skip a leading comment token."""
    parsed = sqlparse.parse('--comment\nselect 1')[0]
    # token_next returns (index, token); we only care about the token.
    _, token = parsed.token_next(-1, skip_cm=True)
    assert token.value == 'select'
@pytest.mark.parametrize('s', [
    'SELECT x AS',
    'AS'
])
def test_issue284_as_grouping(s):
    """Issue 284: a dangling AS must round-trip through parse unchanged."""
    assert str(sqlparse.parse(s)[0]) == s
def test_issue315_utf8_by_default():
    """Issue 315: the lexer must pass non-ASCII text through by default."""
    # digest = '齐天大圣.カラフルな雲.사랑해요'
    # The escapes below are the utf-8 byte values of Chinese, Japanese and
    # Korean text, written out as individual code points.
    digest = (
        '\xe9\xbd\x90\xe5\xa4\xa9\xe5\xa4\xa7\xe5\x9c\xa3.'
        '\xe3\x82\xab\xe3\x83\xa9\xe3\x83\x95\xe3\x83\xab\xe3\x81\xaa\xe9'
        '\x9b\xb2.'
        '\xec\x82\xac\xeb\x9e\x91\xed\x95\xb4\xec\x9a\x94'
    )
    query = "select * from foo where bar = '{}'".format(digest)
    expected = "select *\nfrom foo\nwhere bar = '{}'".format(digest)
    assert sqlparse.format(query, reindent=True) == expected
def test_issue322_concurrently_is_keyword():
    """Issue 322: CONCURRENTLY must lex as a keyword, not an identifier."""
    parsed = sqlparse.parse(
        'CREATE INDEX CONCURRENTLY myindex ON mytable(col1);')[0]
    assert len(parsed.tokens) == 12
    # Even-numbered tokens (whitespace sits in between): CREATE, INDEX,
    # CONCURRENTLY, then the index name as an Identifier.
    assert parsed.tokens[0].ttype is T.Keyword.DDL
    assert parsed.tokens[2].ttype is T.Keyword
    assert parsed.tokens[4].ttype is T.Keyword
    assert parsed.tokens[4].value == 'CONCURRENTLY'
    assert isinstance(parsed.tokens[6], sql.Identifier)
    assert parsed.tokens[6].value == 'myindex'
@pytest.mark.parametrize('s', [
    'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop;',
    'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop',
])
def test_issue359_index_error_assignments(s):
    """Issue 359: ':=' assignments must not raise IndexError."""
    # Both calls merely need to complete without raising.
    sqlparse.parse(s)
    sqlparse.format(s, strip_comments=True)
def test_issue469_copy_as_psql_command():
    """Issue 469: the psql '\\copy' meta-command must survive formatting."""
    result = sqlparse.format(
        '\\copy select * from foo',
        keyword_case='upper', identifier_case='capitalize')
    assert result == '\\copy SELECT * FROM Foo'
@pytest.mark.xfail(reason='Needs to be fixed')
def test_issue484_comments_and_newlines():
    """Issue 484: strip_comments should not leave trailing blanks (xfail)."""
    source = '\n'.join([
        'Create table myTable',
        '(',
        '    myId TINYINT NOT NULL, --my special comment',
        '    myName VARCHAR2(100) NOT NULL',
        ')'])
    expected = '\n'.join([
        'Create table myTable',
        '(',
        '    myId TINYINT NOT NULL,',
        '    myName VARCHAR2(100) NOT NULL',
        ')'])
    assert sqlparse.format(source, strip_comments=True) == expected
def test_issue485_split_multi():
    """Issue 485: a CREATE RULE with a multi-statement body is ONE statement."""
    rule_sql = ('CREATE OR REPLACE RULE ruled_tab_2rules AS ON INSERT\n'
                'TO public.ruled_tab\n'
                'DO instead (\n'
                'select 1;\n'
                'select 2;\n'
                ');')
    assert len(sqlparse.split(rule_sql)) == 1
def test_issue489_tzcasts():
    """Issue 489: AT TIME ZONE must not break alias detection."""
    parsed = sqlparse.parse("select bar at time zone 'UTC' as foo")[0]
    last = parsed.tokens[-1]
    assert last.has_alias() is True
    assert last.get_alias() == 'foo'
def test_issue562_tzcasts():
    """Issue 562: whitespace between 'from' and 'bar' survives reindenting."""
    formatted = sqlparse.format(
        "SELECT f(HOUR from bar AT TIME ZONE 'UTC') from foo", reindent=True)
    assert formatted == (
        "SELECT f(HOUR\n         from bar AT TIME ZONE 'UTC')\nfrom foo")
def test_as_in_parentheses_indents():
    """Regression: '(as foo)' once raised in _process_parentheses."""
    assert sqlparse.format('(as foo)', reindent=True) == '(as foo)'
def test_format_invalid_where_clause():
    """Regression: a malformed WHERE clause once raised ValueError."""
    assert sqlparse.format('where, foo', reindent=True) == 'where, foo'
def test_splitting_at_and_backticks_issue588():
    """Issue 588: '@' plus a backtick-quoted host must not confuse split()."""
    statements = sqlparse.split(
        'grant foo to user1@`myhost`; grant bar to user1@`myhost`;')
    assert len(statements) == 2
    assert statements[-1] == 'grant bar to user1@`myhost`;'
| {
"content_hash": "ae1edd2bddc1a9066296aaf278d510f1",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 79,
"avg_line_length": 32.53146853146853,
"alnum_prop": 0.5644167383204357,
"repo_name": "andialbrecht/sqlparse",
"id": "4ffc69f3ea11a82cd1ebaf923accb8ad5fb1d369",
"size": "14010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_regressions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "506"
},
{
"name": "PLpgSQL",
"bytes": "3680"
},
{
"name": "Python",
"bytes": "217501"
}
],
"symlink_target": ""
} |
class URLNormalizingMiddleware(object):
    """WSGI middleware that canonicalizes the request path.

    NOTE(morgan): This must be a middleware as changing 'PATH_INFO' after
    the request hits the flask app will not impact routing.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        """Normalize URLs: strip trailing slashes, map '' to '/'."""
        # TODO(morgan): evaluate collapsing multiple slashes in this
        # middleware, e.g. '/v3//auth/tokens -> /v3/auth/tokens.
        path = environ['PATH_INFO']
        # Drop any trailing slashes, but never reduce '/' itself to ''.
        if len(path) > 1 and path.endswith('/'):
            path = path.rstrip('/')
        # An empty path means the site root.
        environ['PATH_INFO'] = path or '/'
        return self.app(environ, start_response)
| {
"content_hash": "e189a14f406c6ab1916bef836bd0192d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 38.82608695652174,
"alnum_prop": 0.6136618141097424,
"repo_name": "mahak/keystone",
"id": "faf98249034764f0b158cfb1f1f1c6714dedcf29",
"size": "1483",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/server/flask/request_processing/middleware/url_normalize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "6212093"
},
{
"name": "Shell",
"bytes": "30491"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SingleImage.image'
db.alter_column('content_singleimage', 'image', self.gf('django.db.models.fields.files.ImageField')(max_length=256))
def backwards(self, orm):
# Changing field 'SingleImage.image'
db.alter_column('content_singleimage', 'image', self.gf('django.db.models.fields.files.ImageField')(max_length=100))
models = {
'articles.article': {
'Meta': {'ordering': "('-publish_date', 'title')", 'object_name': 'Article'},
'addthis_use_author': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'addthis_username': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'auto_tag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followup_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followups'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'markup': ('django.db.models.fields.CharField', [], {'default': "'h'", 'max_length': '1'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_articles_rel_+'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'rendered_content': ('django.db.models.fields.TextField', [], {}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['articles.ArticleStatus']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['articles.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'use_addthis_button': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'articles.articlestatus': {
'Meta': {'ordering': "('ordering', 'name')", 'object_name': 'ArticleStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'articles.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'content.article': {
'Meta': {'ordering': "('-publish_date', 'title')", 'object_name': 'Article', '_ormbases': ['articles.Article']},
'article_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['articles.Article']", 'unique': 'True', 'primary_key': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['content.Category']", 'symmetrical': 'False'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'content.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'content.singleimage': {
'Meta': {'object_name': 'SingleImage'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 3, 0, 0)'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['content'] | {
"content_hash": "d307dcf818e5a5ff9f965f38888871e5",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 191,
"avg_line_length": 74.76612903225806,
"alnum_prop": 0.553985546327257,
"repo_name": "zniper/automag",
"id": "222d980b7086571b0aed407db246bd0128b102a5",
"size": "9295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magazine/apps/content/migrations/0007_auto__chg_field_singleimage_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53454"
},
{
"name": "JavaScript",
"bytes": "76741"
},
{
"name": "Python",
"bytes": "116177"
},
{
"name": "Shell",
"bytes": "904"
}
],
"symlink_target": ""
} |
"""
Test cases for the dircache module
Nick Mathewson
"""
import unittest
from test.test_support import run_unittest, TESTFN
import dircache, os, time, sys, tempfile
class DircacheTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
for fname in os.listdir(self.tempdir):
self.delTemp(fname)
os.rmdir(self.tempdir)
def writeTemp(self, fname):
f = open(os.path.join(self.tempdir, fname), 'w')
f.close()
def mkdirTemp(self, fname):
os.mkdir(os.path.join(self.tempdir, fname))
def delTemp(self, fname):
fname = os.path.join(self.tempdir, fname)
if os.path.isdir(fname):
os.rmdir(fname)
else:
os.unlink(fname)
def test_listdir(self):
## SUCCESSFUL CASES
entries = dircache.listdir(self.tempdir)
self.assertEquals(entries, [])
# Check that cache is actually caching, not just passing through.
self.assert_(dircache.listdir(self.tempdir) is entries)
# Directories aren't "files" on Windows, and directory mtime has
# nothing to do with when files under a directory get created.
# That is, this test can't possibly work under Windows -- dircache
# is only good for capturing a one-shot snapshot there.
if sys.platform[:3] not in ('win', 'os2'):
# Sadly, dircache has the same granularity as stat.mtime, and so
# can't notice any changes that occurred within 1 sec of the last
# time it examined a directory.
time.sleep(1)
self.writeTemp("test1")
entries = dircache.listdir(self.tempdir)
self.assertEquals(entries, ['test1'])
self.assert_(dircache.listdir(self.tempdir) is entries)
## UNSUCCESSFUL CASES
self.assertRaises(OSError, dircache.listdir, self.tempdir+"_nonexistent")
def test_annotate(self):
self.writeTemp("test2")
self.mkdirTemp("A")
lst = ['A', 'test2', 'test_nonexistent']
dircache.annotate(self.tempdir, lst)
self.assertEquals(lst, ['A/', 'test2', 'test_nonexistent'])
def test_main():
    """Entry point used by Python's regrtest-based test driver."""
    run_unittest(DircacheTests)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    test_main()
| {
"content_hash": "1f566a8458e693e706aebed7b5379402",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 31.534246575342465,
"alnum_prop": 0.6198957428323197,
"repo_name": "MalloyPower/parsing-python",
"id": "68f6fc28b6c49292fa0db2a418efcbc17b070ec2",
"size": "2302",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.5/Lib/test/test_dircache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
"""Reads vehicle status from BMW connected drive portal."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import discovery
from homeassistant.helpers.event import track_utc_time_change
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

DOMAIN = "bmw_connected_drive"

# Per-account configuration keys.
CONF_REGION = "region"
CONF_READ_ONLY = "read_only"

# Service-call payload key identifying the target vehicle.
ATTR_VIN = "vin"

# Schema for a single named account entry under this integration's config.
ACCOUNT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_REGION): vol.Any("north_america", "china", "rest_of_world"),
        vol.Optional(CONF_READ_ONLY, default=False): cv.boolean,
    }
)

# Top-level config: a mapping of account name -> ACCOUNT_SCHEMA.
CONFIG_SCHEMA = vol.Schema({DOMAIN: {cv.string: ACCOUNT_SCHEMA}}, extra=vol.ALLOW_EXTRA)

SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})

# Platforms loaded for each configured account.
BMW_COMPONENTS = ["binary_sensor", "device_tracker", "lock", "sensor"]
UPDATE_INTERVAL = 5  # in minutes

SERVICE_UPDATE_STATE = "update_state"

# Maps Home Assistant service names to remote-service method names invoked
# on vehicle.remote_services (see execute_service in setup_account).
_SERVICE_MAP = {
    "light_flash": "trigger_remote_light_flash",
    "sound_horn": "trigger_remote_horn",
    "activate_air_conditioning": "trigger_remote_air_conditioning",
}
def setup(hass, config: dict):
    """Set up the BMW connected drive integration for every account."""
    hass.data[DOMAIN] = [
        setup_account(account_config, hass, name)
        for name, account_config in config[DOMAIN].items()
    ]

    def _update_all(call) -> None:
        """Update all BMW accounts."""
        for cd_account in hass.data[DOMAIN]:
            cd_account.update()

    # Service to manually trigger updates for all accounts.
    hass.services.register(DOMAIN, SERVICE_UPDATE_STATE, _update_all)

    # Fetch the initial state, then hand each platform off to discovery.
    _update_all(None)
    for component in BMW_COMPONENTS:
        discovery.load_platform(hass, component, DOMAIN, {}, config)

    return True
def setup_account(account_config: dict, hass, name: str) -> "BMWConnectedDriveAccount":
    """Set up a new BMWConnectedDriveAccount based on the config."""
    read_only = account_config[CONF_READ_ONLY]
    _LOGGER.debug("Adding new account %s", name)
    cd_account = BMWConnectedDriveAccount(
        account_config[CONF_USERNAME],
        account_config[CONF_PASSWORD],
        account_config[CONF_REGION],
        name,
        read_only,
    )

    def execute_service(call):
        """Execute a service for a vehicle.

        This must be a member function as we need access to the cd_account
        object here.
        """
        vin = call.data[ATTR_VIN]
        vehicle = cd_account.account.get_vehicle(vin)
        if not vehicle:
            _LOGGER.error("Could not find a vehicle for VIN %s", vin)
            return
        # Look up and invoke the matching remote service on the vehicle.
        getattr(vehicle.remote_services, _SERVICE_MAP[call.service])()

    if not read_only:
        # register the remote services
        for service in _SERVICE_MAP:
            hass.services.register(
                DOMAIN, service, execute_service, schema=SERVICE_SCHEMA
            )

    # update every UPDATE_INTERVAL minutes, starting now
    # this should even out the load on the servers
    now = dt_util.utcnow()
    track_utc_time_change(
        hass,
        cd_account.update,
        minute=range(now.minute % UPDATE_INTERVAL, 60, UPDATE_INTERVAL),
        second=now.second,
    )

    return cd_account
class BMWConnectedDriveAccount:
    """One BMW ConnectedDrive portal account plus its update listeners."""

    def __init__(
        self, username: str, password: str, region_str: str, name: str, read_only
    ) -> None:
        """Constructor."""
        # Imported lazily so the dependency loads only when an account exists.
        from bimmer_connected.account import ConnectedDriveAccount
        from bimmer_connected.country_selector import get_region_from_name

        self.read_only = read_only
        self.account = ConnectedDriveAccount(
            username, password, get_region_from_name(region_str)
        )
        self.name = name
        self._update_listeners = []

    def update(self, *_):
        """Update the state of all vehicles and notify all listeners."""
        _LOGGER.debug(
            "Updating vehicle state for account %s, notifying %d listeners",
            self.name,
            len(self._update_listeners),
        )
        try:
            self.account.update_vehicle_states()
            for listener in self._update_listeners:
                listener()
        except OSError as exception:
            _LOGGER.error(
                "Could not connect to the BMW Connected Drive portal. "
                "The vehicle state could not be updated."
            )
            _LOGGER.exception(exception)

    def add_update_listener(self, listener):
        """Add a listener for update notifications."""
        self._update_listeners.append(listener)
| {
"content_hash": "43f1fa02af013cc3e0455c626f92e865",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 88,
"avg_line_length": 32.16883116883117,
"alnum_prop": 0.6483649576100121,
"repo_name": "Cinntax/home-assistant",
"id": "8e67da86dc3014a034677aaa447e60ae32e3fce5",
"size": "4954",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/bmw_connected_drive/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
} |
'''
Created on Mar 27, 2015
@author: maxz
'''
import seaborn as sns
import itertools
from GPyNotebook.util import lim
from GPyNotebook.plotting.plot import LabelDictPlot
class Scatter(LabelDictPlot):
    """2-D scatter plot of X with one artist per label group."""

    def __init__(self, X, lab_dict, colors=None, markers=None, figsize=(4,3), figtype=None, *args, **kwargs):
        """
        if colors is a string, we call sns.color_palette(colors, size(labels)) as the colors.
        """
        super(Scatter, self).__init__(lab_dict, figsize, figtype, *args, **kwargs)
        self.X = X
        # Which columns of X are shown on the x and y axes.
        self.xdim, self.ydim = 0, 1
        if colors is None:
            colors = sns.color_palette()
        self.colors = colors
        if markers is None:
            markers = '<>sdo'
        self.markers = markers
        self.ax.set_xlabel('Dimension {}'.format(self.xdim))
        self.ax.set_ylabel('Dimension {}'.format(self.ydim))
        self.labels_updated()
        self.redraw()

    def redraw(self):
        """Re-plot every label group against the current x/y dimensions."""
        x, y = self.X[:, self.xdim], self.X[:, self.ydim]
        for l, s in zip(self.ulabels, self.scatters):
            f = self.labels == l
            s.set_data(x[f], y[f])
        self.ax.set_xlim(lim(x))
        self.ax.set_ylim(lim(y))
        self.draw()

    def labels_updated(self):
        """Rebuild the per-label line artists after the label set changed."""
        # Drop all existing artists before recreating them.
        for _ in range(len(self.ax.lines)):
            self.ax.lines[0].remove()
        self.scatters = []
        if isinstance(self.colors, str):
            c = iter(sns.color_palette(self.colors, self.ulabels.size))
        else:
            c = itertools.cycle(self.colors)
        m = itertools.cycle(self.markers)
        for l in self.ulabels:
            # FIX: use the builtin next() instead of the Python-2-only
            # iterator method .next(), so this runs on Python 2.6+ and 3.
            self.scatters.extend(self.ax.plot(
                [], [], marker=next(m), markeredgecolor='none', ls='',
                markerfacecolor=next(c), alpha=.7, label=l))
        self.redraw()

    def change_x_dim(self, name, old, new):
        """Traitlets-style callback: switch the x-axis to dimension `new`."""
        self.xdim = new
        self.ax.set_xlabel('Dimension {}'.format(self.xdim))
        self.redraw()

    def change_y_dim(self, name, old, new):
        """Traitlets-style callback: switch the y-axis to dimension `new`."""
        self.ydim = new
        self.ax.set_ylabel('Dimension {}'.format(self.ydim))
        self.redraw()
| {
"content_hash": "2971a4ae1363ebdf247fd5fa3b63b765",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 147,
"avg_line_length": 31.956521739130434,
"alnum_prop": 0.5573696145124717,
"repo_name": "mzwiessele/GPyNotebook",
"id": "ad9ac2f26191d0ca3096bb91bc83443a725929f9",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GPyNotebook/plotting/scatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9660"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Spice Collective miner (human male) creature template."""
    result = Creature()
    result.template = "object/mobile/shared_dressed_spice_collective_miner_human_male_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_male")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
return result | {
"content_hash": "27aaf690974a16259e0bdc8adeae37f8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 24.923076923076923,
"alnum_prop": 0.7037037037037037,
"repo_name": "anhstudios/swganh",
"id": "71cbff82c491ee6482a2daf48753cad98b7a6bc5",
"size": "469",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_spice_collective_miner_human_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Hash import SHA256
# Hard-coded 1024-bit RSA public key used to verify message signatures.
pubkey=RSA.importKey('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCbT3x5Uk2aQuExM/mozvXZvoZ3HC5hsHgG18FLAmb2mESHm2ZvPveqDt/dqOop+5hSoY5L5zsfi61Xec9kCKTn6AgVDWMG7EHyr4jyQ5bL6je+jdcIEVgCL+WqkWR16RNZzPIkdkPzxl+6h5DF1vplWggStvZOv5DVvkpFWHMLMQ==')
inbox=ET.parse('email.xml').getroot()
# Scan every email; print the first message whose signature verifies.
for email in inbox.findall('email'):
    for signature in email.findall('signature'):
        for message in email.findall('message'):
            mes=message.text
            hash = SHA256.new(mes).digest()
            sig=long(signature.text)
            # NOTE(review): raw-digest RSA verify (no PKCS#1/PSS padding) —
            # textbook RSA, acceptable for a CTF but not for production.
            if pubkey.verify(hash, (sig,"")):
                print 'verified message: '+mes
break | {
"content_hash": "177b84d614d4853f8e189e4348ee1d58",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 236,
"avg_line_length": 36.526315789473685,
"alnum_prop": 0.792507204610951,
"repo_name": "mitre-cyber-academy/2015-traditional-250b",
"id": "468e579ff866f6793b542af0e82e16f920bd09ad",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shadysolution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7231"
},
{
"name": "Python",
"bytes": "4423"
}
],
"symlink_target": ""
} |
import re
import requests
from bs4 import BeautifulSoup
from models import Torrent
from abc import ABCMeta, abstractmethod
class TorrentApi(object):
    """Abstract base class for torrent-site API scrapers."""
    __metaclass__ = ABCMeta  # Python-2-style ABC declaration

    def __init__(self, base_url):
        # Default to https when the caller passed a bare host name.
        if not re.match(r'^https?://', base_url):
            base_url = 'https://' + base_url
        # Normalize away a single trailing slash, if present.
        self.base_url = base_url[:-1] if base_url.endswith('/') else base_url

    @abstractmethod
    def search(self, query):
        raise NotImplementedError("Should implement search()!")
class KickAssTorrentApi(TorrentApi):
    """Scraper-backed search client for Kickass Torrents result pages."""

    def __init__(self, base_url='http://kickass.to'):
        TorrentApi.__init__(self, base_url)

    def search(self, query, page=1):
        """Return (torrents, num_pages) for `query` on result page `page`."""
        # Page 1 carries no page segment in the site's URL scheme.
        if page == 1:
            search_url = self.base_url + '/search/{}/'.format(query)
        else:
            search_url = self.base_url + '/search/{}/{}/'.format(query, page)
        request = requests.get(search_url)
        soup = BeautifulSoup(request.text, "lxml")
        # The site renders a dedicated error page when nothing matches.
        error_page = soup.find(name='div', class_='errorpage')
        torrents = []
        num_pages = 0
        if not error_page:
            # Each result row carries an id like 'torrent_<hash>'.
            for row in soup.find_all(name='tr', id=re.compile('torrent_')):
                torrents.append(self._row_to_torrent(row))
            # The last pagination link holds the total page count.
            pagination = soup.find(name='div', class_='pages')
            num_pages = int(pagination.find_all('a')[-1].find('span').text)
        return torrents, num_pages

    def _row_to_torrent(self, row):
        """Map one result <tr> to a Torrent model instance."""
        torrent = Torrent()
        torrent.name = row.find(class_='torrentname').find_all('a')[1].text
        torrent.magnet_link = row.find(name='a', class_='imagnet').get('href')
        torrent.torrent_link = row.find(name='a', class_='idownload').get('href')
        tds = row.find_all('td')
        # Columns 4/5 hold the seeder and leecher counts.
        torrent.seeders = int(tds[4].text)
        torrent.leechers = int(tds[5].text)
        torrent.size = 'UNKNOWN'  # TODO
        return torrent
class PirateBayApi(TorrentApi):
    """Scraper-backed search client for The Pirate Bay result pages."""

    def __init__(self, base_url='https://thepiratebay.org'):
        TorrentApi.__init__(self, base_url)
        # Extracts "<number> <unit>" from a description like "..., Size 1.2 GiB, ...".
        self.size_regex = re.compile('Size (\d+\.?\d*)[^MGK]+([^,]*)')

    # page numbers are 0-based for TPB, but we make them 1-based for consistency
    def search(self, query, page=1):
        """Return (torrents, num_pages) for `query` on result page `page`."""
        search_url = self.base_url + '/search/{}/{}/7/0'.format(query, page - 1)
        request = requests.get(search_url)
        soup = BeautifulSoup(request.text, "lxml")
        table = soup.find(name='table', id='searchResult')
        # Pagination links live in the div right after the main content.
        pagination = soup.find(id='main-content').find_next_sibling('div')
        num_pages = len(pagination.find_all('a'))
        if num_pages == 0:  # no pagination links means there is only one page
            num_pages = 1
        torrents = []
        for row in table.find_all('tr', recursive=False):
            torrents.append(self._row_to_torrent(row))
        return torrents, num_pages

    def _row_to_torrent(self, row):
        """Map one result <tr> to a Torrent model instance."""
        torrent = Torrent()
        torrent_name_tag = row.find(class_='detName')
        description = row.find(class_='detDesc').text
        torrent.name = torrent_name_tag.find(class_='detLink').text
        torrent.magnet_link = torrent_name_tag.find_next_sibling('a').get('href')
        tds = row.find_all('td')
        # Columns 2/3 hold the seeder and leecher counts.
        torrent.seeders = int(tds[2].text)
        torrent.leechers = int(tds[3].text)
        # Recover "<number> <unit>" from the free-text description.
        torrent.size = ' '.join(self.size_regex.search(description).groups())
        return torrent
| {
"content_hash": "541b895256008725cbc44f55702706d6",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 81,
"avg_line_length": 36.242105263157896,
"alnum_prop": 0.5965727563171652,
"repo_name": "DandyDev/flood",
"id": "0a8d80870b6011670f718f6f8d2ea6f9bbc9b50e",
"size": "3443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flood/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21614"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the droid item-storage-module (mark 3) draft schematic template."""
    result = Intangible()
    result.template = "object/draft_schematic/droid/component/shared_item_storage_module_3.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
return result | {
"content_hash": "9dd08ebe2709b1d1bf516702cd930017",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7009345794392523,
"repo_name": "obi-two/Rebelion",
"id": "2c741cb9e45b03e80bfd00b338d6c2509d628945",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/droid/component/shared_item_storage_module_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import re
import sys
import argparse
import compiler
import preprocessor
import config
import cli
import errno
def read_cli_args():
    """Define, parse and return the nsbcomp command line arguments."""
    cli.printv("Read CLI arguments.")
    parser = argparse.ArgumentParser('nsbcomp')
    parser.add_argument('--input', '-i', action='store', nargs='+',
                        help='specify the input source files. STDIN if omitted')
    parser.add_argument('--output', '-o', action='store', nargs='?',
                        help='specify the output file. STDOUT if omitted.')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='print verbose messages to STDOUT.')
    parser.add_argument('--preserve-tmp', '-p', action='store_true',
                        help='preserve tmp files on exit. Debug flag.')
    return parser.parse_args()
def config_setup():
    """Load the configuration file and push settings to the preprocessor."""
    try:
        config.conf_load()
    except IOError as e:
        # A missing config file is reported; any load failure aborts setup.
        if e.errno == errno.ENOENT:
            cli.printe('Failed to load config file: ' + str(e))
        return

    config.conf_dump()
    if 'INCLUDE_PATHS' in config.config:
        preprocessor.set_include_paths(config.config['INCLUDE_PATHS'])
    if 'DIR_TMP' in config.config:
        preprocessor.set_tmp_dir(config.config['DIR_TMP'][0])
def main():
    """Entry point: preprocess the input files, then compile the result.

    Exits the process with the errno of any IOError/OSError raised by
    the preprocessor or the compiler.  Temporary files are removed
    unless --preserve-tmp was given.
    """
    tmp_path = ''
    args = read_cli_args()
    cli.verbose(args.verbose)
    config_setup()

    cli.printv("Preprocessing input files.")
    try:
        tmp_path = preprocessor.multifile_process(args.input)
    except (IOError, OSError) as e:
        sys.exit(e.errno)

    cli.printv("Compiling tmp file: " + tmp_path)
    try:
        compiler.compile(tmp_path, args.output)
    except (IOError, OSError) as e:
        # Clean up before exiting on failure (idiomatic `not` replaces
        # the original `== False` comparison).
        if not args.preserve_tmp:
            preprocessor.remove_tmp_data(tmp_path)
        if e.errno == errno.ENOENT:
            cli.printe(str(e))
        sys.exit(e.errno)

    if not args.preserve_tmp:
        preprocessor.remove_tmp_data(tmp_path)


if __name__ == '__main__':
    main()
| {
"content_hash": "abffe2f7b172c748c113aa38c239f6b2",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 61,
"avg_line_length": 24.526315789473685,
"alnum_prop": 0.6765021459227468,
"repo_name": "eerotal/nsbcomp",
"id": "9c1b34f5ede4046b689b1062f2a75cd62d1e883b",
"size": "1879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nsbcomp/nsbcomp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "10137"
}
],
"symlink_target": ""
} |
"""
This file contains Python code illustrating the creation and manipulation of
vtkTable objects.
"""
from vtk import *
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
if __name__ == "__main__":
    """ Main entry point of this python script """
    # Parenthesized single-argument print calls behave identically under
    # Python 2's print statement and are valid Python 3 (the original
    # `print "..."` form is a syntax error under Python 3).
    print("vtkTable Example 1: Building a vtkTable from scratch.")

    # ----------------------------------------------------------
    # Create an empty table
    T = vtkTable()

    # ----------------------------------------------------------
    # Create Column 1 (IDs): integers 1..7
    col1 = vtkIntArray()
    col1.SetName("ID")
    for i in range(1, 8):
        col1.InsertNextValue(i)
    T.AddColumn(col1)

    # ----------------------------------------------------------
    # Create Column 2 (Names): one name per ID
    namesList = ['Bob', 'Ann', 'Sue', 'Bill', 'Joe', 'Jill', 'Rick']
    col2 = vtkStringArray()
    col2.SetName("Name")
    for val in namesList:
        col2.InsertNextValue(val)
    T.AddColumn(col2)

    # ----------------------------------------------------------
    # Create Column 3 (Ages)
    agesList = [12, 25, 72, 11, 31, 36, 32]
    col3 = vtkIntArray()
    col3.SetName("Age")
    for val in agesList:
        col3.InsertNextValue(val)
    T.AddColumn(col3)

    # Dump the table to stdout (argument presumably the column width —
    # TODO confirm against the vtkTable API docs).
    T.Dump(6)
    print("vtkTable Example 1: Finished.")
"content_hash": "2c2a1cec4136c4a92b6f98b8d40f5cbf",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 30.604166666666668,
"alnum_prop": 0.4200136147038802,
"repo_name": "timkrentz/SunTracker",
"id": "41004a51f72ace2efac94b3320d1b592b041f8a6",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/Examples/Infovis/Python/tables1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
class LoginForm(object):
    """DTO holding the credentials submitted with a login request.

    Note: the original class defined ``__init__`` twice; Python silently
    keeps only the second definition, so the keyword-based constructor
    was dead code.  Only the ``post``-mapping constructor was ever in
    effect; the keyword form is preserved as the ``from_credentials``
    factory for backward-compatible convenience.
    """

    def __init__(self, post):
        """Build the form from a POST-like mapping exposing ``.get()``.

        Empty strings are normalized to None by the ``or None`` idiom.
        """
        self._name = post.get('name') or None
        self._password = post.get('password') or None

    @classmethod
    def from_credentials(cls, name: str = None, password: str = None):
        """Alternate constructor from explicit name/password values."""
        form = cls({})
        form._name = name
        form._password = password
        return form

    @property
    def name(self) -> str:
        return self._name

    @name.setter
    def name(self, name: str):
        self._name = name

    @property
    def password(self) -> str:
        return self._password

    @password.setter
    def password(self, password: str):
        self._password = password
| {
"content_hash": "13fc5dd33ac09a1a50c0ab1dcdb92992",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 59,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.5886075949367089,
"repo_name": "SWE574-Nerds/friendly-eureka",
"id": "a80c7dbb3034d5555a3f77403a3083a5809ee46f",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/eureka/api/dto/LoginForm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4243"
},
{
"name": "HTML",
"bytes": "877139"
},
{
"name": "JavaScript",
"bytes": "4242"
},
{
"name": "Python",
"bytes": "63112"
},
{
"name": "SQLPL",
"bytes": "96298"
},
{
"name": "TypeScript",
"bytes": "62369"
}
],
"symlink_target": ""
} |
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from airflow.exceptions import AirflowException
import time
def invoke_sts_job(sts_id, project):
    """Synchronously invoke a Storage Transfer Service job and wait for it.

    Polls the transfer operation every 2 seconds until it reports
    SUCCESS, raising on any terminal/blocked state.

    Args:
        sts_id (str): the id of your transfer service job provided after the sts creation
        project (str): the id of your project

    Returns:
        str: the name of the transfer operation that was started.

    Raises:
        AirflowException: if the operation reaches FAILED, ABORTED or
            PAUSED state.
    """
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('storagetransfer', 'v1', credentials=credentials)

    run_request = service.transferJobs().run(
        jobName=sts_id, body={"projectId": project})
    response = run_request.execute()
    operation_name = response.get("name")

    def _operation_status():
        # Single place to fetch the operation's current status (the
        # original duplicated this long expression twice).
        operation = service.transferOperations().get(
            name=operation_name).execute()
        return operation.get("metadata").get("status")

    status = _operation_status()
    while status != "SUCCESS":
        time.sleep(2)
        status = _operation_status()
        print(status)
        if status in ("FAILED", "ABORTED", "PAUSED"):
            # The original had an unreachable `break` after this raise;
            # the raise alone terminates the polling loop.
            raise AirflowException("sts job failed")
    return operation_name
"content_hash": "6e82753b6b1c2cbdfcbc4161538228ae",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 116,
"avg_line_length": 37.935483870967744,
"alnum_prop": 0.6879251700680272,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "91c1f967cd0c99b626f0c16c6da451b4a6345391",
"size": "1772",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/ccm-service-kit/composer/dags/dependencies/scripts/run_sts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
} |
"""Manages watermarks of PCollections and AppliedPTransforms."""
from __future__ import absolute_import
import threading
from apache_beam import pipeline
from apache_beam import pvalue
from apache_beam.runners.direct.util import TimerFiring
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import TIME_GRANULARITY
class WatermarkManager(object):
    """For internal use only; no backwards-compatibility guarantees.
    Tracks and updates watermarks for all AppliedPTransforms."""

    # Sentinel watermark values: +inf means "all input fully processed",
    # -inf means "nothing processed yet".
    WATERMARK_POS_INF = MAX_TIMESTAMP
    WATERMARK_NEG_INF = MIN_TIMESTAMP

    def __init__(self, clock, root_transforms, value_to_consumers,
                 transform_keyed_states):
        # clock: processing time clock
        # root_transforms: transforms without upstream inputs.
        # value_to_consumers: PCollection -> transforms that consume it.
        # transform_keyed_states: AppliedPTransform -> per-key state.
        self._clock = clock  # processing time clock
        self._root_transforms = root_transforms
        self._value_to_consumers = value_to_consumers
        self._transform_keyed_states = transform_keyed_states

        # AppliedPTransform -> TransformWatermarks
        self._transform_to_watermarks = {}

        # Create a watermark holder for every transform in the pipeline...
        for root_transform in root_transforms:
            self._transform_to_watermarks[root_transform] = _TransformWatermarks(
                self._clock, transform_keyed_states[root_transform], root_transform)

        for consumers in value_to_consumers.values():
            for consumer in consumers:
                self._transform_to_watermarks[consumer] = _TransformWatermarks(
                    self._clock, transform_keyed_states[consumer], consumer)

        # ...then, in a second pass, wire each consumer to the watermarks of
        # its producers (all holders must exist before linking).
        for consumers in value_to_consumers.values():
            for consumer in consumers:
                self._update_input_transform_watermarks(consumer)

    def _update_input_transform_watermarks(self, applied_ptransform):
        """Link applied_ptransform to the watermarks of its producers."""
        assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
        input_transform_watermarks = []
        for input_pvalue in applied_ptransform.inputs:
            # Every input either has a producing transform or is a PBegin.
            assert input_pvalue.producer or isinstance(input_pvalue, pvalue.PBegin)
            if input_pvalue.producer:
                input_transform_watermarks.append(
                    self.get_watermarks(input_pvalue.producer))
        self._transform_to_watermarks[
            applied_ptransform].update_input_transform_watermarks(
                input_transform_watermarks)

    def get_watermarks(self, applied_ptransform):
        """Gets the input and output watermarks for an AppliedPTransform.

        If the applied_ptransform has not processed any elements, return a
        watermark with minimum value.

        Args:
          applied_ptransform: AppliedPTransform to get the watermarks for.

        Returns:
          A snapshot (TransformWatermarks) of the input watermark and output
          watermark for the provided transform.
        """
        # TODO(altay): Composite transforms should have a composite watermark. Until
        # then they are represented by their last transform.
        while applied_ptransform.parts:
            applied_ptransform = applied_ptransform.parts[-1]

        return self._transform_to_watermarks[applied_ptransform]

    def update_watermarks(self, completed_committed_bundle, applied_ptransform,
                          completed_timers, outputs, unprocessed_bundles,
                          keyed_earliest_holds):
        """Records the results of a processed bundle and refreshes watermarks.

        Updates the pending-bundle bookkeeping, applies per-key watermark
        holds, and propagates any watermark advance downstream.
        """
        assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
        self._update_pending(
            completed_committed_bundle, applied_ptransform, completed_timers,
            outputs, unprocessed_bundles)
        tw = self.get_watermarks(applied_ptransform)
        tw.hold(keyed_earliest_holds)
        self._refresh_watermarks(applied_ptransform)

    def _update_pending(self, input_committed_bundle, applied_ptransform,
                        completed_timers, output_committed_bundles,
                        unprocessed_bundles):
        """Updated list of pending bundles for the given AppliedPTransform."""
        # Update pending elements. Filter out empty bundles. They do not impact
        # watermarks and should not trigger downstream execution.
        for output in output_committed_bundles:
            if output.has_elements():
                if output.pcollection in self._value_to_consumers:
                    consumers = self._value_to_consumers[output.pcollection]
                    for consumer in consumers:
                        consumer_tw = self._transform_to_watermarks[consumer]
                        consumer_tw.add_pending(output)

        completed_tw = self._transform_to_watermarks[applied_ptransform]
        completed_tw.update_timers(completed_timers)

        # Unprocessed bundles are re-queued against the same transform.
        for unprocessed_bundle in unprocessed_bundles:
            completed_tw.add_pending(unprocessed_bundle)

        # Only root transforms may process without an input bundle.
        assert input_committed_bundle or applied_ptransform in self._root_transforms
        if input_committed_bundle and input_committed_bundle.has_elements():
            completed_tw.remove_pending(input_committed_bundle)

    def _refresh_watermarks(self, applied_ptransform):
        # Recompute this transform's watermarks; if the output watermark
        # advanced, recursively refresh every downstream consumer.
        assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
        tw = self.get_watermarks(applied_ptransform)
        if tw.refresh():
            for pval in applied_ptransform.outputs.values():
                if isinstance(pval, pvalue.DoOutputsTuple):
                    pvals = (v for v in pval)
                else:
                    pvals = (pval,)
                for v in pvals:
                    if v in self._value_to_consumers:  # If there are downstream consumers
                        consumers = self._value_to_consumers[v]
                        for consumer in consumers:
                            self._refresh_watermarks(consumer)

    def extract_fired_timers(self):
        """Returns a list of (AppliedPTransform, [TimerFiring]) pairs for all
        transforms with newly fired timers."""
        all_timers = []
        # NOTE: dict.iteritems() keeps this module Python 2 only.
        for applied_ptransform, tw in self._transform_to_watermarks.iteritems():
            fired_timers = tw.extract_fired_timers()
            if fired_timers:
                all_timers.append((applied_ptransform, fired_timers))
        return all_timers
class _TransformWatermarks(object):
    """Tracks input and output watermarks for an AppliedPTransform.

    All mutable state is guarded by a single lock; public accessors take
    the lock so callers see a consistent snapshot.
    """

    def __init__(self, clock, keyed_states, transform):
        self._clock = clock
        self._keyed_states = keyed_states
        # Watermarks of the transforms producing this transform's inputs.
        self._input_transform_watermarks = []
        self._input_watermark = WatermarkManager.WATERMARK_NEG_INF
        self._output_watermark = WatermarkManager.WATERMARK_NEG_INF
        # key -> earliest timestamp held back for that key.
        self._keyed_earliest_holds = {}
        self._pending = set()  # Scheduled bundles targeted for this transform.
        # Timers already handed out but not yet reported completed.
        self._fired_timers = set()
        self._lock = threading.Lock()

        self._label = str(transform)

    def update_input_transform_watermarks(self, input_transform_watermarks):
        """Replaces the upstream watermark list (wired once at pipeline setup)."""
        with self._lock:
            self._input_transform_watermarks = input_transform_watermarks

    def update_timers(self, completed_timers):
        """Forgets timers whose firings have been fully processed."""
        with self._lock:
            for timer_firing in completed_timers:
                self._fired_timers.remove(timer_firing)

    @property
    def input_watermark(self):
        with self._lock:
            return self._input_watermark

    @property
    def output_watermark(self):
        with self._lock:
            return self._output_watermark

    def hold(self, keyed_earliest_holds):
        """Applies per-key output-watermark holds; None or +inf clears a hold."""
        with self._lock:
            for key, hold_value in keyed_earliest_holds.iteritems():
                self._keyed_earliest_holds[key] = hold_value
                if (hold_value is None or
                    hold_value == WatermarkManager.WATERMARK_POS_INF):
                    del self._keyed_earliest_holds[key]

    def add_pending(self, pending):
        with self._lock:
            self._pending.add(pending)

    def remove_pending(self, completed):
        with self._lock:
            # Ignore repeated removes. This will happen if a transform has a repeated
            # input.
            if completed in self._pending:
                self._pending.remove(completed)

    def refresh(self):
        """Recomputes the input and output watermarks.

        Returns True iff the output watermark advanced.
        """
        with self._lock:
            # Find the earliest timestamp among all elements still pending.
            min_pending_timestamp = WatermarkManager.WATERMARK_POS_INF
            has_pending_elements = False
            for input_bundle in self._pending:
                # TODO(ccy): we can have the Bundle class keep track of the minimum
                # timestamp so we don't have to do an iteration here.
                for wv in input_bundle.get_elements_iterable():
                    has_pending_elements = True
                    if wv.timestamp < min_pending_timestamp:
                        min_pending_timestamp = wv.timestamp

            # If there is a pending element with a certain timestamp, we can at most
            # advance our watermark to the maximum timestamp less than that
            # timestamp.
            pending_holder = WatermarkManager.WATERMARK_POS_INF
            if has_pending_elements:
                pending_holder = min_pending_timestamp - TIME_GRANULARITY

            # Upstream bound: the minimum of all producer output watermarks
            # (+inf appended so root transforms with no inputs are unbounded).
            input_watermarks = [
                tw.output_watermark for tw in self._input_transform_watermarks]
            input_watermarks.append(WatermarkManager.WATERMARK_POS_INF)
            producer_watermark = min(input_watermarks)

            # The input watermark is monotonic: max() prevents regression.
            self._input_watermark = max(self._input_watermark,
                                        min(pending_holder, producer_watermark))

            # Output watermark is additionally held back by per-key holds.
            earliest_hold = WatermarkManager.WATERMARK_POS_INF
            for hold in self._keyed_earliest_holds.values():
                if hold < earliest_hold:
                    earliest_hold = hold
            new_output_watermark = min(self._input_watermark, earliest_hold)

            advanced = new_output_watermark > self._output_watermark
            self._output_watermark = new_output_watermark
            return advanced

    @property
    def synchronized_processing_output_time(self):
        # Processing-time counterpart of the output watermark.
        return self._clock.time()

    def extract_fired_timers(self):
        """Collects timers that fire at the current input watermark.

        Returns a list of TimerFiring, or False when previously extracted
        timers are still outstanding (both values are falsy when empty,
        which is what the caller relies on).
        """
        with self._lock:
            if self._fired_timers:
                return False

            fired_timers = []
            for encoded_key, state in self._keyed_states.iteritems():
                timers = state.get_timers(watermark=self._input_watermark)
                for expired in timers:
                    window, (name, time_domain, timestamp) = expired
                    fired_timers.append(
                        TimerFiring(encoded_key, window, name, time_domain, timestamp))
            self._fired_timers.update(fired_timers)
            return fired_timers
| {
"content_hash": "04b844e864df69ca5096d0809434873a",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 80,
"avg_line_length": 38.83265306122449,
"alnum_prop": 0.695606474668909,
"repo_name": "staslev/incubator-beam",
"id": "935998d27de0f281f28a08460ab3791fd0d406a6",
"size": "10299",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/direct/watermark_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22449"
},
{
"name": "Java",
"bytes": "9764829"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
import numpy as np
class ClassifierTrainer(object):
    """ The trainer class performs SGD with momentum on a cost function """

    def __init__(self):
        self.step_cache = {}  # for storing velocities in momentum update

    def train(self, X, y, X_val, y_val,
              model, loss_function,
              reg=0.0,
              learning_rate=1e-2, momentum=0, learning_rate_decay=0.95,
              update='momentum', sample_batches=True,
              num_epochs=30, batch_size=100, acc_frequency=None,
              verbose=False):
        """
        Optimize the parameters of a model to minimize a loss function. We use
        training data X and y to compute the loss and gradients, and periodically
        check the accuracy on the validation set.

        Inputs:
        - X: Array of training data; each X[i] is a training sample.
        - y: Vector of training labels; y[i] gives the label for X[i].
        - X_val: Array of validation data
        - y_val: Vector of validation labels
        - model: Dictionary that maps parameter names to parameter values. Each
          parameter value is a numpy array.
        - loss_function: A function that can be called in the following ways:
          scores = loss_function(X, model, reg=reg)
          loss, grads = loss_function(X, model, y, reg=reg)
        - reg: Regularization strength. This will be passed to the loss function.
        - learning_rate: Initial learning rate to use.
        - momentum: Parameter to use for momentum updates.
        - learning_rate_decay: The learning rate is multiplied by this after each
          epoch.
        - update: The update rule to use. One of 'sgd', 'momentum', or 'rmsprop'.
        - sample_batches: If True, use a minibatch of data for each parameter update
          (stochastic gradient descent); if False, use the entire training set for
          each parameter update (gradient descent).
        - num_epochs: The number of epochs to take over the training data.
        - batch_size: The number of training samples to use at each iteration.
        - acc_frequency: If set to an integer, we compute the training and
          validation set error after every acc_frequency iterations.
        - verbose: If True, print status after each epoch.

        Returns a tuple of:
        - best_model: The model that got the highest validation accuracy during
          training.
        - loss_history: List containing the value of the loss function at each
          iteration.
        - train_acc_history: List storing the training set accuracy at each epoch.
        - val_acc_history: List storing the validation set accuracy at each epoch.
        """
        N = X.shape[0]

        if sample_batches:
            # Floor division keeps this an int under both Python 2 and 3
            # (the original relied on Python 2's integer `/`).
            iterations_per_epoch = N // batch_size  # using SGD
        else:
            iterations_per_epoch = 1  # using GD
        num_iters = num_epochs * iterations_per_epoch
        epoch = 0
        best_val_acc = 0.0
        best_model = {}

        loss_history = []
        train_acc_history = []
        val_acc_history = []
        for it in range(num_iters):
            # Parenthesized prints and range() make this method runnable
            # under both Python 2 and 3.
            if it % 500 == 0:
                print('starting iteration %d' % it)

            # get batch of data
            if sample_batches:
                batch_mask = np.random.choice(N, batch_size)
                X_batch = X[batch_mask]
                y_batch = y[batch_mask]
            else:
                # no SGD used, full gradient descent
                X_batch = X
                y_batch = y

            # evaluate cost and gradient
            cost, grads = loss_function(X_batch, model, y_batch, reg)
            loss_history.append(cost)

            # perform a parameter update
            for p in model:
                # compute the parameter step
                if update == 'sgd':
                    dx = -learning_rate * grads[p]
                elif update == 'momentum':
                    if p not in self.step_cache:
                        self.step_cache[p] = np.zeros(grads[p].shape)
                    # Momentum update: decay the cached velocity by
                    # `momentum` and add the scaled negative gradient.
                    self.step_cache[p] = momentum * self.step_cache[p] - learning_rate * grads[p]
                    dx = self.step_cache[p]
                elif update == 'rmsprop':
                    decay_rate = 0.99  # you could also make this an option
                    if p not in self.step_cache:
                        self.step_cache[p] = np.zeros(grads[p].shape)
                    # RMSProp: keep a decaying average of squared gradients
                    # and normalize the step by its root (1e-8 smoothing).
                    self.step_cache[p] = decay_rate * self.step_cache[p] + (1 - decay_rate) * (grads[p] ** 2)
                    dx = -learning_rate * grads[p] / np.sqrt(self.step_cache[p] + 1e-8)
                else:
                    raise ValueError('Unrecognized update type "%s"' % update)

                # update the parameters
                model[p] += dx

            # every epoch perform an evaluation on the validation set
            first_it = (it == 0)
            epoch_end = (it + 1) % iterations_per_epoch == 0
            acc_check = (acc_frequency is not None and it % acc_frequency == 0)
            if first_it or epoch_end or acc_check:
                if it > 0 and epoch_end:
                    # decay the learning rate
                    learning_rate *= learning_rate_decay
                    epoch += 1

                # evaluate train accuracy (on a 1000-sample subset when the
                # training set is large, to keep this cheap)
                if N > 1000:
                    train_mask = np.random.choice(N, 1000)
                    X_train_subset = X[train_mask]
                    y_train_subset = y[train_mask]
                else:
                    X_train_subset = X
                    y_train_subset = y
                scores_train = loss_function(X_train_subset, model)
                y_pred_train = np.argmax(scores_train, axis=1)
                train_acc = np.mean(y_pred_train == y_train_subset)
                train_acc_history.append(train_acc)

                # evaluate val accuracy
                scores_val = loss_function(X_val, model)
                y_pred_val = np.argmax(scores_val, axis=1)
                val_acc = np.mean(y_pred_val == y_val)
                val_acc_history.append(val_acc)

                # keep track of the best model based on validation accuracy
                if val_acc > best_val_acc:
                    # make a copy of the model
                    best_val_acc = val_acc
                    best_model = {}
                    for p in model:
                        best_model[p] = model[p].copy()

                # print progress if needed
                if verbose:
                    print('Finished epoch %d / %d: cost %f, train: %f, val %f, lr %e'
                          % (epoch, num_epochs, cost, train_acc, val_acc, learning_rate))

        if verbose:
            print('finished optimization. best validation accuracy: %f' % (best_val_acc, ))
        # return the best model and the training history statistics
        return best_model, loss_history, train_acc_history, val_acc_history
| {
"content_hash": "3aa1b9a77d6e8e2a2eaabe5e1a351f13",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 103,
"avg_line_length": 39.23463687150838,
"alnum_prop": 0.5823722056101381,
"repo_name": "Hex-iang/cs231n-practice",
"id": "1a347b7a4d813bda20e345004372629dd1b1bbec",
"size": "7023",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hw2/cs231n/classifier_trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151388"
},
{
"name": "Shell",
"bytes": "1296"
}
],
"symlink_target": ""
} |
__author__ = 'evancox'
import numpy as np
import hashlib
import os
import shutil
import subprocess
import logging
from lxml import etree
from sklearn_pmml.convert import TransformationContext
from sklearn_pmml.convert.features import *
_TARGET = [0, 1, 2]
_TARGET_NAME = 'y'
_TEST_DIR = 'jpmml_test_data'
logging.basicConfig(format='%(asctime)s %(message)s')
#Adapted from http://stackoverflow.com/questions/1724693/find-a-file-in-python
# Adapted from http://stackoverflow.com/questions/1724693/find-a-file-in-python
def find_file_or_dir(name):
    """Walk the tree rooted at the current directory and return the path
    of the first file or directory named *name*, or None if absent."""
    for dirpath, dirnames, filenames in os.walk('.'):
        if name in filenames or name in dirnames:
            return os.path.join(dirpath, name)
    return None
class JPMMLTest():
    """Shared harness for integration tests comparing sklearn predictions
    against the JPMML CSV evaluator.

    Subclasses provide the ``output`` property and are expected to set
    ``self.converter``, ``self.x``, ``self.y`` and ``self._model``
    (typically via ``init_data``) before calling ``setup_jpmml_test``.
    """

    @staticmethod
    def can_run():
        """Return True when both java and maven are available on the PATH."""
        try:
            subprocess.check_call(['java', '-version'])
        except OSError:
            logging.warning("Couldn't find java to run JPMML integration tests")
            return False
        try:
            subprocess.check_call(['mvn', '-version'])
        except OSError:
            # Fixed copy-paste bug: this branch fires when *maven* is missing,
            # but the original message also said "java".
            logging.warning("Couldn't find maven to run JPMML integration tests")
            return False
        return True

    @staticmethod
    def init_jpmml():
        """Package the jpmml-csv-evaluator jar with maven."""
        result = subprocess.call(['mvn', '-q', 'clean', 'package', '-f', find_file_or_dir('jpmml-csv-evaluator')])
        assert result == 0, "Unable to package jpmml csv evaluator"
        return True

    # taken from http://stackoverflow.com/questions/18159221/remove-namespace-and-prefix-from-xml-in-python-using-lxml
    @staticmethod
    def remove_namespace(doc, namespace):
        """Strip *namespace* from every element tag of the lxml tree *doc*."""
        ns = u'{%s}' % namespace
        nsl = len(ns)
        for elem in doc.getiterator():
            if elem.tag.startswith(ns):
                elem.tag = elem.tag[nsl:]

    @property
    def model(self):
        # NOTE(review): if _model was never assigned at all this raises
        # AttributeError rather than NotImplementedError — confirm intent.
        if self._model is None:
            raise NotImplementedError()
        return self._model

    @model.setter
    def model(self, model):
        self._model = model

    @property
    def output(self):
        """Output feature of the model; must be provided by subclasses."""
        raise NotImplementedError()

    def setup_jpmml_test(self):
        """Export the model to PMML, run it through the JPMML evaluator and
        return the evaluator's predictions as a DataFrame (or None when the
        required tooling is unavailable)."""
        if not JPMMLTest.can_run():
            logging.warning("Can't run regression test, java and/or maven not installed")
            return None
        if os.path.exists(_TEST_DIR):
            shutil.rmtree(_TEST_DIR)
        os.makedirs(_TEST_DIR)
        # evancox This is a hack to allow us to mark the version down as 4.1 so we can run with the BSD version of JPMML rather than the AGPL one
        xml = self.converter.pmml().toxml("utf-8")
        pmml = etree.fromstring(xml)
        pmml.set('version', '4.1')
        JPMMLTest.remove_namespace(pmml, 'http://www.dmg.org/PMML-4_2')
        xml = etree.tostring(pmml, pretty_print=True)
        xml = xml.replace('http://www.dmg.org/PMML-4_2', 'http://www.dmg.org/PMML-4_1')

        # Name the scratch files after the document hash so reruns with the
        # same model reuse deterministic paths.
        pmml_hash = hashlib.md5(xml).hexdigest()
        pmml_file_path = os.path.join(_TEST_DIR, pmml_hash + '.pmml')
        with open(pmml_file_path, 'w') as pmml_file:
            pmml_file.write(xml)

        input_file_path = os.path.join(_TEST_DIR, pmml_hash + '_input.csv')
        self.x.to_csv(input_file_path, index=False)
        target_file_path = os.path.join(_TEST_DIR, pmml_hash + '_output.csv')

        java_args = ' '.join([os.path.abspath(pmml_file_path), os.path.abspath(input_file_path), os.path.abspath(target_file_path)])
        result = subprocess.call(['mvn', 'exec:java', '-q', '-f', find_file_or_dir('jpmml-csv-evaluator'), '-Dexec.mainClass=sklearn.pmml.jpmml.JPMMLCSVEvaluator', '-Dexec.args=' + java_args])
        self.assertEqual(result, 0, 'Executing jpmml evaluator returned non zero result')
        return pd.read_csv(target_file_path)

    def init_data(self):
        """Create a deterministic random dataset, fit the model and build the
        transformation context used by the converter."""
        np.random.seed(12363)
        self.x = pd.DataFrame(np.random.randn(500, 10))
        self.y = pd.DataFrame({_TARGET_NAME: [np.random.choice([0, 1, 2]) for _ in range(self.x.shape[0])]})
        self._model.fit(self.x, np.ravel(self.y))
        self.ctx = TransformationContext(
            input=[RealNumericFeature(col) for col in list(self.x)],
            derived=[],
            model=[RealNumericFeature(col) for col in list(self.x)],
            output=[self.output]
        )
class JPMMLRegressionTest(JPMMLTest):
    """Mixin: checks that JPMML and sklearn regression outputs agree."""

    @property
    def output(self):
        return IntegerNumericFeature(_TARGET_NAME, _TARGET)

    def test_regression(self):
        predictions_from_jpmml = self.setup_jpmml_test()
        if predictions_from_jpmml is None:
            # Tooling unavailable; nothing to compare.
            return
        predictions_from_sklearn = pd.DataFrame(
            {_TARGET_NAME: self.converter.estimator.predict(self.x)})
        residual = (predictions_from_jpmml[_TARGET_NAME]
                    - predictions_from_sklearn[_TARGET_NAME])
        # Both evaluators must agree to within a small tolerance.
        self.assertTrue(np.all(np.abs(residual) < .001))
class JPMMLClassificationTest(JPMMLTest):
    """Mixin: checks that JPMML and sklearn class probabilities agree."""

    @property
    def output(self):
        return IntegerCategoricalFeature(_TARGET_NAME, _TARGET)

    def test_classification(self):
        jpmml_predictions = self.setup_jpmml_test()
        if jpmml_predictions is None:
            # Tooling unavailable; nothing to compare.
            return
        probabilities = self.converter.estimator.predict_proba(self.x)
        # JPMML exposes one "Probability_<class>" column per class.
        prob_outputs = ['Probability_' + str(label)
                        for label in self.converter.estimator.classes_]
        sklearn_predictions = pd.DataFrame(columns=prob_outputs)
        for row_index, row in enumerate(probabilities):
            sklearn_predictions.loc[row_index] = list(row)
        self.assertTrue(np.all(jpmml_predictions[prob_outputs] == sklearn_predictions))
| {
"content_hash": "a891ae29faccc17a3986b6c044720571",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 192,
"avg_line_length": 33.78125,
"alnum_prop": 0.6329324699352451,
"repo_name": "kod3r/sklearn-pmml",
"id": "fa486c6cd80b497d9901a308b42530444d7f0774",
"size": "5405",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn_pmml/convert/test/jpmml_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "5482"
},
{
"name": "Python",
"bytes": "2275295"
}
],
"symlink_target": ""
} |
"""
WSGI config for theaterwecker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the application is
# created; setdefault preserves an externally provided value.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "theaterwecker.settings")

# Module-level WSGI callable imported by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| {
"content_hash": "5b120afa04f1dd0e3c7462582e966b36",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.25,
"alnum_prop": 0.7772277227722773,
"repo_name": "CodeforChemnitz/TheaterWecker",
"id": "6e02c34fb009ed55080647db731f94334d9b4fa5",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/theaterwecker/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11055"
},
{
"name": "HTML",
"bytes": "13962"
},
{
"name": "Java",
"bytes": "1639"
},
{
"name": "JavaScript",
"bytes": "35909"
},
{
"name": "Objective-C",
"bytes": "3125"
},
{
"name": "Python",
"bytes": "63414"
},
{
"name": "Ruby",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "1376"
}
],
"symlink_target": ""
} |
import sys
import pytest
import shutil
import numpy as np
import pandas as pd
from pysd.tools.benchmarking import assert_frames_close
from pysd import read_vensim, load
@pytest.fixture(scope="module")
def data_folder(_root):
    """Folder holding the data-model fixtures for these tests."""
    return _root.joinpath("more-tests/data_model/")
@pytest.fixture(scope="module")
def data_model(data_folder):
    """Path to the Vensim model exercised by every test in this module."""
    return data_folder.joinpath("test_data_model.mdl")
@pytest.fixture
def data_files(data_files_short, data_folder):
    """Expand short data-file names into full paths under *data_folder*.

    Mirrors the shapes PySD accepts for ``data_files``: a single name
    (str), a list of names, or a mapping of name -> variables.
    """
    if isinstance(data_files_short, str):
        return data_folder.joinpath(data_files_short)
    if isinstance(data_files_short, list):
        return [data_folder.joinpath(name) for name in data_files_short]
    return {
        data_folder.joinpath(name): variables
        for name, variables in data_files_short.items()
    }
# Time steps 0..10 shared by all expected DataFrames below.
times = np.arange(11)
@pytest.mark.parametrize(
    "data_files_short,expected",
    [
        (  # one_file
            "data1.tab",
            pd.DataFrame(
                index=times,
                data={'var1': times, "var2": 2*times, "var3": 3*times}
            )
        ),
        (  # two_files: later files must not override earlier matches
            ["data3.tab",
             "data1.tab"],
            pd.DataFrame(
                index=times,
                data={'var1': -times, "var2": -2*times, "var3": 3*times}
            )
        ),
        (  # transposed_file
            ["data2.tab"],
            pd.DataFrame(
                index=times,
                data={'var1': times-5, "var2": 2*times-5, "var3": 3*times-5}
            )
        ),
        (  # dict_file: explicit variable -> file assignment
            {"data2.tab": ["\"data-3\""],
             "data1.tab": ["data_1", "Data 2"]},
            pd.DataFrame(
                index=times,
                data={'var1': times, "var2": 2*times, "var3": 3*times-5}
            )
        )
    ],
    ids=["one_file", "two_files", "transposed_file", "dict_file"]
)
class TestPySDData:
    """Runs the data model against several data_files argument shapes."""

    @pytest.fixture
    def model(self, data_model, data_files, shared_tmpdir):
        """Returns the model, translating the .mdl only on first use."""
        # translated file
        file = shared_tmpdir.joinpath(data_model.with_suffix(".py").name)
        if file.is_file():
            # load already translated file
            return load(file, data_files)
        else:
            # copy mdl file to tmp_dir and translate it
            file = shared_tmpdir.joinpath(data_model.name)
            shutil.copy(data_model, file)
            return read_vensim(file, data_files)

    def test_get_data_and_run(self, model, expected):
        # The model run must reproduce the parametrized expected frame.
        assert_frames_close(
            model.run(return_columns=["var1", "var2", "var3"]),
            expected)

    def test_modify_data(self, model, expected):
        # Data series can be overridden at run time through params=.
        out = model.run(params={
            "var1": pd.Series(index=[1, 3, 7], data=[10, 20, 30]),
            "var2": 10
        })
        assert (out["var2"] == 10).all()
        # var1 is interpolated between the supplied points and held
        # constant outside them.
        assert (
            out["var1"] == [10, 10, 15, 20, 22.5, 25, 27.5, 30, 30, 30, 30]
        ).all()
class TestPySDDataErrors:
    """Error conditions around missing or inconsistent data files."""

    def model(self, data_model, data_files, shared_tmpdir):
        # Plain helper (deliberately NOT a fixture, unlike TestPySDData):
        # each test passes its own data_files at call time.
        # translated file
        file = shared_tmpdir.joinpath(data_model.with_suffix(".py").name)
        if file.is_file():
            # load already translated file
            return load(file, data_files)
        else:
            # copy mdl file to tmp_dir and translate it
            file = shared_tmpdir.joinpath(data_model.name)
            shutil.copy(data_model, file)
            return read_vensim(file, data_files)

    def test_run_error(self, data_model, shared_tmpdir):
        # Running without supplying any data files must fail at the first
        # attempt to interpolate a data variable.
        model = self.model(data_model, [], shared_tmpdir)
        error_message = "Trying to interpolate data variable before loading"\
            + " the data..."
        with pytest.raises(ValueError, match=error_message):
            model.run(return_columns=["var1", "var2", "var3"])

    @pytest.mark.parametrize(
        "data_files_short,raise_type,error_message",
        [
            (  # missing_data
                "data3.tab",
                ValueError,
                "Data for \"data-3\" not found in %s"
            ),
            (  # data_variable_not_found_from_dict_file
                {"data1.tab": ["non-existing-var"]},
                ValueError,
                "'non-existing-var' not found as model data variable"
            ),
        ],
        ids=["missing_data", "data_variable_not_found_from_dict_file"]
    )
    @pytest.mark.skipif(
        sys.platform.startswith("win"),
        reason=r"bad scape \e")
    def test_loading_error(self, data_model, data_files, raise_type,
                           error_message, shared_tmpdir):
        # Errors must surface at model construction/loading time.
        with pytest.raises(raise_type, match=error_message % (data_files)):
            self.model(
                data_model, data_files, shared_tmpdir)
| {
"content_hash": "a94208566ce41a6dd0d51cd6a76c8e00",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 77,
"avg_line_length": 30.941176470588236,
"alnum_prop": 0.5386565272496832,
"repo_name": "JamesPHoughton/pysd",
"id": "9bd650970286640b7c270ba52aa8a0522717062d",
"size": "4734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytest_types/data/pytest_data_with_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "850"
},
{
"name": "Python",
"bytes": "771517"
}
],
"symlink_target": ""
} |
from .translate2csv import translate2csv
| {
"content_hash": "0e25ee6afd5d7933933e35bba0c08b4f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 41,
"alnum_prop": 0.8780487804878049,
"repo_name": "duboviy/study_languages",
"id": "485be08b7fe52ece0484392b897acf9887d37c3c",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "translate2csv/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7753"
}
],
"symlink_target": ""
} |
import os
import socket
import time
from django.core.cache import get_cache
from django.core.files.base import ContentFile
from django.utils import simplejson
from django.utils.encoding import smart_str
from django.utils.functional import SimpleLazyObject
from hashlib import md5
from django.utils.importlib import import_module
from compressor.conf import settings
from compressor.storage import default_storage
from compressor.utils import get_mod_func
# Memoized cache-key function; resolved lazily by get_cachekey().
_cachekey_func = None
def get_hexdigest(plaintext, length=None):
    """Return the MD5 hex digest of *plaintext*.
    If *length* is truthy, the digest is truncated to that many
    leading characters.
    """
    digest = md5(smart_str(plaintext)).hexdigest()
    return digest[:length] if length else digest
def simple_cachekey(key):
    """Build a host-independent cache key under the django_compressor prefix."""
    suffix = smart_str(key)
    return 'django_compressor.%s' % suffix
def socket_cachekey(key):
    """Build a cache key that also embeds the local hostname."""
    host = socket.gethostname()
    return "django_compressor.%s.%s" % (host, smart_str(key))
def get_cachekey(*args, **kwargs):
    """Dispatch to the configured cache-key function.
    The function named by settings.COMPRESS_CACHE_KEY_FUNCTION is imported
    once and memoized in the module-level _cachekey_func.
    """
    global _cachekey_func
    if _cachekey_func is None:
        try:
            mod_name, func_name = get_mod_func(
                settings.COMPRESS_CACHE_KEY_FUNCTION)
            _cachekey_func = getattr(import_module(mod_name), func_name)
        # Python 2 except syntax -- this module predates Python 3 support.
        except (AttributeError, ImportError), e:
            raise ImportError("Couldn't import cache key function %s: %s" %
                              (settings.COMPRESS_CACHE_KEY_FUNCTION, e))
    return _cachekey_func(*args, **kwargs)
def get_mtime_cachekey(filename):
    """Cache key under which *filename*'s modification time is stored."""
    digest = get_hexdigest(filename)
    return get_cachekey("mtime.%s" % digest)
def get_offline_hexdigest(source):
    """Digest a template nodelist for offline-compression lookups.
    Each node contributes its `s` attribute when present, otherwise the
    node itself.
    """
    parts = [smart_str(getattr(node, 's', node)) for node in source]
    return get_hexdigest(parts)
def get_offline_jinja_hexdigest(nodelist):
    """Digest a Jinja nodelist via its unicode representation (Python 2)."""
    return get_hexdigest(unicode(nodelist))
def get_offline_cachekey(source):
    """Cache key under which offline-compressed output is stored."""
    digest = get_offline_hexdigest(source)
    return get_cachekey("offline.%s" % digest)
def get_offline_manifest_filename():
    """Path of the offline manifest inside the compressor output dir."""
    base = settings.COMPRESS_OUTPUT_DIR.strip('/')
    return os.path.join(base, settings.COMPRESS_OFFLINE_MANIFEST)
def get_offline_manifest():
    """Load the offline manifest from storage, or {} if it does not exist."""
    filename = get_offline_manifest_filename()
    if not default_storage.exists(filename):
        return {}
    return simplejson.load(default_storage.open(filename))
def write_offline_manifest(manifest):
    """Serialize *manifest* as indented JSON and save it to default storage."""
    content = ContentFile(simplejson.dumps(manifest, indent=2))
    default_storage.save(get_offline_manifest_filename(), content)
def get_templatetag_cachekey(compressor, mode, kind):
    """Cache key for one rendered {% compress %} template tag invocation."""
    key = "templatetag.%s.%s.%s" % (compressor.cachekey, mode, kind)
    return get_cachekey(key)
def get_mtime(filename):
    """Return the mtime of *filename*, optionally memoized in the cache.
    When COMPRESS_MTIME_DELAY is set, the value is cached for that many
    seconds to avoid repeated stat() calls.
    """
    if not settings.COMPRESS_MTIME_DELAY:
        return os.path.getmtime(filename)
    key = get_mtime_cachekey(filename)
    mtime = cache.get(key)
    if mtime is None:
        mtime = os.path.getmtime(filename)
        cache.set(key, mtime, settings.COMPRESS_MTIME_DELAY)
    return mtime
def get_hashed_mtime(filename, length=12):
    """Short hex digest of *filename*'s mtime, or None if it can't be read."""
    try:
        real_path = os.path.realpath(filename)
        mtime = str(int(get_mtime(real_path)))
    except OSError:
        return None
    return get_hexdigest(mtime, length)
def cache_get(key):
    """Fetch a mint-cached value, returning None while it revalidates.
    Entries are (value, refresh_time, refreshed) tuples written by
    cache_set().  Past refresh_time, the stale value is re-stored for
    COMPRESS_MINT_DELAY seconds (marked refreshed) and None is returned,
    so only the first caller recomputes the real value.
    """
    packed = cache.get(key)
    if packed is None:
        return None
    value, refresh_at, refreshed = packed
    if time.time() <= refresh_at or refreshed:
        return value
    # Stale: park the old value briefly while one caller rebuilds it.
    cache_set(key, value, refreshed=True,
              timeout=settings.COMPRESS_MINT_DELAY)
    return None
def cache_set(key, val, refreshed=False, timeout=None):
    """Store *val* together with its mint-caching metadata.
    The entry actually lives COMPRESS_MINT_DELAY seconds longer than
    *timeout*, so a stale copy remains available while refreshing.
    """
    if timeout is None:
        timeout = settings.COMPRESS_REBUILD_TIMEOUT
    refresh_at = time.time() + timeout
    packed = (val, refresh_at, refreshed)
    return cache.set(key, packed, timeout + settings.COMPRESS_MINT_DELAY)
# Lazily resolved so settings are read on first use, not at import time.
cache = SimpleLazyObject(lambda: get_cache(settings.COMPRESS_CACHE_BACKEND))
| {
"content_hash": "5a5787cf95c9b567620f8b622313445e",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 77,
"avg_line_length": 30.085271317829456,
"alnum_prop": 0.6851326977583098,
"repo_name": "drawquest/drawquest-web",
"id": "e8a7749035bc0eade2c2bf91b57159bf4a291e2d",
"size": "3881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/compressor/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
} |
"""
RPC-related utility functions
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '8/24/15'
# Imports
# Stdlib
import argparse
import enum
import math
import re
import sys
# Third-party
from thrift.Thrift import TType
# Constants
# Marker appended to output lines that need manual post-conversion attention.
FIXME = '// FIXME'
# Classes and functions
class InvalidField(Exception):
    """Raised when a Thrift field value does not match its declared type."""
    def __init__(self, dtype, name, value):
        message = 'Value "{}" of field {} is not of type "{}"'.format(
            value, name, dtype.name)
        Exception.__init__(self, message)
class ThriftTypes(enum.Enum):
    """Enum mirror of thrift.Thrift.TType codes, grouped by verifiability."""
    # can validate these
    BOOL = TType.BOOL
    DOUBLE = TType.DOUBLE
    I16 = TType.I16
    I64 = TType.I64
    MAP = TType.MAP
    VOID = TType.VOID
    I32 = TType.I32
    I08 = TType.I08
    BYTE = TType.BYTE
    STRING = TType.STRING
    UTF16 = TType.UTF16
    UTF8 = TType.UTF8
    UTF7 = TType.UTF7
    LIST = TType.LIST
    SET = TType.SET
    # not sure how to validate these
    STOP = TType.STOP
    STRUCT = TType.STRUCT
class ThriftInputValue(object):
    """Check one value against a Thrift type code.
    After construction, ``self.valid`` is True when *value* is an
    acceptable Python representation of *dtype*, and ``self.empty`` is
    True when *value* was None (None is always treated as valid).
    """
    def __init__(self, dtype, value):
        if value is None:
            self.valid = True  # XXX: warning?
            self.empty = True
            return
        self.empty = False
        self.valid = True  # if not known, it's valid
        if dtype.value == TType.STRING:
            self.valid = isinstance(value, (str, unicode))
        elif dtype.value == TType.BOOL:
            self.valid = isinstance(value, bool)
        elif dtype.value == TType.DOUBLE:
            # An integer value is acceptable for a floating-point field.
            self.valid = isinstance(value, (int, float))
        elif dtype.value in (TType.I08, TType.I16, TType.I32, TType.I64,
                             TType.BYTE):
            # Allow floats with no fractional part to get converted.
            # Floats with non-zero fractional parts will fail.
            if isinstance(value, float) and math.floor(value) == value:
                value = int(value)
            self.valid = isinstance(value, int)
        elif dtype.value in (TType.STRING, TType.UTF16, TType.UTF8,
                             TType.UTF7):
            # Note: TType.STRING is already handled by the first branch;
            # this one only fires for the UTF* variants.
            self.valid = isinstance(value, (str, unicode))
        elif dtype.value in (TType.LIST, TType.SET):
            self.valid = isinstance(value, (tuple, list, set))
        elif dtype.value == TType.MAP:
            # BUG FIX: the original checked isinstance(value, TType.MAP),
            # but TType.MAP is an integer type code, not a class, so that
            # call raised TypeError.  Thrift map values arrive as dicts.
            self.valid = isinstance(value, dict)
def thrift_validate(obj):
    """Validate an auto-generated datatype from the Thrift `ttypes` module.
    The fields to check are discovered automatically from the object's
    `thrift_spec` attribute.
    Validation has the following features:
    - unicode or non-unicode strings are equivalent
    - an integer value is valid for a floating-point field
    - a floating-point value is valid for an integer field, if and only
      if the value has no fractional part (i.e. floor(value) == value)
    Args:
        obj: Object to validate.
    Return: The input object (for chaining)
    Raises:
        InvalidField: if any field value fails validation.
    """
    assert hasattr(obj, 'thrift_spec')
    for spec in getattr(obj, 'thrift_spec'):
        if spec is None:
            continue  # padding entry in the spec tuple
        field_type, field_name = ThriftTypes(spec[1]), spec[2]
        field_value = getattr(obj, field_name)
        check = ThriftInputValue(field_type, field_value)
        if not check.valid:
            raise InvalidField(field_type, field_name, field_value)
    return obj
class KIDLToThriftConverter(object):
    """Convert KIDL to Thrift IDL
    Works line by line: `typedef structure { ... }` blocks become Thrift
    structs with numbered fields, container typedefs are mapped through
    CONTAINER_MAPPING, and anything that cannot be converted
    automatically is tagged with the FIXME marker.
    """
    # KIDL container keyword -> Thrift container keyword.
    CONTAINER_MAPPING = {'mapping': 'map',
                         'list': 'list',
                         'tuple': 'list'}
    def __init__(self, lines):
        # lines: list of KIDL source lines (no trailing newlines).
        self._ln = lines
    def process(self):
        # Walk the input once, dispatching on the construct found at each
        # line; returns the converted lines.
        i = 0
        result = []
        while i < len(self._ln):
            s = self._ln[i]
            if re.match('\s*typedef\s+structure\s*\{', s):
                r, offs = self.change_struct(self._ln, i)
                i += offs
                result.extend(r)
            elif re.match('\s*typedef\s+', s):
                o = s[:s.rfind(';')]
                # Parameterized typedefs need manual conversion.
                if '<' in o:
                    o += FIXME
                result.append(o)
                i += 1
            else:
                result.append(s)
                i += 1
        return result
    def change_struct(self, ln, p):
        # Convert the structure block starting at ln[p] into a Thrift
        # struct with numbered fields; returns (converted_lines, consumed).
        result = ['struct {} {{']
        i, done, fieldnum = 0, False, 1
        while not done:
            s = ln[p + i]
            if re.match('\s*\}', s):
                o = '}'
                m = re.match('\s*\}\s*(\w+)', s)
                name = m.group(1)
                # Backfill the struct name into the opening line.
                result[0] = result[0].format(name)
                done = True
            elif re.match('.*;\s*', s):
                num = '{:d}: '.format(fieldnum)
                if re.match('\s*(mapping|list|tuple)\s*<', s):
                    s = self.change_container(s)
                o = num + s[:s.rfind(';')]
                fieldnum += 1
            else:
                # NOTE(review): the opening `typedef structure {` line has
                # no ';' so it falls through here and is kept verbatim in
                # the output alongside the generated struct header --
                # confirm this duplication is intended.
                o = s
            result.append(o)
            i += 1
        return result, i
    def change_container(self, s):
        # Rewrite a single-level container declaration; nested containers
        # are only flagged with FIXME, not converted.
        if re.search('<.*?<', s):
            return s + FIXME + ';'
        m = re.match('\s*(\w+)\s*<', s)
        ctype = m.group(1)
        m = re.match('\s*{}\s*<\s*((\w+).*?)(,'
                     '\s*(\w+).*?)*>'.format(ctype), s)
        new_ctype = self.CONTAINER_MAPPING[ctype]
        groups = m.groups()
        if groups[-1] is None: # only 1 match
            groups = groups[:2]
        result = ' ' + new_ctype + '<{}>;'.format(', '.join(groups[1::2]))
        return result
def kidl_to_thrift_main():
    """CLI entry point: convert a KIDL file to Thrift IDL.
    Reads the input path (positional arg), writes the converted IDL to
    --ofile or stdout.  Returns 0 on success, -1/-2 on I/O errors.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('file', metavar='path', help='Input filename')
    ap.add_argument('--ofile', help='Output filename', dest='ofile',
                    default=None, metavar='path')
    args = ap.parse_args()
    fname = args.file
    try:
        # FIX: use a context manager so the input handle is closed
        # (the original leaked the file object from open().read()).
        with open(fname) as infile:
            lines = infile.read().split('\n')
    except IOError as err:
        print('Error opening input file: {}'.format(err))
        return -1
    ostream = sys.stdout
    if args.ofile is not None:
        try:
            ostream = open(args.ofile, 'w')
        except IOError as err:
            print('Error opening output file: {}'.format(err))
            return -2
    converter = KIDLToThriftConverter(lines)
    outlines = converter.process()
    try:
        ostream.write('\n'.join(outlines))
    finally:
        # Close only file handles we opened; never close sys.stdout.
        if ostream is not sys.stdout:
            ostream.close()
    return 0
if __name__ == '__main__':
sys.exit(kidl_to_thrift_main()) | {
"content_hash": "c49c3c1f46d2415b3da548d81c5e6422",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 86,
"avg_line_length": 31.039408866995075,
"alnum_prop": 0.5300745913347088,
"repo_name": "kbase/data_api",
"id": "68816ef3cad8d8ac2b3a4d36de1a7a24b8beea3d",
"size": "6301",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/doekbase/data_api/rpc_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9594"
},
{
"name": "HTML",
"bytes": "77123"
},
{
"name": "JavaScript",
"bytes": "594489"
},
{
"name": "Jupyter Notebook",
"bytes": "5342297"
},
{
"name": "Makefile",
"bytes": "10254"
},
{
"name": "Perl",
"bytes": "681703"
},
{
"name": "Python",
"bytes": "659006"
},
{
"name": "Shell",
"bytes": "4628"
},
{
"name": "Thrift",
"bytes": "45335"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from lomadee import models
@admin.register(models.Computer)
class ComputerAdmin(admin.ModelAdmin):
    # Columns shown on the Computer changelist page in the Django admin.
    list_display = ('name', 'price', 'cpu', 'ram', 'disk',
                    'is_macbook', 'has_gpu', 'has_ssd')
| {
"content_hash": "b734283327039085862750429e6ce698",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 58,
"avg_line_length": 31.125,
"alnum_prop": 0.6506024096385542,
"repo_name": "msfernandes/facebook-chatbot",
"id": "f6e76088d34f74b70a3a3527e07cca9f616cb5cf",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/lomadee/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50152"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, unicode_literals
from abc import ABCMeta, abstractproperty, abstractmethod
from collections import namedtuple
from distutils.version import LooseVersion
from functools import wraps
from itertools import takewhile, dropwhile
import operator
import re
import sys
try:
from bs4 import BeautifulSoup, PageElement, NavigableString
except ImportError: # pragma: no cover
raise ImportError("Soupy requires beautifulsoup4")
try:
import six
from six.moves import map
assert LooseVersion(six.__version__) >= LooseVersion('1.9')
except(ImportError, AssertionError): # pragma: no cover
raise ImportError("Soupy requires six version 1.9 or later")
__version__ = '0.4.dev'
# Public API of the module.
__all__ = ['Soupy', 'Q', 'Node', 'Scalar', 'Collection',
           'Null', 'NullNode', 'NullCollection',
           'either', 'NullValueError', 'QDebug']
# extract the thing inside string reprs (eg u'abc' -> abc)
QUOTED_STR = re.compile("^[ub]?['\"](.*?)['\"]$")
QDebug = namedtuple('QDebug', ('expr', 'inner_expr', 'val', 'inner_val'))
"""Namedtuple that holds information about a failed expression evaluation."""
@six.add_metaclass(ABCMeta)
class Wrapper(object):
    # Abstract base class for every wrapper type (Node, Scalar,
    # Collection and their Null counterparts).
    @abstractmethod
    def val(self):
        pass # pragma: no cover
    @abstractmethod
    def orelse(self, value):
        pass # pragma: no cover
    def nonnull(self):
        """
        Require that a node is not null
        Null values will raise NullValueError, whereas nonnull
        values return self.
        useful for being strict about portions of queries.
        Examples:
            node.find('a').nonnull().find('b').orelse(3)
        This will raise an error if find('a') doesn't match,
        but provides a fallback if find('b') doesn't match.
        """
        return self
    @abstractmethod
    def isnull(self):
        pass # pragma: no cover
    @abstractmethod
    def map(self, func):
        pass # pragma: no cover
    @abstractmethod
    def apply(self, func):
        pass # pragma: no cover
    @classmethod
    def wrap(cls, value):
        """
        Wrap value in the appropriate wrapper class,
        based upon its type.
        """
        # Already-wrapped values pass through untouched.
        if isinstance(value, Wrapper):
            return value
        # Anything exposing `children` is treated as a soup-like tag.
        if hasattr(value, 'children'):
            return Node(value)
        return Scalar(value)
    def __getitem__(self, key):
        return self.map(operator.itemgetter(key))
    def dump(self, *args, **kwargs):
        """
        Extract derived values into a Scalar(tuple) or Scalar(dict)
        The keyword names passed to this function become keys in
        the resulting dictionary, while positional arguments passed to
        this function become elements in the resulting tuple.
        The positional arguments and keyword values are functions that
        are called on this Node.
        Notes:
            - The input functions are called on the Node, **not** the
              underlying BeautifulSoup element
            - If the function returns a wrapper, it will be unwrapped
            - Only either positional arguments or keyword arguments may
              be passed, not both.
        Example:
            >>> soup = Soupy("<b>hi</b>").find('b')
            >>> data = soup.dump(name=Q.name, text=Q.text).val()
            >>> data == {'text': 'hi', 'name': 'b'}
            True
            >> name, text = soup.dump(Q.name, Q.text).val()
            >> (name, text) == ('hi', 'b')
            True
        """
        if args and kwargs:
            raise ValueError('Cannot pass both arguments and keywords to dump')
        if args:
            result = tuple(_unwrap(self.apply(func)) for func in args)
        else:
            result = dict((name, _unwrap(self.apply(func)))
                          for name, func in kwargs.items())
        return Wrapper.wrap(result)
    @abstractmethod
    def require(self, func, msg='Requirement Violated'):
        pass # pragma: no cover
class NullValueError(ValueError):
    """Signals an attempt to extract the value inside a Null wrapper."""
class QKeyError(KeyError):
    """
    A custom KeyError subclass that better formats
    exception messages raised inside expressions
    """
    def __str__(self):
        # Messages look like "<header>\n\n\t<expression>"; re-repr the
        # expression part and strip the surrounding quotes for display.
        parts = self.args[0].split('\n\n\t')
        return parts[0] + '\n\n\t' + _dequote(repr(parts[1]))
# Masquerade as a plain KeyError in tracebacks (str() for py2 compat).
QKeyError.__name__ = str('KeyError')
@six.python_2_unicode_compatible
class BaseNull(Wrapper):
    """
    This is the base class for null wrappers. Null values are returned
    when the result of a function is ill-defined.
    Nulls are falsy, absorb map/apply, and raise NullValueError when
    their value is extracted.
    """
    def val(self):
        """
        Raise :class:`NullValueError`
        """
        raise NullValueError()
    def orelse(self, value):
        """
        Wraps value and returns the result
        """
        return Wrapper.wrap(value)
    def map(self, func):
        """
        Returns :class:`Null`
        """
        return self
    def apply(self, func):
        """
        Returns :class:`Null`
        """
        return self
    def nonnull(self):
        """
        Raises :class:`NullValueError`
        """
        raise NullValueError()
    def require(self, func, msg="Requirement is violated (wrapper is null)"):
        """
        Raises :class:`NullValueError`
        """
        raise NullValueError()
    def isnull(self):
        """
        Return Scalar(True) if this item is a null value
        """
        return Scalar(True)
    def __setitem__(self, key, val):
        # Assignment on a null is silently ignored.
        pass
    def __bool__(self):
        return False
    __nonzero__ = __bool__
    def __str__(self):
        return "%s()" % type(self).__name__
    __repr__ = __str__
    def __hash__(self):
        return hash(type(self))
    def __eq__(self, other):
        # Comparisons propagate nullness instead of returning a bool.
        return type(self)()
    def __ne__(self, other):
        return type(self)()
@six.python_2_unicode_compatible
class Some(Wrapper):
    # Base class for wrappers that hold an actual (non-null) value.
    def __init__(self, value):
        self._value = value
    def map(self, func):
        """
        Call a function on a wrapper's value, and wrap the result if necessary.
        Parameters:
            func : function(val) -> val
        Examples:
            >>> s = Scalar(3)
            >>> s.map(Q * 2)
            Scalar(6)
        """
        return Wrapper.wrap(_make_callable(func)(self._value))
    def apply(self, func):
        """
        Call a function on a wrapper, and wrap the result if necessary.
        Parameters:
            func: function(wrapper) -> val
        Examples:
            >>> s = Scalar(5)
            >>> s.apply(lambda val: isinstance(val, Scalar))
            Scalar(True)
        """
        return Wrapper.wrap(_make_callable(func)(self))
    def orelse(self, value):
        """
        Provide a fallback value for failed matches.
        Examples:
            >>> Scalar(5).orelse(10).val()
            5
            >>> Null().orelse(10).val()
            10
        """
        return self
    def val(self):
        """
        Return the value inside a wrapper.
        Raises :class:`NullValueError` if called on a Null object
        """
        return self._value
    def require(self, func, msg="Requirement violated"):
        """
        Assert that self.apply(func) is True.
        Parameters:
            func : func(wrapper)
            msg : str
                The error message to display on failure
        Returns:
            If self.apply(func) is True, returns self.
            Otherwise, raises NullValueError.
        """
        if self.apply(func):
            return self
        raise NullValueError(msg)
    def isnull(self):
        """
        Return Scalar(True) if this item is a null value
        """
        return Scalar(False)
    def __str__(self):
        # returns unicode
        # six builds appropriate py2/3 methods from this
        return "%s(%s)" % (type(self).__name__, _repr(self._value))
    def __repr__(self):
        return repr(self.__str__())[1:-1] # trim off quotes
    def __setitem__(self, key, val):
        return self.map(Q.__setitem__(key, val))
    def __hash__(self):
        return hash(self._value)
    def __eq__(self, other):
        # Equality maps over the held value, yielding a wrapped bool.
        return self.map(lambda x: x == other)
    def __ne__(self, other):
        return self.map(lambda x: x != other)
class Null(BaseNull):
    """
    The class for ill-defined Scalars.
    Attribute access, calls, comparisons and arithmetic all yield
    another Null; len() is undefined.
    """
    def __getattr__(self, attr):
        return Null()
    def __call__(self, *args, **kwargs):
        return Null()
    def __gt__(self, other):
        return Null()
    def __ge__(self, other):
        return Null()
    def __lt__(self, other):
        return Null()
    def __le__(self, other):
        return Null()
    def __len__(self):
        raise TypeError("Null has no len()")
    def __add__(self, other):
        return Null()
    def __sub__(self, other):
        return Null()
    def __mul__(self, other):
        return Null()
    def __div__(self, other):
        # Python 2 division protocol; __truediv__ covers Python 3.
        return Null()
    def __floordiv__(self, other):
        return Null()
    def __pow__(self, other):
        return Null()
    def __mod__(self, other):
        return Null()
    def __truediv__(self, other):
        return Null()
    def __hash__(self):
        # Explicitly delegate to BaseNull's type-based hash.
        return super(Null, self).__hash__()
class Scalar(Some):
    """
    A wrapper around single values.
    Scalars support boolean testing (<, ==, etc), and
    use the wrapped value in the comparison. They return
    the result as a Scalar(bool).
    Calling a Scalar calls the wrapped value, and wraps
    the result.
    Examples:
        >>> s = Scalar(3)
        >>> s > 2
        Scalar(True)
        >>> s.val()
        3
        >>> s + 5
        Scalar(8)
        >>> s + s
        Scalar(6)
        >>> bool(Scalar(3))
        True
        >>> Scalar(lambda x: x+2)(5)
        Scalar(7)
    """
    def __getattr__(self, attr):
        # Unknown attributes are looked up on the wrapped value.
        return self.map(operator.attrgetter(attr))
    def __call__(self, *args, **kwargs):
        return self.map(operator.methodcaller('__call__', *args, **kwargs))
    def __gt__(self, other):
        return self.map(lambda x: x > other)
    def __ge__(self, other):
        return self.map(lambda x: x >= other)
    def __lt__(self, other):
        return self.map(lambda x: x < other)
    def __le__(self, other):
        return self.map(lambda x: x <= other)
    def __bool__(self):
        return bool(self._value)
    __nonzero__ = __bool__
    def __len__(self):
        return len(self._value)
    # Arithmetic: a Null operand is contagious and short-circuits to Null.
    def __add__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q + _unwrap(other))
    def __sub__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q - _unwrap(other))
    def __mul__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q * _unwrap(other))
    def __div__(self, other):
        # Python 2 division protocol; __truediv__ covers Python 3.
        if isinstance(other, BaseNull):
            return other
        return self.map(Q / _unwrap(other))
    def __floordiv__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q // _unwrap(other))
    def __pow__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q ** _unwrap(other))
    def __mod__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q % _unwrap(other))
    def __truediv__(self, other):
        if isinstance(other, BaseNull):
            return other
        return self.map(Q / _unwrap(other))
class Collection(Some):
    """
    Collection's store lists of other wrappers.
    They support most of the list methods (len, iter, getitem, etc).
    """
    def __init__(self, items):
        super(Collection, self).__init__(list(items))
        # Alias for readability; both names refer to the same list.
        self._items = self._value
        self._assert_items_are_wrappers()
    def _assert_items_are_wrappers(self):
        # Invariant: every element is itself a Wrapper.
        for item in self:
            if not isinstance(item, Wrapper):
                raise TypeError("Collection can only hold other wrappers")
    def val(self):
        """
        Unwraps each item in the collection, and returns as a list
        """
        return list(self.iter_val())
    def first(self):
        """
        Return the first element of the collection, or :class:`Null`
        """
        return self[0]
    def iter_val(self):
        """
        An iterator version of :meth:`val`
        """
        return (item.val() for item in self._items)
    def each(self, *funcs):
        """
        Call `func` on each element in the collection.
        If multiple functions are provided, each item
        in the output will be a tuple of each
        func(item) in self.
        Returns a new Collection.
        Example:
            >>> col = Collection([Scalar(1), Scalar(2)])
            >>> col.each(Q * 10)
            Collection([Scalar(10), Scalar(20)])
            >>> col.each(Q * 10, Q - 1)
            Collection([Scalar((10, 0)), Scalar((20, 1))])
        """
        funcs = list(map(_make_callable, funcs))
        if len(funcs) == 1:
            return Collection(map(funcs[0], self._items))
        # Multiple funcs: each output item is a Scalar of unwrapped results.
        tupler = lambda item: Scalar(
            tuple(_unwrap(func(item)) for func in funcs))
        return Collection(map(tupler, self._items))
    def exclude(self, func=None):
        """
        Return a new Collection excluding some items
        Parameters:
            func : function(Node) -> Scalar
                A function that, when called on each item
                in the collection, returns a boolean-like
                value. If no function is provided, then
                truthy items will be removed.
        Returns:
            A new Collection consisting of the items
            where bool(func(item)) == False
        """
        func = _make_callable(func)
        inverse = lambda x: not func(x)
        return self.filter(inverse)
    def filter(self, func=None):
        """
        Return a new Collection with some items removed.
        Parameters:
            func : function(Node) -> Scalar
                A function that, when called on each item
                in the collection, returns a boolean-like
                value. If no function is provided, then
                false-y items will be removed.
        Returns:
            A new Collection consisting of the items
            where bool(func(item)) == True
        Examples:
            node.find_all('a').filter(Q['href'].startswith('http'))
        """
        func = _make_callable(func)
        return Collection(filter(func, self._items))
    def takewhile(self, func=None):
        """
        Return a new Collection with the last few items removed.
        Parameters:
            func : function(Node) -> Node
        Returns:
            A new Collection, discarding all items
            at and after the first item where bool(func(item)) == False
        Examples:
            node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
        """
        func = _make_callable(func)
        return Collection(takewhile(func, self._items))
    def dropwhile(self, func=None):
        """
        Return a new Collection with the first few items removed.
        Parameters:
            func : function(Node) -> Node
        Returns:
            A new Collection, discarding all items
            before the first item where bool(func(item)) == True
        """
        func = _make_callable(func)
        return Collection(dropwhile(func, self._items))
    def __getitem__(self, key):
        if isinstance(key, int):
            try:
                return self._items[key]
            except IndexError:
                # Out-of-range integer indexing degrades to a NullNode.
                return NullNode()
        # slice
        return Collection(list(self._items).__getitem__(key))
    def dump(self, *args, **kwargs):
        """
        Build a list of dicts, by calling :meth:`Node.dump`
        on each item.
        Each keyword provides a function that extracts a value
        from a Node.
        Examples:
            >>> c = Collection([Scalar(1), Scalar(2)])
            >>> c.dump(x2=Q*2, m1=Q-1).val()
            [{'x2': 2, 'm1': 0}, {'x2': 4, 'm1': 1}]
        """
        return self.each(Q.dump(*args, **kwargs))
    def __len__(self):
        return self.map(len).val()
    def count(self):
        """
        Return the number of items in the collection, as a :class:`Scalar`
        """
        return Scalar(len(self))
    def zip(self, *others):
        """
        Zip the items of this collection with one or more
        other sequences, and wrap the result.
        Unlike Python's zip, all sequences must be the same length.
        Parameters:
            others: One or more iterables or Collections
        Returns:
            A new collection.
        Examples:
            >>> c1 = Collection([Scalar(1), Scalar(2)])
            >>> c2 = Collection([Scalar(3), Scalar(4)])
            >>> c1.zip(c2).val()
            [(1, 3), (2, 4)]
        """
        args = [_unwrap(item) for item in (self,) + others]
        ct = self.count()
        if not all(len(arg) == ct for arg in args):
            raise ValueError("Arguments are not all the same length")
        return Collection(map(Wrapper.wrap, zip(*args)))
    def dictzip(self, keys):
        """
        Turn this collection into a Scalar(dict), by zipping keys and items.
        Parameters:
            keys: list or Collection of NavigableStrings
                The keys of the dictionary
        Examples:
            >>> c = Collection([Scalar(1), Scalar(2)])
            >>> c.dictzip(['a', 'b']).val() == {'a': 1, 'b': 2}
            True
        """
        return Scalar(dict(zip(_unwrap(keys), self.val())))
    def __iter__(self):
        for item in self._items:
            yield item
    def all(self):
        """
        Scalar(True) if all items are truthy, or collection is empty.
        """
        return self.map(all)
    def any(self):
        """
        Scalar(True) if any items are truthy. False if empty.
        """
        return self.map(any)
    def none(self):
        """
        Scalar(True) if no items are truthy, or collection is empty.
        """
        return self.map(lambda items: not any(items))
    def __bool__(self):
        return bool(self._items)
    __nonzero__ = __bool__
class NullCollection(BaseNull, Collection):
    """
    Represents an invalid Collection.
    Returned by some methods on other Null objects.
    """
    def __init__(self):
        # Deliberately skip Collection.__init__: there are no items.
        pass
    def iter_val(self):
        raise NullValueError()
    def each(self, func):
        return self
    def filter(self, func=None):
        return self
    def takewhile(self, func=None):
        return self
    def dropwhile(self, func=None):
        return self
    def first(self):
        return NullNode() # XXX don't like this assumption
    def __getitem__(self, key):
        if isinstance(key, int):
            return NullNode() # XXX don't like this assumption
        # slice
        return self
    def dump(self, *args, **kwargs):
        return NullCollection()
    def count(self):
        return Scalar(0)
@six.add_metaclass(ABCMeta)
class NodeLike(object):
    """Abstract interface shared by :class:`Node` and its null variant."""
    # should return NodeLike
    parent = abstractproperty()
    next_sibling = abstractproperty()
    previous_sibling = abstractproperty()
    # should return scalar-like
    text = abstractproperty()
    attrs = abstractproperty()
    name = abstractproperty()
    # should return CollectionLike
    children = abstractproperty()
    contents = abstractproperty()
    descendants = abstractproperty()
    parents = abstractproperty()
    next_siblings = abstractproperty()
    previous_siblings = abstractproperty()
    @abstractmethod
    def find(self, name=None, attrs={}, recursive=True,
             text=None, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def find_all(self, name=None, attrs={}, recursive=True,
                 text=None, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def select(self, selector):
        pass # pragma: no cover
    @abstractmethod
    def find_next_sibling(self, *args, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def find_previous_sibling(self, *args, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def find_parent(self, *args, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def find_next_siblings(self, *args, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def find_previous_siblings(self, *args, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def find_parents(self, *args, **kwargs):
        pass # pragma: no cover
    @abstractmethod
    def prettify(self):
        pass # pragma: no cover
    def __iter__(self):
        # Iterating a node walks its children.
        for item in self.children:
            yield item
    def __call__(self, *args, **kwargs):
        # Calling a node is shorthand for find_all, as in BeautifulSoup.
        return self.find_all(*args, **kwargs)
class Node(NodeLike, Some):
"""
The Node class is the main wrapper around
BeautifulSoup elements like Tag. It implements many of the
same properties and methods as BeautifulSoup for navigating
through documents, like find, select, parents, etc.
"""
def __new__(cls, value, *args, **kwargs):
if isinstance(value, NavigableString):
return object.__new__(NavigableStringNode)
return object.__new__(cls)
def _wrap_node(self, func):
val = func(self._value)
return NullNode() if val is None else Node(val)
def _wrap_multi(self, func):
vals = func(self._value)
return Collection(map(Node, vals))
def _wrap_scalar(self, func):
val = func(self._value)
return Scalar(val)
@property
def children(self):
"""
A :class:`Collection` of the child elements.
"""
return self._wrap_multi(operator.attrgetter('children'))
@property
def parents(self):
"""
A :class:`Collection` of the parents elements.
"""
return self._wrap_multi(operator.attrgetter('parents'))
@property
def contents(self):
"""
A :class:`Collection` of the child elements.
"""
return self._wrap_multi(operator.attrgetter('contents'))
@property
def descendants(self):
"""
A :class:`Collection` of all elements nested inside this Node.
"""
return self._wrap_multi(operator.attrgetter('descendants'))
@property
def next_siblings(self):
"""
A :class:`Collection` of all siblings after this node
"""
return self._wrap_multi(operator.attrgetter('next_siblings'))
@property
def previous_siblings(self):
"""
A :class:`Collection` of all siblings before this node
"""
return self._wrap_multi(operator.attrgetter('previous_siblings'))
@property
def parent(self):
"""
The parent :class:`Node`, or :class:`NullNode`
"""
return self._wrap_node(operator.attrgetter('parent'))
@property
def next_sibling(self):
"""
The :class:`Node` sibling after this, or :class:`NullNode`
"""
return self._wrap_node(operator.attrgetter('next_sibling'))
@property
def previous_sibling(self):
"""
The :class:`Node` sibling prior to this, or :class:`NullNode`
"""
return self._wrap_node(operator.attrgetter('previous_sibling'))
@property
def attrs(self):
"""
A :class:`Scalar` of this Node's attribute dictionary
Example:
>>> Soupy("<a val=3></a>").find('a').attrs
Scalar({u'val': u'3'})
"""
return self._wrap_scalar(operator.attrgetter('attrs'))
@property
def text(self):
"""
A :class:`Scalar` of this Node's text.
Example:
>>> node = Soupy('<p>hi there</p>').find('p')
>>> node
Node(<p>hi there</p>)
>>> node.text
Scalar(u'hi there')
"""
return self._wrap_scalar(operator.attrgetter('text'))
@property
def name(self):
"""
A :class:`Scalar` of this Node's tag name.
Example:
>>> node = Soupy('<p>hi there</p>').find('p')
>>> node
Node(<p>hi there</p>)
>>> node.name
Scalar(u'p')
"""
return self._wrap_scalar(operator.attrgetter('name'))
def find(self, *args, **kwargs):
"""
Find a single Node among this Node's descendants.
Returns :class:`NullNode` if nothing matches.
This inputs to this function follow the same semantics
as BeautifulSoup. See http://bit.ly/bs4doc for more info.
Examples:
- node.find('a') # look for `a` tags
- node.find('a', 'foo') # look for `a` tags with class=`foo`
- node.find(func) # find tag where func(tag) is True
- node.find(val=3) # look for tag like <a, val=3>
"""
op = operator.methodcaller('find', *args, **kwargs)
return self._wrap_node(op)
def find_next_sibling(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`next_siblings`
"""
op = operator.methodcaller('find_next_sibling', *args, **kwargs)
return self._wrap_node(op)
def find_parent(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`parents`
"""
op = operator.methodcaller('find_parent', *args, **kwargs)
return self._wrap_node(op)
def find_previous_sibling(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_sibling', *args, **kwargs)
return self._wrap_node(op)
def find_all(self, *args, **kwargs):
"""
Like :meth:`find`, but selects all matches (not just the first one).
Returns a :class:`Collection`.
If no elements match, this returns a Collection with no items.
"""
op = operator.methodcaller('find_all', *args, **kwargs)
return self._wrap_multi(op)
def find_next_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`next_siblings`
"""
op = operator.methodcaller('find_next_siblings', *args, **kwargs)
return self._wrap_multi(op)
def find_parents(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`parents`
"""
op = operator.methodcaller('find_parents', *args, **kwargs)
return self._wrap_multi(op)
def find_previous_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_siblings', *args, **kwargs)
return self._wrap_multi(op)
def select(self, selector):
"""
Like :meth:`find_all`, but takes a CSS selector string as input.
"""
op = operator.methodcaller('select', selector)
return self._wrap_multi(op)
    def prettify(self):
        # Delegate to the wrapped value's own prettify via a Q expression,
        # then unwrap the resulting Scalar to a plain string.
        return self.map(Q.prettify()).val()
    def __len__(self):
        # Length of the wrapped value (e.g. number of contained elements).
        return len(self._value)
    def __bool__(self):
        # A real Node is always truthy; null results are represented by
        # separate Null classes instead.
        return True
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
class NavigableStringNode(Node):
    """
    The NavigableStringNode is a special case Node that wraps
    BeautifulSoup NavigableStrings. This class implements sensible
    versions of properties and methods that are missing from
    the NavigableString object.
    """
    @property
    def attrs(self):
        """
        An empty :class:`Scalar` dict
        """
        return Scalar({})
    @property
    def text(self):
        """
        A :class:`Scalar` of the string value
        """
        return Scalar(self._value.string)
    @property
    def name(self):
        """
        An empty :class:`Scalar` string
        """
        return Scalar('')
    @property
    def children(self):
        """
        An empty :class:`Collection` (strings have no children)
        """
        return Collection([])
    @property
    def contents(self):
        """
        An empty :class:`Collection` (strings have no contents)
        """
        return Collection([])
    @property
    def descendants(self):
        """
        An empty :class:`Collection` (strings have no descendants)
        """
        return Collection([])
    def find(self, *args, **kwargs):
        """
        Returns :class:`NullNode` (a string cannot contain tags)
        """
        return NullNode()
    def find_all(self, *args, **kwargs):
        """
        Returns an empty :class:`Collection`
        """
        return Collection([])
    def select(self, selector):
        """
        Returns an empty :class:`Collection`
        """
        return Collection([])
    def prettify(self):
        # For a bare string the prettified form is just its text.
        return self.text.val()
class NullNode(NodeLike, BaseNull):
    """
    NullNode is returned when a query doesn't match any node
    in the document.
    Every traversal property and search method yields a further Null
    object, so chained queries never raise on a failed match.
    """
    def _get_null(self):
        """
        Returns the NullNode
        """
        return NullNode()
    def _get_null_set(self):
        """
        Returns the NullCollection
        """
        return NullCollection()
    # Multi-element accessors all produce a NullCollection.
    children = property(_get_null_set)
    parents = property(_get_null_set)
    contents = property(_get_null_set)
    descendants = property(_get_null_set)
    next_siblings = property(_get_null_set)
    previous_siblings = property(_get_null_set)
    # Single-element accessors all produce another NullNode.
    parent = property(_get_null)
    next_sibling = property(_get_null)
    previous_sibling = property(_get_null)
    # Scalar accessors produce the Null wrapper.
    attrs = property(lambda self: Null())
    text = property(lambda self: Null())
    name = property(lambda self: Null())
    def find(self, *args, **kwargs):
        """
        Returns :class:`NullNode`
        """
        return NullNode()
    def find_parent(self, *args, **kwargs):
        """
        Returns :class:`NullNode`
        """
        return NullNode()
    def find_previous_sibling(self, *args, **kwargs):
        """
        Returns :class:`NullNode`
        """
        return NullNode()
    def find_next_sibling(self, *args, **kwargs):
        """
        Returns :class:`NullNode`
        """
        return NullNode()
    def find_all(self, *args, **kwargs):
        """
        Returns :class:`NullCollection`
        """
        return NullCollection()
    def find_parents(self, *args, **kwargs):
        """
        Returns :class:`NullCollection`
        """
        return NullCollection()
    def find_next_siblings(self, *args, **kwargs):
        """
        Returns :class:`NullCollection`
        """
        return NullCollection()
    def find_previous_siblings(self, *args, **kwargs):
        """
        Returns :class:`NullCollection`
        """
        return NullCollection()
    def select(self, selector):
        """
        Returns :class:`NullCollection`
        """
        return NullCollection()
    def dump(self, *args, **kwargs):
        """
        Returns :class:`Null`
        """
        return Null()
    def prettify(self):
        # Human-readable marker for a missing node.
        return "Null Node"
    def __len__(self):
        # A NullNode contains nothing.
        return 0
def either(*funcs):
    """
    A utility function for selecting the first non-null query.
    Parameters:
    funcs: One or more functions
    Returns:
    A function that, when called with a :class:`Node`, will
    pass the input to each `func`, and return the first non-Falsey
    result.
    Examples:
    >>> s = Soupy("<p>hi</p>")
    >>> s.apply(either(Q.find('a'), Q.find('p').text))
    Scalar('hi')
    """
    def either(val):
        # Try each candidate in order; fall back to Null if all fail.
        for candidate in funcs:
            outcome = val.apply(candidate)
            if outcome:
                return outcome
        return Null()
    return either
def _helpful_failure(method):
    """
    Decorator for eval_ that prints a helpful error message
    if an exception is generated in a Q expression
    """
    @wraps(method)
    def wrapper(self, val):
        try:
            return method(self, val)
        except:  # deliberately broad -- the exception is always re-raised
            exc_cls, inst, tb = sys.exc_info()
            if hasattr(inst, '_RERAISE'):
                # An inner expression already attached the message; just
                # record the outermost expression/value pair and propagate.
                _, expr, _, inner_val = Q.__debug_info__
                Q.__debug_info__ = QDebug(self, expr, val, inner_val)
                raise
            if issubclass(exc_cls, KeyError):  # Overrides formatting
                exc_cls = QKeyError
            # Show val, unless it's too long
            prettyval = repr(val)
            if len(prettyval) > 150:
                prettyval = "<%s instance>" % (type(val).__name__)
            msg = "{0}\n\n\tEncountered when evaluating {1}{2}".format(
                inst, prettyval, self)
            new_exc = exc_cls(msg)
            # Mark the new exception so outer frames take the branch above.
            new_exc._RERAISE = True
            Q.__debug_info__ = QDebug(self, self, val, val)
            six.reraise(exc_cls, new_exc, tb)
    return wrapper
@six.python_2_unicode_compatible
class Expression(object):
    """
    Soupy expressions are a shorthand for building single-argument functions.
    Users should use the ``Q`` object, which is just an instance of Expression.
    """
    def __str__(self):
        return 'Q'
    def __repr__(self):
        return repr(str(self))[1:-1]  # trim quotes
    def __iter__(self):
        # A base expression iterates as a chain of one element.
        yield self
    def _chain(self, other):
        # Concatenate this expression's elements with ``other``'s.
        return Chain(tuple(iter(self)) + tuple(iter(other)))
    # Attribute access, indexing and calls each extend the chain.
    def __getattr__(self, key):
        return self._chain(Attr(key))
    def __getitem__(self, key):
        return self._chain(GetItem(key))
    def __call__(self, *args, **kwargs):
        return self._chain(Call(args, kwargs))
    # Comparison and arithmetic operators build deferred BinaryOps
    # instead of evaluating immediately.
    def __gt__(self, other):
        return BinaryOp(operator.gt, '>', self, other)
    def __ge__(self, other):
        return BinaryOp(operator.ge, '>=', self, other)
    def __lt__(self, other):
        return BinaryOp(operator.lt, '<', self, other)
    def __le__(self, other):
        return BinaryOp(operator.le, '<=', self, other)
    def __eq__(self, other):
        return BinaryOp(operator.eq, '==', self, other)
    def __ne__(self, other):
        return BinaryOp(operator.ne, '!=', self, other)
    def __add__(self, other):
        return BinaryOp(operator.add, '+', self, other)
    def __sub__(self, other):
        return BinaryOp(operator.sub, '-', self, other)
    def __div__(self, other):
        # Python 2 division operator only; operator.__div__ does not
        # exist on Python 3, where __truediv__ below is used instead.
        return BinaryOp(operator.__div__, '/', self, other)
    def __floordiv__(self, other):
        return BinaryOp(operator.floordiv, '//', self, other)
    def __truediv__(self, other):
        return BinaryOp(operator.truediv, '/', self, other)
    def __mul__(self, other):
        return BinaryOp(operator.mul, '*', self, other)
    def __rmul__(self, other):
        return BinaryOp(operator.mul, '*', other, self)
    def __pow__(self, other):
        return BinaryOp(operator.pow, '**', self, other)
    def __mod__(self, other):
        return BinaryOp(operator.mod, '%', self, other)
    @_helpful_failure
    def eval_(self, val):
        """
        Pass the argument ``val`` to the function, and return the result.
        This special method is necessary because the ``__call__`` method
        builds a new function instead of evaluating the current one.
        """
        return val
    def debug_(self):
        """
        Returns debugging information for the previous error raised
        during expression evaluation.
        Returns a QDebug namedtuple with four fields:
        - expr is the last full expression to have raised an exception
        - inner_expr is the specific sub-expression that raised the exception
        - val is the value that expr tried to evaluate.
        - inner_val is the value that inner_expr tried to evaluate
        If no exceptions have been triggered from expression evaluation,
        then each field is None.
        Examples:
        >>> Scalar('test').map(Q.upper().foo)
        Traceback (most recent call last):
        ...
        AttributeError: 'str' object has no attribute 'foo'
        ...
        >>> dbg = Q.debug_()
        >>> dbg.expr
        Q.upper().foo
        >>> dbg.inner_expr
        .foo
        >>> dbg.val
        'test'
        >>> dbg.inner_val
        'TEST'
        """
        result = self.__debug_info__
        if isinstance(result, QDebug):
            return result
        return QDebug(None, None, None, None)
@six.python_2_unicode_compatible
class Call(Expression):
    """An expression for calling a function or method"""
    def __init__(self, args, kwargs):
        self._args = args
        self._kwargs = kwargs
    @_helpful_failure
    def eval_(self, val):
        # Invoke the wrapped value with the recorded arguments.
        return val.__call__(*self._args, **self._kwargs)
    def __str__(self):
        pieces = [_uniquote(arg) for arg in self._args]
        if self._kwargs:
            pieces.append('**%s' % _uniquote(self._kwargs))
        return '(%s)' % (', '.join(pieces))
@six.python_2_unicode_compatible
class BinaryOp(Expression):
    """A binary operation"""
    def __init__(self, op, symbol, left, right):
        self.op = op
        self.symbol = symbol
        self.left = left
        self.right = right
    @_helpful_failure
    def eval_(self, val):
        # Evaluate either operand that is itself an Expression.
        lhs, rhs = self.left, self.right
        if isinstance(lhs, Expression):
            lhs = lhs.eval_(val)
        if isinstance(rhs, Expression):
            rhs = rhs.eval_(val)
        return self.op(lhs, rhs)
    def __str__(self):
        lhs, rhs = self.left, self.right
        # Parenthesize nested binary operations for readability.
        if isinstance(lhs, BinaryOp):
            lhs = '(%s)' % str(lhs)
        if isinstance(rhs, BinaryOp):
            rhs = '(%s)' % str(rhs)
        return "%s %s %s" % (lhs, self.symbol, rhs)
@six.python_2_unicode_compatible
class Attr(Expression):
    """An expression for fetching an attribute (eg, obj.item)"""
    def __init__(self, attribute_name):
        self._name = attribute_name
    @_helpful_failure
    def eval_(self, val):
        # attrgetter also supports dotted paths like 'a.b'.
        getter = operator.attrgetter(self._name)
        return getter(val)
    def __str__(self):
        return '.%s' % self._name
@six.python_2_unicode_compatible
class GetItem(Expression):
    """An expression for getting an item (eg, obj['item'])"""
    def __init__(self, key):
        self._name = key
    @_helpful_failure
    def eval_(self, val):
        # Single-key lookup, identical to val[self._name].
        return val[self._name]
    def __str__(self):
        return "[%s]" % _uniquote(self._name)
@six.python_2_unicode_compatible
class Chain(Expression):
    """A chain of expressions (eg a.b.c)"""
    def __init__(self, items):
        self._items = items
    def __iter__(self):
        return iter(self._items)
    @_helpful_failure
    def eval_(self, val):
        # Thread the value through each sub-expression in order.
        result = val
        for expr in self._items:
            result = expr.eval_(result)
        return result
    def __str__(self):
        return ''.join(_uniquote(item) for item in self._items)
def _make_callable(func):
# If func is an expression, we call via eval_
# otherwise, we call func directly
if func is None:
func = Q
return getattr(func, 'eval_', func)
def _unwrap(val):
    """Extract the raw value from a Wrapper; pass anything else through."""
    return val.val() if isinstance(val, Wrapper) else val
def _dequote(text):
    """Strip the surrounding quotes from a repr-style quoted string.
    Raises AssertionError when ``text`` does not look like a quoted
    string. (Parameter renamed from ``str`` to stop shadowing the
    builtin; existing callers pass it positionally.)
    """
    try:
        return QUOTED_STR.findall(text)[0]
    except IndexError:
        raise AssertionError("Not a quoted string")
def _uniquote(value):
    """
    Convert to unicode, and add quotes if initially a string
    """
    if isinstance(value, six.binary_type):
        try:
            value = value.decode('utf-8')
        except UnicodeDecodeError:  # Not utf-8. Show the repr
            value = six.text_type(_dequote(repr(value)))  # trim quotes
    result = six.text_type(value)
    # Only textual inputs get quoted so they display like literals.
    if isinstance(value, six.text_type):
        result = "'%s'" % result
    return result
def _repr(value):
    """repr() that always yields unicode text (Python 2 compatibility)."""
    text = repr(value)
    if isinstance(text, six.binary_type):
        text = text.decode('utf-8')
    return text
class Soupy(Node):
    """Top-level entry point: wraps markup, or an existing BeautifulSoup
    PageElement, as a :class:`Node`.
    Extra positional/keyword arguments are forwarded to the
    BeautifulSoup constructor (e.g. the parser name).
    """
    def __init__(self, val, *args, **kwargs):
        if not isinstance(val, PageElement):
            # Treat ``val`` as raw markup and parse it first.
            val = BeautifulSoup(val, *args, **kwargs)
        super(Soupy, self).__init__(val)
# The singleton Expression users build queries from, e.g. Q.find('a').text.
Q = Expression()
| {
"content_hash": "69d3125737c152c9ac3faab8584d3d76",
"timestamp": "",
"source": "github",
"line_count": 1615,
"max_line_length": 79,
"avg_line_length": 25.11455108359133,
"alnum_prop": 0.5531558185404339,
"repo_name": "ChrisBeaumont/soupy",
"id": "ec4903efd2f1fc708a27d50e7945157d9a9e15da",
"size": "40560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soupy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65561"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017, Ryan Dellana
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Ryan Dellana nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Ryan Dellana BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
import numpy as np
from TensorflowUtil import weight_variable, bias_variable, conv2d, conv_layer
from TensorflowUtil import conv_layer_, fc_layer, fc_layer_, identity_in, flattened
from TensorflowUtil import normal_log, negative_log_likelihood, max_pool_2x2
# 640 x 480
class cnn_cccccfffff(object):
    """Regression CNN: five convolutional layers followed by five
    fully-connected layers (hence the name), trained with a
    mean-absolute-error loss on a single scalar target.
    NOTE(review): written against a pre-1.0 TensorFlow API --
    ``tf.sub`` was renamed ``tf.subtract`` in TF 1.0; update if porting.
    """
    def __init__(self):
        # Input: batch of 115x200 RGB images; target: one scalar per image.
        self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
        self.y_ = tf.placeholder(tf.float32, [None, 1])
        # Convolutional stack: three strided 5x5 layers, two 3x3 layers.
        (self.h_conv1, _) = conv_layer(self.x, conv=(5, 5), stride=2, n_filters=24, use_bias=True)
        (self.h_conv2, _) = conv_layer(self.h_conv1, conv=(5, 5), stride=2, n_filters=36, use_bias=True)
        (self.h_conv3, _) = conv_layer(self.h_conv2, conv=(5, 5), stride=2, n_filters=48, use_bias=True)
        (self.h_conv4, _) = conv_layer(self.h_conv3, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        (self.h_conv5, _) = conv_layer(self.h_conv4, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        self.h_conv5_flat = flattened(self.h_conv5)
        # Fully-connected stack with dropout; keep_prob_* are feed targets.
        (self.h_fc1_drop, _, _, self.keep_prob_fc1) = fc_layer(x=self.h_conv5_flat, n_neurons=512, activation=tf.nn.relu, use_bias=True, dropout=True)
        (self.h_fc2_drop, _, _, self.keep_prob_fc2) = fc_layer(self.h_fc1_drop, 100, tf.nn.relu, True, True)
        (self.h_fc3_drop, _, _, self.keep_prob_fc3) = fc_layer(self.h_fc2_drop, 50, tf.nn.relu, True, True)
        (self.h_fc4_drop, _, _, self.keep_prob_fc4) = fc_layer(self.h_fc3_drop, 10, tf.nn.relu, True, True)
        # Final linear layer: 10 units -> 1 output (no activation).
        W_fc5 = weight_variable([10, 1])
        b_fc5 = bias_variable([1])
        self.y_out = tf.matmul(self.h_fc4_drop, W_fc5) + b_fc5
        # Mean absolute error between target and prediction.
        self.loss = tf.reduce_mean(tf.abs(tf.sub(self.y_, self.y_out)))
| {
"content_hash": "8edcbe37996d03562336941d1cdd10f8",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 150,
"avg_line_length": 57.642857142857146,
"alnum_prop": 0.7050805452292441,
"repo_name": "DJTobias/Cherry-Autonomous-Racecar",
"id": "02734ccff3f48c0ff9d2cdf6327448dc05b23524",
"size": "3250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "car/scripts/car_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "38605"
},
{
"name": "CMake",
"bytes": "6491"
},
{
"name": "Python",
"bytes": "80796"
},
{
"name": "Shell",
"bytes": "947"
}
],
"symlink_target": ""
} |
import unittest
import datetime
import numpy as np
import desisurvey.plan
import desisurvey.etc
import desisurvey.config
import desisurvey.utils
from desisurvey.test.base import Tester
from desisurvey.scripts import surveyinit
from desisurvey.scheduler import Scheduler
class TestScheduler(Tester):
    """Check that a Planner/Scheduler restored from a snapshot reproduces
    the live scheduler's decisions, night by night and exposure by
    exposure."""
    def test_scheduler(self):
        # Initialize the survey with a short, deterministic optimization.
        cmd = 'surveyinit --max-cycles 5 --init zero'
        args = surveyinit.parse(cmd.split()[1:])
        surveyinit.main(args)
        config = desisurvey.config.Configuration()
        config.fiber_assignment_cadence.set_value('daily')
        planner = desisurvey.plan.Planner(simulate=True)
        planner.first_night = desisurvey.utils.get_date('2020-01-01')
        planner.last_night = desisurvey.utils.get_date('2025-01-01')
        scheduler = Scheduler(planner)
        num_nights = (self.stop - self.start).days
        for i in range(num_nights):
            night = self.start + datetime.timedelta(i)
            # Save and restore scheduler state.
            planner.save('snapshot.ecsv')
            planner2 = desisurvey.plan.Planner(restore='snapshot.ecsv',
                                               simulate=True)
            self.assertTrue(np.all(planner.donefrac == planner2.donefrac))
            self.assertTrue(np.all(planner.tile_status == planner2.tile_status))
            avail, planned = planner.afternoon_plan(night)
            avail2, planned2 = planner2.afternoon_plan(night)
            scheduler2 = Scheduler(planner2)
            self.assertTrue(np.all(scheduler.plan.obsend() ==
                                   scheduler2.plan.obsend()))
            self.assertTrue(np.all(scheduler.plan.obsend_by_program() ==
                                   scheduler2.plan.obsend_by_program()))
            self.assertTrue(np.all(avail == avail2))
            self.assertTrue(np.all(planned == planned2))
            # Run both schedulers in parallel.
            scheduler.init_night(night)
            scheduler2.init_night(night)
            # Loop over exposures during the night.
            dusk, dawn = scheduler.night_ephem['dusk'], scheduler.night_ephem['dawn']
            ETC = desisurvey.etc.ExposureTimeCalculator()
            for mjd in np.arange(dusk, dawn, 15. / (24. * 60.)):
                # Tuple fields: TILEID,PROGRAM,SNR2FRAC,EXPFAC,AIRMASS,PROGRAM,PROGEND
                # (renamed from ``next`` to avoid shadowing the builtin).
                selection = scheduler.next_tile(
                    mjd, ETC, seeing=1.1, transp=0.95, skylevel=1)
                # Check that the restored scheduler gives the same results.
                selection2 = scheduler2.next_tile(
                    mjd, ETC, seeing=1.1, transp=0.95, skylevel=1)
                for field, field2 in zip(selection, selection2):
                    self.assertEqual(field, field2)
                tileid = selection[0]
                if tileid is not None:
                    scheduler.update_snr(tileid, 1.)
                    scheduler2.update_snr(tileid, 1.)
def test_suite():
    """Allows testing of only this module with the command::
    python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| {
"content_hash": "da23b569aac382e2bbbea7860a72ab42",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 91,
"avg_line_length": 44.73913043478261,
"alnum_prop": 0.6112730806608357,
"repo_name": "desihub/desisurvey",
"id": "7caa0c7d178df9f5b0d4118401039b65eb8ddf44",
"size": "3087",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desisurvey/test/test_scheduler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "533431"
},
{
"name": "Shell",
"bytes": "3254"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2016, Mark Rogaski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
import requests
from django.db import models
from django.contrib.auth.models import User, Group
from django.utils.encoding import python_2_unicode_compatible
from discord_bind.conf import settings
import logging
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class DiscordUser(models.Model):
    """ Discord User mapping. """
    # Local Django account this Discord identity is bound to.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Discord user id (string of up to 20 digits).
    uid = models.CharField(max_length=20, blank=False, unique=True)
    username = models.CharField(max_length=254)
    # 4-character tag that disambiguates identical usernames.
    discriminator = models.CharField(max_length=4)
    avatar = models.CharField(max_length=32, blank=True)
    email = models.EmailField(max_length=254, blank=True)
    # OAuth2 credentials and grant metadata for the Discord API.
    access_token = models.CharField(max_length=32, blank=True)
    refresh_token = models.CharField(max_length=32, blank=True)
    scope = models.CharField(max_length=256, blank=True)
    expiry = models.DateTimeField(null=True)
    def __str__(self):
        # Rendered as "username.discriminator".
        return self.username + '.' + self.discriminator
@python_2_unicode_compatible
class DiscordInvite(models.Model):
    """ Discord instant invites """
    TEXT = 'text'
    VOICE = 'voice'
    CHANNEL_TYPE_CHOICES = (
        (TEXT, 'text'),
        (VOICE, 'voice'),
    )
    # The invite code portion of a discord.gg invite URL.
    code = models.CharField(max_length=32, unique=True)
    active = models.BooleanField(default=False)
    # Restrict which auth groups may use this invite.
    groups = models.ManyToManyField(Group, blank=True,
                                    related_name='discord_invites')
    description = models.CharField(max_length=256, blank=True)
    # Cached guild/channel metadata, refreshed by update_context().
    guild_name = models.CharField(max_length=64, blank=True)
    guild_id = models.CharField(max_length=20, blank=True)
    guild_icon = models.CharField(max_length=32, blank=True)
    channel_name = models.CharField(max_length=64, blank=True)
    channel_id = models.CharField(max_length=20, blank=True)
    channel_type = models.CharField(max_length=5, blank=True,
                                    choices=CHANNEL_TYPE_CHOICES)
    def __str__(self):
        return self.code
    def update_context(self):
        """Fetch invite metadata from the Discord API and cache it.
        Returns True when the guild/channel fields were refreshed and
        saved, False on HTTP failure or a malformed response.
        """
        result = False
        r = requests.get(settings.DISCORD_BASE_URI + '/invites/' + self.code)
        if r.status_code == requests.codes.ok:
            logger.info('fetched data for Discord invite %s' % self.code)
            invite = r.json()
            try:
                self.guild_name = invite['guild']['name']
                self.guild_id = invite['guild']['id']
                self.guild_icon = invite['guild']['icon']
                self.channel_name = invite['channel']['name']
                self.channel_id = invite['channel']['id']
                self.channel_type = invite['channel']['type']
                self.save()
                result = True
            except KeyError:
                # Response lacked expected fields; leave result False.
                pass
        else:
            logger.error(('failed to fetch data for '
                          'Discord invite %s: %d %s') % (self.code,
                                                         r.status_code,
                                                         r.reason))
        return result
| {
"content_hash": "2c7b03119fecb2c8b5e9d38ff31c46a2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 40.25961538461539,
"alnum_prop": 0.6520181514210652,
"repo_name": "mrogaski/django-discord-bind",
"id": "364f56cb9b08398ca00de757aebb8dc6c3e71b87",
"size": "4187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discord_bind/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45876"
}
],
"symlink_target": ""
} |
def Thread(id, uri, title):
    """Build the plain-dict representation of a thread row."""
    return dict(id=id, uri=uri, title=title)
class Threads(object):
    """Data-access helper for the ``threads`` table."""
    def __init__(self, db):
        self.db = db
        # Create the backing table on first use.
        self.db.execute([
            'CREATE TABLE IF NOT EXISTS threads (',
            '    id INTEGER PRIMARY KEY, uri VARCHAR(256) UNIQUE, title VARCHAR(256))'])
    def __contains__(self, uri):
        row = self.db.execute(
            "SELECT title FROM threads WHERE uri=?", (uri, )).fetchone()
        return row is not None
    def __getitem__(self, uri):
        row = self.db.execute("SELECT * FROM threads WHERE uri=?", (uri, )).fetchone()
        return Thread(*row)
    def get(self, id):
        row = self.db.execute("SELECT * FROM threads WHERE id=?", (id, )).fetchone()
        return Thread(*row)
    def new(self, uri, title):
        # Insert, then read the row back so the generated id is included.
        self.db.execute(
            "INSERT INTO threads (uri, title) VALUES (?, ?)", (uri, title))
        return self[uri]
| {
"content_hash": "986e1770be187d857c6dd43059bf4bd1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 96,
"avg_line_length": 29.70967741935484,
"alnum_prop": 0.5407166123778502,
"repo_name": "posativ/isso",
"id": "060f074c92689123d00bb8c781c807d772f942b5",
"size": "949",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "isso/db/threads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10278"
},
{
"name": "Dockerfile",
"bytes": "2875"
},
{
"name": "HTML",
"bytes": "8688"
},
{
"name": "JavaScript",
"bytes": "135728"
},
{
"name": "Makefile",
"bytes": "4844"
},
{
"name": "Python",
"bytes": "223741"
},
{
"name": "Shell",
"bytes": "3156"
}
],
"symlink_target": ""
} |
import io
from pkgutil import walk_packages
from setuptools import setup
def find_packages(path):
    """Return all packages (including subpackages) found under ``path``."""
    names = []
    for _, name, is_pkg in walk_packages([path]):
        if is_pkg:
            names.append(name)
    return names
def read_file(filename):
    """Return the whole file's contents with surrounding whitespace removed."""
    with io.open(filename) as fp:
        content = fp.read()
    return content.strip()
def read_rst(filename):
    # Drop ``.. comment::`` directives, which pypi cannot render.
    content = read_file(filename)
    kept = [line for line in io.StringIO(content)
            if not line.startswith('.. comment::')]
    return ''.join(kept)
def read_requirements(filename):
    """Parse a requirements file, skipping comment lines."""
    lines = read_file(filename).splitlines()
    return [line.strip() for line in lines if not line.startswith('#')]
# Package metadata; the long description is assembled from the README and
# changelog with pypi-unsupported directives stripped.
setup(
    name='scrapy-inline-requests',
    version=read_file('VERSION'),
    description="A decorator for writing coroutine-like spider callbacks.",
    long_description=read_rst('README.rst') + '\n\n' + read_rst('HISTORY.rst'),
    author="Rolando Espinoza",
    author_email='rolando at rmax.io',
    url='https://github.com/rolando/scrapy-inline-requests',
    # Code lives under src/ (the "src layout").
    packages=list(find_packages('src')),
    package_dir={'': 'src'},
    setup_requires=read_requirements('requirements-setup.txt'),
    install_requires=read_requirements('requirements-install.txt'),
    include_package_data=True,
    license="MIT",
    keywords='scrapy-inline-requests',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
| {
"content_hash": "48cfc24f64d6eec54bcfa882a206e089",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 33.18518518518518,
"alnum_prop": 0.6484375,
"repo_name": "darkrho/scrapy-inline-requests",
"id": "c30d1b7d06df9ffc1bc312e5ae73c41219a96d54",
"size": "1838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6250"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.